[ovs-dev] [PATCH 02/13] Upstream GRE: Refactor GRE code.

Isaku Yamahata yamahata at valinux.co.jp
Thu Dec 6 04:51:35 UTC 2012


On Thu, Nov 22, 2012 at 07:56:28AM -0800, Pravin B Shelar wrote:
> From: Pravin Shelar <pshelar at nicira.com>
> 
> The following patch refactors the GRE code into generic IP tunneling
> code and GRE-specific code. It also introduces a new tunneling
> protocol handler so that multiple modules can register protocol
> handlers for the same protocol simultaneously.
> 
> The goal of this work is to reuse the IP tunneling part (e.g. the
> code related to device management and packet rx/tx) in other
> tunneling implementations.
> 
> Signed-off-by: Pravin B Shelar <pshelar at nicira.com>
> ---
>  include/linux/if_tunnel.h |   16 -
>  include/net/gre.h         |    9 +
>  include/net/ipip.h        |  153 +++++-
>  net/ipv4/Kconfig          |    6 +
>  net/ipv4/Makefile         |    1 +
>  net/ipv4/gre.c            |  247 +++++++-
>  net/ipv4/ip_gre.c         | 1602 ++++++++-------------------------------------
>  net/ipv4/ip_tunnel.c      | 1283 ++++++++++++++++++++++++++++++++++++
>  net/ipv6/ip6_tunnel.c     |    1 +
>  9 files changed, 1975 insertions(+), 1343 deletions(-)
>  create mode 100644 net/ipv4/ip_tunnel.c
> 
> diff --git a/include/linux/if_tunnel.h b/include/linux/if_tunnel.h
> index f4e56ec..9234965 100644
> --- a/include/linux/if_tunnel.h
> +++ b/include/linux/if_tunnel.h
> @@ -6,20 +6,4 @@
>  #include <uapi/linux/if_tunnel.h>
>  #include <linux/u64_stats_sync.h>
>  
> -/*
> - * Locking : hash tables are protected by RCU and RTNL
> - */
> -
> -#define for_each_ip_tunnel_rcu(pos, start) \
> -	for (pos = rcu_dereference(start); pos; pos = rcu_dereference(pos->next))
> -
> -/* often modified stats are per cpu, other are shared (netdev->stats) */
> -struct pcpu_tstats {
> -	u64	rx_packets;
> -	u64	rx_bytes;
> -	u64	tx_packets;
> -	u64	tx_bytes;
> -	struct u64_stats_sync	syncp;
> -};
> -
>  #endif /* _IF_TUNNEL_H_ */
> diff --git a/include/net/gre.h b/include/net/gre.h
> index 8266547..dda547a 100644
> --- a/include/net/gre.h
> +++ b/include/net/gre.h
> @@ -2,6 +2,7 @@
>  #define __LINUX_GRE_H
>  
>  #include <linux/skbuff.h>
> +#include <net/ipip.h>
>  
>  #define GREPROTO_CISCO		0
>  #define GREPROTO_PPTP		1
> @@ -15,4 +16,12 @@ struct gre_protocol {
>  int gre_add_protocol(const struct gre_protocol *proto, u8 version);
>  int gre_del_protocol(const struct gre_protocol *proto, u8 version);
>  
> +struct sk_buff *gre_build_header(struct sk_buff *skb,
> +				  const struct tnl_ptk_info *tpi);
> +struct gre_base_hdr {
> +	__be16 flags;
> +	__be16 protocol;
> +};
> +#define GRE_HEADER_SECTION 4
> +
>  #endif
> diff --git a/include/net/ipip.h b/include/net/ipip.h
> index 21947cf..a14bde8 100644
> --- a/include/net/ipip.h
> +++ b/include/net/ipip.h
> @@ -2,7 +2,9 @@
>  #define __NET_IPIP_H 1
>  
>  #include <linux/if_tunnel.h>
> +#include <net/dsfield.h>
>  #include <net/gro_cells.h>
> +#include <net/inet_ecn.h>
>  #include <net/ip.h>
>  
>  /* Keep error state on tunnel for 30 sec */
> @@ -26,7 +28,7 @@ struct ip_tunnel {
>  	/* These four fields used only by GRE */
>  	__u32			i_seqno;	/* The last seen seqno	*/
>  	__u32			o_seqno;	/* The last output seqno */
> -	int			hlen;		/* Precalculated GRE header length */
> +	int			hlen;		/* Precalculated header length */
>  	int			mlink;
>  
>  	struct ip_tunnel_parm	parms;
> @@ -37,10 +39,31 @@ struct ip_tunnel {
>  #endif
>  	struct ip_tunnel_prl_entry __rcu *prl;		/* potential router list */
>  	unsigned int			prl_count;	/* # of entries in PRL */
> -
> +	int ipt_net_id;
>  	struct gro_cells		gro_cells;
>  };
>  
> +/*
> + * Locking : hash tables are protected by RCU and RTNL
> + */
> +
> +#define for_each_ip_tunnel_rcu(t, start) \
> +	for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
> +
> +
> +#define TUNNEL_CSUM        __cpu_to_be16(0x8000)
> +#define TUNNEL_KEY         __cpu_to_be16(0x2000)
> +#define TUNNEL_SEQ         __cpu_to_be16(0x1000)
> +
> +struct tnl_ptk_info {
> +	__be16 flags;
> +	__be16 proto;
> +	__be32 key;
> +	__be32 seq;
> +	int hdr_len;
> +	__sum16 csum;
> +};
> +
>  struct ip_tunnel_prl_entry {
>  	struct ip_tunnel_prl_entry __rcu *next;
>  	__be32				addr;
> @@ -48,6 +71,14 @@ struct ip_tunnel_prl_entry {
>  	struct rcu_head			rcu_head;
>  };
>  
> +struct pcpu_tstats {
> +	u64	rx_packets;
> +	u64	rx_bytes;
> +	u64	tx_packets;
> +	u64	tx_bytes;
> +	struct u64_stats_sync	syncp;
> +};
> +
>  static inline void iptunnel_xmit(struct sk_buff *skb, struct net_device *dev)
>  {
>  	int err;
> @@ -71,4 +102,122 @@ static inline void iptunnel_xmit(struct sk_buff *skb, struct net_device *dev)
>  	}
>  }
>  
> +#define HASH_ON_KEY	1
> +struct ip_tunnel_ops {
> +	u32  flags;
> +	int (*parse_netlink_parms)(struct ip_tunnel *vxlan,
> +				   struct nlattr *data[],
> +				   struct nlattr *tb[],
> +				   struct ip_tunnel_parm *parms);

data and tb should be const here; parse_netlink_parms() only parses them.
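
Something like this (just a sketch, assuming no implementation needs to
modify them):

	int (*parse_netlink_parms)(struct ip_tunnel *vxlan,
				   const struct nlattr *data[],
				   const struct nlattr *tb[],
				   struct ip_tunnel_parm *parms);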


> +	int (*get_ioctl_param)(struct net_device *dev,
> +			       struct ifreq *ifr,
> +			       struct ip_tunnel_parm *p);
> +};
> +
> +#define IPT_HASH_BITS   10
> +#define IPT_HASH_SIZE   (1 << IPT_HASH_BITS)
> +
> +struct ip_tunnel_net {
> +	struct ip_tunnel __rcu **tunnels;
> +	struct net_device *fb_tunnel_dev;
> +	const struct ip_tunnel_ops *ops;
> +};
> +
> +enum ipt_type {
> +	IPT_GRE,
> +	IPT_VXLAN,
> +	IPT_MAX,
> +};
> +
> +struct ipt_protocol {
> +	int (*handler)(struct sk_buff *skb, const struct tnl_ptk_info *tpi);
> +	int (*err_handler)(struct sk_buff *skb, u32 info,
> +			const struct tnl_ptk_info *tpi);
> +	u16 portno;
> +	u8 type;
> +	u8 priority;
> +	struct hlist_node node;
> +};
> +
> +#define IPT_HASH_BUCKETS	16
> +extern struct hlist_head ipt_proto[IPT_HASH_BUCKETS];
> +int ipt_add_protocol(struct ipt_protocol *proto);
> +void ipt_del_protocol(struct ipt_protocol *proto);
> +static inline struct hlist_head *ipt_hash_bucket(u8 type, u16 portno)
> +{
> +	return &ipt_proto[(type << 8 | portno) & (IPT_HASH_BUCKETS - 1)];
> +}
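
As a side note: if I am reading this demux interface right, a second
module sharing IPPROTO_GRE would register roughly like this (sketch only,
handler names are hypothetical):

	static int my_gre_rcv(struct sk_buff *skb,
			      const struct tnl_ptk_info *tpi);
	static int my_gre_err(struct sk_buff *skb, u32 info,
			      const struct tnl_ptk_info *tpi);

	static struct ipt_protocol my_gre_proto = {
		.handler	= my_gre_rcv,
		.err_handler	= my_gre_err,
		.type		= IPT_GRE,
		.portno		= 0,
	};

	ipt_add_protocol(&my_gre_proto);

with the handler returning <= 0 once it has consumed the skb, going by
the dispatch loop in ipgre_rcv_v0() below. Might be worth a comment in
the header.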
> +
> +int ip_tunnel_init(struct net_device *dev);
> +void ip_tunnel_uninit(struct net_device *dev);
> +void ip_tunnel_dev_free(struct net_device *dev);
> +void  ip_tunnel_dellink(struct net_device *dev, struct list_head *head);
> +int __net_init ip_tunnel_init_net(struct net *net, int ipt_net_id,
> +				 struct rtnl_link_ops *ops);
> +
> +void __net_exit ip_tunnel_delete_net(struct ip_tunnel_net *itn);
> +
> +void ip_tunnel_err(struct ip_tunnel *t, struct sk_buff *skb, u32 info);
> +
> +int ip_tunnel_xmit(struct sk_buff *skb,
> +		   const struct iphdr *tiph,
> +		   const struct tnl_ptk_info *tpi,
> +		   struct sk_buff * (*build_header)(struct sk_buff *skb,
> +					 const struct tnl_ptk_info *tpi));
> +
> +int  ip_tunnel_build_iphdr(struct sk_buff *skb, struct net_device *dev,
> +			   const struct iphdr *tiph, int hlen,
> +			   struct iphdr  *niph);
> +int ip_tunnel_bind_dev(struct net_device *dev);
> +int ip_tunnel_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
> +int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu);
> +
> +struct rtnl_link_stats64 *ip_tunnel_get_stats64(struct net_device *dev,
> +						struct rtnl_link_stats64 *tot);
> +void ip_tunnel_link(struct ip_tunnel_net *itn, struct ip_tunnel *t);
> +void ip_tunnel_unlink(struct ip_tunnel_net *itn, struct ip_tunnel *t);
> +struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
> +				   int link, __be16 flags,
> +				 __be32 remote, __be32 local,
> +				   __be32 key);
> +struct ip_tunnel *ip_tunnel_lookup_key(struct ip_tunnel_net *itn, __be32 key);
> +struct ip_tunnel *ip_tunnel_find(struct ip_tunnel_net *itn,
> +				 struct ip_tunnel_parm *parms,
> +				 int type);
> +
> +int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
> +		  const struct tnl_ptk_info *tpi, bool log_ecn_error);
> +int ip_tunnel_newlink(struct net *src_net, struct net_device *dev,
> +		      struct nlattr *tb[],
> +		      struct nlattr *data[]);
> +int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[],
> +			 struct nlattr *data[]);
> +
> +static inline void ip_tunnel_setup(struct net_device *dev, int net_id)
> +{
> +	struct ip_tunnel *tunnel = netdev_priv(dev);
> +	tunnel->ipt_net_id = net_id;
> +}
> +
> +/* Extract dsfield from inner protocol */
> +static inline u8 ip_tunnel_get_dsfield(const struct iphdr *iph,
> +				   const struct sk_buff *skb)
> +{
> +	if (skb->protocol == htons(ETH_P_IP))
> +		return iph->tos;
> +	else if (skb->protocol == htons(ETH_P_IPV6))
> +		return ipv6_get_dsfield((const struct ipv6hdr *)iph);
> +	else
> +		return 0;
> +}
> +
> +/* Propagate ECN bits out */
> +static inline u8 ip_tunnel_ecn_encap(u8 tos,
> +				 const struct iphdr *iph,
> +				 const struct sk_buff *skb)
> +{
> +	u8 inner = ip_tunnel_get_dsfield(iph, skb);
> +
> +	return INET_ECN_encapsulate(tos, inner);
> +}
>  #endif
> diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
> index 5a19aeb..4110ff3 100644
> --- a/net/ipv4/Kconfig
> +++ b/net/ipv4/Kconfig
> @@ -166,6 +166,7 @@ config IP_PNP_RARP
>  config NET_IPIP
>  	tristate "IP: tunneling"
>  	select INET_TUNNEL
> +	select NET_IP_TUNNEL
>  	---help---
>  	  Tunneling means encapsulating data of one protocol type within
>  	  another protocol and sending it over a channel that understands the
> @@ -186,9 +187,14 @@ config NET_IPGRE_DEMUX
>  	 This is helper module to demultiplex GRE packets on GRE version field criteria.
>  	 Required by ip_gre and pptp modules.
>  
> +config NET_IP_TUNNEL
> +	tristate
> +	default n
> +
>  config NET_IPGRE
>  	tristate "IP: GRE tunnels over IP"
>  	depends on (IPV6 || IPV6=n) && NET_IPGRE_DEMUX
> +	select NET_IP_TUNNEL
>  	help
>  	  Tunneling means encapsulating data of one protocol type within
>  	  another protocol and sending it over a channel that understands the
> diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile
> index 15ca63e..089cb9f 100644
> --- a/net/ipv4/Makefile
> +++ b/net/ipv4/Makefile
> @@ -13,6 +13,7 @@ obj-y     := route.o inetpeer.o protocol.o \
>  	     fib_frontend.o fib_semantics.o fib_trie.o \
>  	     inet_fragment.o ping.o
>  
> +obj-$(CONFIG_NET_IP_TUNNEL) += ip_tunnel.o
>  obj-$(CONFIG_SYSCTL) += sysctl_net_ipv4.o
>  obj-$(CONFIG_PROC_FS) += proc.o
>  obj-$(CONFIG_IP_MULTIPLE_TABLES) += fib_rules.o
> diff --git a/net/ipv4/gre.c b/net/ipv4/gre.c
> index 5a903dc..b837551 100644
> --- a/net/ipv4/gre.c
> +++ b/net/ipv4/gre.c
> @@ -16,13 +16,16 @@
>  #include <linux/kernel.h>
>  #include <linux/kmod.h>
>  #include <linux/skbuff.h>
> +#include <linux/if.h>
> +#include <linux/icmp.h>
>  #include <linux/in.h>
>  #include <linux/ip.h>
> +#include <linux/if_tunnel.h>
>  #include <linux/netdevice.h>
>  #include <linux/spinlock.h>
>  #include <net/protocol.h>
>  #include <net/gre.h>
> -
> +#include <net/icmp.h>
>  
>  static const struct gre_protocol __rcu *gre_proto[GREPROTO_MAX] __read_mostly;
>  
> @@ -104,12 +107,241 @@ static void gre_err(struct sk_buff *skb, u32 info)
>  	rcu_read_unlock();
>  }
>  
> +struct sk_buff *gre_build_header(struct sk_buff *skb,
> +				 const struct tnl_ptk_info *tpi)
> +{
> +	struct iphdr *iph = ip_hdr(skb);
> +	struct gre_base_hdr *greh = (struct gre_base_hdr *)&iph[1];
> +	struct dst_entry *dst = skb_dst(skb);
> +
> +	greh->flags = tpi->flags;
> +	greh->protocol = tpi->proto;
> +	if (tpi->flags&(GRE_KEY|GRE_CSUM|GRE_SEQ)) {
> +		__be32 *ptr = (__be32 *)(((u8 *)greh) + tpi->hdr_len - 4);
> +
> +		if (tpi->flags&GRE_SEQ) {
> +			*ptr = tpi->seq;
> +			ptr--;
> +		}
> +		if (tpi->flags&GRE_KEY) {
> +			*ptr = tpi->key;
> +			ptr--;
> +		}
> +		if (tpi->flags&GRE_CSUM) {
> +			*(__sum16 *)ptr = 0;
> +			*(__sum16 *)ptr = csum_fold(skb_checksum(skb,
> +					   skb_transport_offset(skb),
> +					   skb->len - skb_transport_offset(skb),
> +						0));
> +		}
> +	}
> +	skb->local_df = 1;
> +	__ip_select_ident(ip_hdr(skb), dst, 0);
> +
> +	return skb;
> +}
> +EXPORT_SYMBOL(gre_build_header);
> +
> +
> +static __sum16 check_checksum(struct sk_buff *skb)
> +{
> +	struct iphdr *iph = ip_hdr(skb);
> +	struct gre_base_hdr *greh = (struct gre_base_hdr *)(iph + 1);
> +	__sum16 csum = 0;
> +
> +	if (greh->flags & GRE_CSUM) {
> +		switch (skb->ip_summed) {
> +		case CHECKSUM_COMPLETE:
> +			csum = csum_fold(skb->csum);
> +
> +			if (!csum)
> +				break;
> +			/* Fall through. */
> +
> +		case CHECKSUM_NONE:
> +			skb->csum = 0;
> +			csum = __skb_checksum_complete(skb);
> +			skb->ip_summed = CHECKSUM_COMPLETE;
> +			break;
> +		}
> +	}
> +
> +	return csum;
> +}
> +
> +static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi)
> +{
> +	struct gre_base_hdr *greh = (struct gre_base_hdr *)skb->data;
> +	__be32 *options = (__be32 *)(greh + 1);
> +
> +	if (unlikely(greh->flags & (GRE_VERSION | GRE_ROUTING)))
> +		return -EINVAL;
> +
> +	tpi->flags = greh->flags;
> +	tpi->proto = greh->protocol;
> +
> +	tpi->hdr_len = GRE_HEADER_SECTION;
> +	tpi->csum = check_checksum(skb);
> +
> +	if (tpi->csum)
> +		return -EINVAL;
> +
> +	if (greh->flags & GRE_CSUM) {
> +		tpi->hdr_len += GRE_HEADER_SECTION;
> +		options++;
> +	}
> +
> +	if (greh->flags & GRE_KEY) {
> +		if ((void *)(options + 1) > (void *)skb_tail_pointer(skb))
> +			return -1;
> +		tpi->hdr_len += GRE_HEADER_SECTION;
> +		tpi->key = *options;
> +		options++;
> +	} else
> +		tpi->key = 0;
> +
> +	if (unlikely(greh->flags & GRE_SEQ)) {
> +		if ((void *) (options + 1) > (void *)skb_tail_pointer(skb))
> +			return -1;
> +
> +		tpi->seq = *options;
> +		tpi->hdr_len += GRE_HEADER_SECTION;
> +		options++;
> +	} else
> +		tpi->seq = 0;
> +
> +	/* WCCP version 1 and 2 protocol decoding.
> +	 * - Change protocol to IP
> +	 * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header
> +	 */
> +	if (tpi->flags == 0 && tpi->proto == htons(ETH_P_WCCP)) {
> +		tpi->proto = htons(ETH_P_IP);
> +		if ((*(u8 *)options & 0xF0) != 0x40)
> +			tpi->hdr_len += 4;
> +	}
> +
> +	return 0;
> +}
> +
> +static int ipgre_rcv_v0(struct sk_buff *skb)
> +{
> +	struct tnl_ptk_info tpi;
> +	struct ipt_protocol *proto;
> +	struct hlist_node *n;
> +	struct hlist_head *head;
> +
> +	if (!pskb_may_pull(skb, 16))
> +		goto drop;
> +
> +	if (parse_gre_header(skb, &tpi) < 0)
> +		goto drop;
> +
> +	head = ipt_hash_bucket(IPT_GRE, 0);
> +	hlist_for_each_entry_rcu(proto, n, head, node) {
> +		int ret;
> +
> +		if (proto->type != IPT_GRE || proto->portno != 0)
> +			continue;
> +		ret = proto->handler(skb, &tpi);
> +		if (ret <= 0)
> +			return ret;
> +
> +	}
> +	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
> +
> +drop:
> +	kfree_skb(skb);
> +	return 0;
> +}
> +
> +static void ipgre_err_v0(struct sk_buff *skb, u32 info)
> +{
> +
> +	/* All the routers (except for Linux) return only
> +	 * 8 bytes of packet payload. It means, that precise relaying of
> +	 * ICMP in the real Internet is absolutely infeasible.
> +	 *
> +	 * Moreover, Cisco "wise men" put GRE key to the third word
> +	 * in GRE header. It makes impossible maintaining even soft
> +	 * state for keyed GRE tunnels with enabled checksum. Tell
> +	 * them "thank you".
> +	 *
> +	 * Well, I wonder, rfc1812 was written by Cisco employee,
> +	 * what the hell these idiots break standards established
> +	 * by themselves???
> +	 */
> +
> +	const int type = icmp_hdr(skb)->type;
> +	const int code = icmp_hdr(skb)->code;
> +	struct tnl_ptk_info tpi;
> +	struct hlist_node *n;
> +	struct hlist_head *head;
> +	struct ipt_protocol *proto;
> +
> +	if (!pskb_may_pull(skb, sizeof(struct gre_base_hdr) + ETH_HLEN))
> +		return;
> +
> +	parse_gre_header(skb, &tpi);
> +
> +	if (tpi.csum)
> +		return;
> +
> +	/* If only 8 bytes returned, keyed message will be dropped here */
> +	if (tpi.flags & GRE_KEY) {
> +		if ((tpi.flags & GRE_CSUM) && (tpi.hdr_len < 12))
> +			return;
> +		if (tpi.hdr_len < 8)
> +			return;
> +	}
> +
> +	switch (type) {
> +	default:
> +	case ICMP_PARAMETERPROB:
> +		return;
> +
> +	case ICMP_DEST_UNREACH:
> +		switch (code) {
> +		case ICMP_SR_FAILED:
> +		case ICMP_PORT_UNREACH:
> +			/* Impossible event. */
> +		return;
> +		default:
> +			/* All others are translated to HOST_UNREACH.
> +			   rfc2003 contains "deep thoughts" about NET_UNREACH,
> +			   I believe they are just ether pollution. --ANK
> +			 */
> +		break;
> +		}
> +		break;
> +	case ICMP_TIME_EXCEEDED:
> +		if (code != ICMP_EXC_TTL)
> +			return;
> +		break;
> +
> +	case ICMP_REDIRECT:
> +		break;
> +	}
> +
> +	head = ipt_hash_bucket(IPT_GRE, 0);
> +	hlist_for_each_entry_rcu(proto, n, head, node) {
> +		if (proto->type != IPT_GRE || proto->portno != 0)
> +			continue;
> +		if (proto->err_handler(skb, info, &tpi) <= 0)
> +			return;
> +	}
> +}
> +
>  static const struct net_protocol net_gre_protocol = {
>  	.handler     = gre_rcv,
>  	.err_handler = gre_err,
>  	.netns_ok    = 1,
>  };
>  
> +static const struct gre_protocol ipgre_protocol = {
> +	.handler     = ipgre_rcv_v0,
> +	.err_handler = ipgre_err_v0,
> +};
> +
>  static int __init gre_init(void)
>  {
>  	pr_info("GRE over IPv4 demultiplexor driver\n");
> @@ -118,12 +350,25 @@ static int __init gre_init(void)
>  		pr_err("can't add protocol\n");
>  		return -EAGAIN;
>  	}
> +	rtnl_lock();
> +	if (gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO) < 0) {
> +		pr_info("%s: can't add ipgre handler\n", __func__);
> +		rtnl_unlock();
> +		inet_del_protocol(&net_gre_protocol, IPPROTO_GRE);
> +		return -EAGAIN;
> +	}
> +	rtnl_unlock();
>  
>  	return 0;
>  }
>  
>  static void __exit gre_exit(void)
>  {
> +	rtnl_lock();
> +	if (gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO) < 0)
> +		pr_info("%s: can't remove protocol\n", __func__);
> +	rtnl_unlock();
> +
>  	inet_del_protocol(&net_gre_protocol, IPPROTO_GRE);
>  }
>  
> diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
> index 0d4eecd..829fe3d 100644
> --- a/net/ipv4/ip_gre.c
> +++ b/net/ipv4/ip_gre.c
> @@ -124,1055 +124,147 @@ static bool log_ecn_error = true;
>  module_param(log_ecn_error, bool, 0644);
>  MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
>  
> -static struct rtnl_link_ops ipgre_link_ops __read_mostly;
> -static int ipgre_tunnel_init(struct net_device *dev);
> -static void ipgre_tunnel_setup(struct net_device *dev);
> -static int ipgre_tunnel_bind_dev(struct net_device *dev);
> -
>  /* Fallback tunnel: no source, no destination, no key, no options */
>  
>  #define HASH_SIZE  16
>  
>  static int ipgre_net_id __read_mostly;
> -struct ipgre_net {
> -	struct ip_tunnel __rcu *tunnels[4][HASH_SIZE];
> -
> -	struct net_device *fb_tunnel_dev;
> -};
> -
> -/* Tunnel hash table */
> -
> -/*
> -   4 hash tables:
> -
> -   3: (remote,local)
> -   2: (remote,*)
> -   1: (*,local)
> -   0: (*,*)
> -
> -   We require exact key match i.e. if a key is present in packet
> -   it will match only tunnel with the same key; if it is not present,
> -   it will match only keyless tunnel.
> -
> -   All keysless packets, if not matched configured keyless tunnels
> -   will match fallback tunnel.
> - */
> -
> -#define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&0xF)
> -
> -#define tunnels_r_l	tunnels[3]
> -#define tunnels_r	tunnels[2]
> -#define tunnels_l	tunnels[1]
> -#define tunnels_wc	tunnels[0]
> -
> -static struct rtnl_link_stats64 *ipgre_get_stats64(struct net_device *dev,
> -						   struct rtnl_link_stats64 *tot)
> -{
> -	int i;
> -
> -	for_each_possible_cpu(i) {
> -		const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
> -		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
> -		unsigned int start;
> -
> -		do {
> -			start = u64_stats_fetch_begin_bh(&tstats->syncp);
> -			rx_packets = tstats->rx_packets;
> -			tx_packets = tstats->tx_packets;
> -			rx_bytes = tstats->rx_bytes;
> -			tx_bytes = tstats->tx_bytes;
> -		} while (u64_stats_fetch_retry_bh(&tstats->syncp, start));
> -
> -		tot->rx_packets += rx_packets;
> -		tot->tx_packets += tx_packets;
> -		tot->rx_bytes   += rx_bytes;
> -		tot->tx_bytes   += tx_bytes;
> -	}
> -
> -	tot->multicast = dev->stats.multicast;
> -	tot->rx_crc_errors = dev->stats.rx_crc_errors;
> -	tot->rx_fifo_errors = dev->stats.rx_fifo_errors;
> -	tot->rx_length_errors = dev->stats.rx_length_errors;
> -	tot->rx_frame_errors = dev->stats.rx_frame_errors;
> -	tot->rx_errors = dev->stats.rx_errors;
> -
> -	tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
> -	tot->tx_carrier_errors = dev->stats.tx_carrier_errors;
> -	tot->tx_dropped = dev->stats.tx_dropped;
> -	tot->tx_aborted_errors = dev->stats.tx_aborted_errors;
> -	tot->tx_errors = dev->stats.tx_errors;
> -
> -	return tot;
> -}
> -
> -/* Does key in tunnel parameters match packet */
> -static bool ipgre_key_match(const struct ip_tunnel_parm *p,
> -			    __be16 flags, __be32 key)
> -{
> -	if (p->i_flags & GRE_KEY) {
> -		if (flags & GRE_KEY)
> -			return key == p->i_key;
> -		else
> -			return false;	/* key expected, none present */
> -	} else
> -		return !(flags & GRE_KEY);
> -}
> -
> -/* Given src, dst and key, find appropriate for input tunnel. */
> -
> -static struct ip_tunnel *ipgre_tunnel_lookup(struct net_device *dev,
> -					     __be32 remote, __be32 local,
> -					     __be16 flags, __be32 key,
> -					     __be16 gre_proto)
> -{
> -	struct net *net = dev_net(dev);
> -	int link = dev->ifindex;
> -	unsigned int h0 = HASH(remote);
> -	unsigned int h1 = HASH(key);
> -	struct ip_tunnel *t, *cand = NULL;
> -	struct ipgre_net *ign = net_generic(net, ipgre_net_id);
> -	int dev_type = (gre_proto == htons(ETH_P_TEB)) ?
> -		       ARPHRD_ETHER : ARPHRD_IPGRE;
> -	int score, cand_score = 4;
> -
> -	for_each_ip_tunnel_rcu(t, ign->tunnels_r_l[h0 ^ h1]) {
> -		if (local != t->parms.iph.saddr ||
> -		    remote != t->parms.iph.daddr ||
> -		    !(t->dev->flags & IFF_UP))
> -			continue;
> -
> -		if (!ipgre_key_match(&t->parms, flags, key))
> -			continue;
> -
> -		if (t->dev->type != ARPHRD_IPGRE &&
> -		    t->dev->type != dev_type)
> -			continue;
> -
> -		score = 0;
> -		if (t->parms.link != link)
> -			score |= 1;
> -		if (t->dev->type != dev_type)
> -			score |= 2;
> -		if (score == 0)
> -			return t;
> -
> -		if (score < cand_score) {
> -			cand = t;
> -			cand_score = score;
> -		}
> -	}
> -
> -	for_each_ip_tunnel_rcu(t, ign->tunnels_r[h0 ^ h1]) {
> -		if (remote != t->parms.iph.daddr ||
> -		    !(t->dev->flags & IFF_UP))
> -			continue;
> -
> -		if (!ipgre_key_match(&t->parms, flags, key))
> -			continue;
> -
> -		if (t->dev->type != ARPHRD_IPGRE &&
> -		    t->dev->type != dev_type)
> -			continue;
> -
> -		score = 0;
> -		if (t->parms.link != link)
> -			score |= 1;
> -		if (t->dev->type != dev_type)
> -			score |= 2;
> -		if (score == 0)
> -			return t;
> -
> -		if (score < cand_score) {
> -			cand = t;
> -			cand_score = score;
> -		}
> -	}
> -
> -	for_each_ip_tunnel_rcu(t, ign->tunnels_l[h1]) {
> -		if ((local != t->parms.iph.saddr &&
> -		     (local != t->parms.iph.daddr ||
> -		      !ipv4_is_multicast(local))) ||
> -		    !(t->dev->flags & IFF_UP))
> -			continue;
> -
> -		if (!ipgre_key_match(&t->parms, flags, key))
> -			continue;
> -
> -		if (t->dev->type != ARPHRD_IPGRE &&
> -		    t->dev->type != dev_type)
> -			continue;
> -
> -		score = 0;
> -		if (t->parms.link != link)
> -			score |= 1;
> -		if (t->dev->type != dev_type)
> -			score |= 2;
> -		if (score == 0)
> -			return t;
> -
> -		if (score < cand_score) {
> -			cand = t;
> -			cand_score = score;
> -		}
> -	}
> -
> -	for_each_ip_tunnel_rcu(t, ign->tunnels_wc[h1]) {
> -		if (t->parms.i_key != key ||
> -		    !(t->dev->flags & IFF_UP))
> -			continue;
> -
> -		if (t->dev->type != ARPHRD_IPGRE &&
> -		    t->dev->type != dev_type)
> -			continue;
> -
> -		score = 0;
> -		if (t->parms.link != link)
> -			score |= 1;
> -		if (t->dev->type != dev_type)
> -			score |= 2;
> -		if (score == 0)
> -			return t;
> -
> -		if (score < cand_score) {
> -			cand = t;
> -			cand_score = score;
> -		}
> -	}
> -
> -	if (cand != NULL)
> -		return cand;
> -
> -	dev = ign->fb_tunnel_dev;
> -	if (dev->flags & IFF_UP)
> -		return netdev_priv(dev);
> -
> -	return NULL;
> -}
> -
> -static struct ip_tunnel __rcu **__ipgre_bucket(struct ipgre_net *ign,
> -		struct ip_tunnel_parm *parms)
> -{
> -	__be32 remote = parms->iph.daddr;
> -	__be32 local = parms->iph.saddr;
> -	__be32 key = parms->i_key;
> -	unsigned int h = HASH(key);
> -	int prio = 0;
> -
> -	if (local)
> -		prio |= 1;
> -	if (remote && !ipv4_is_multicast(remote)) {
> -		prio |= 2;
> -		h ^= HASH(remote);
> -	}
> -
> -	return &ign->tunnels[prio][h];
> -}
> -
> -static inline struct ip_tunnel __rcu **ipgre_bucket(struct ipgre_net *ign,
> -		struct ip_tunnel *t)
> -{
> -	return __ipgre_bucket(ign, &t->parms);
> -}
> -
> -static void ipgre_tunnel_link(struct ipgre_net *ign, struct ip_tunnel *t)
> -{
> -	struct ip_tunnel __rcu **tp = ipgre_bucket(ign, t);
> -
> -	rcu_assign_pointer(t->next, rtnl_dereference(*tp));
> -	rcu_assign_pointer(*tp, t);
> -}
> -
> -static void ipgre_tunnel_unlink(struct ipgre_net *ign, struct ip_tunnel *t)
> -{
> -	struct ip_tunnel __rcu **tp;
> -	struct ip_tunnel *iter;
> -
> -	for (tp = ipgre_bucket(ign, t);
> -	     (iter = rtnl_dereference(*tp)) != NULL;
> -	     tp = &iter->next) {
> -		if (t == iter) {
> -			rcu_assign_pointer(*tp, t->next);
> -			break;
> -		}
> -	}
> -}
> -
> -static struct ip_tunnel *ipgre_tunnel_find(struct net *net,
> -					   struct ip_tunnel_parm *parms,
> -					   int type)
> -{
> -	__be32 remote = parms->iph.daddr;
> -	__be32 local = parms->iph.saddr;
> -	__be32 key = parms->i_key;
> -	int link = parms->link;
> -	struct ip_tunnel *t;
> -	struct ip_tunnel __rcu **tp;
> -	struct ipgre_net *ign = net_generic(net, ipgre_net_id);
> -
> -	for (tp = __ipgre_bucket(ign, parms);
> -	     (t = rtnl_dereference(*tp)) != NULL;
> -	     tp = &t->next)
> -		if (local == t->parms.iph.saddr &&
> -		    remote == t->parms.iph.daddr &&
> -		    key == t->parms.i_key &&
> -		    link == t->parms.link &&
> -		    type == t->dev->type)
> -			break;
> -
> -	return t;
> -}
> +static int ipgre_tap_net_id __read_mostly;
>  
> -static struct ip_tunnel *ipgre_tunnel_locate(struct net *net,
> -		struct ip_tunnel_parm *parms, int create)
> +static int ipgre_err(struct sk_buff *skb, u32 info,
> +			const struct tnl_ptk_info *tpi)
>  {
> -	struct ip_tunnel *t, *nt;
> -	struct net_device *dev;
> -	char name[IFNAMSIZ];
> -	struct ipgre_net *ign = net_generic(net, ipgre_net_id);
> -
> -	t = ipgre_tunnel_find(net, parms, ARPHRD_IPGRE);
> -	if (t || !create)
> -		return t;
> -
> -	if (parms->name[0])
> -		strlcpy(name, parms->name, IFNAMSIZ);
> -	else
> -		strcpy(name, "gre%d");
> -
> -	dev = alloc_netdev(sizeof(*t), name, ipgre_tunnel_setup);
> -	if (!dev)
> -		return NULL;
> -
> -	dev_net_set(dev, net);
> -
> -	nt = netdev_priv(dev);
> -	nt->parms = *parms;
> -	dev->rtnl_link_ops = &ipgre_link_ops;
> -
> -	dev->mtu = ipgre_tunnel_bind_dev(dev);
> -
> -	if (register_netdevice(dev) < 0)
> -		goto failed_free;
> -
> -	/* Can use a lockless transmit, unless we generate output sequences */
> -	if (!(nt->parms.o_flags & GRE_SEQ))
> -		dev->features |= NETIF_F_LLTX;
> -
> -	dev_hold(dev);
> -	ipgre_tunnel_link(ign, nt);
> -	return nt;
> -
> -failed_free:
> -	free_netdev(dev);
> -	return NULL;
> -}
> -
> -static void ipgre_tunnel_uninit(struct net_device *dev)
> -{
> -	struct net *net = dev_net(dev);
> -	struct ipgre_net *ign = net_generic(net, ipgre_net_id);
> -
> -	ipgre_tunnel_unlink(ign, netdev_priv(dev));
> -	dev_put(dev);
> -}
> -
> -
> -static void ipgre_err(struct sk_buff *skb, u32 info)
> -{
> -
> -/* All the routers (except for Linux) return only
> -   8 bytes of packet payload. It means, that precise relaying of
> -   ICMP in the real Internet is absolutely infeasible.
> -
> -   Moreover, Cisco "wise men" put GRE key to the third word
> -   in GRE header. It makes impossible maintaining even soft state for keyed
> -   GRE tunnels with enabled checksum. Tell them "thank you".
> -
> -   Well, I wonder, rfc1812 was written by Cisco employee,
> -   what the hell these idiots break standards established
> -   by themselves???
> - */
> -
> +	struct net *net = dev_net(skb->dev);
> +	struct ip_tunnel_net *itn;
>  	const struct iphdr *iph = (const struct iphdr *)skb->data;
> -	__be16	     *p = (__be16 *)(skb->data+(iph->ihl<<2));
> -	int grehlen = (iph->ihl<<2) + 4;
> -	const int type = icmp_hdr(skb)->type;
> -	const int code = icmp_hdr(skb)->code;
>  	struct ip_tunnel *t;
> -	__be16 flags;
> -	__be32 key = 0;
> -
> -	flags = p[0];
> -	if (flags&(GRE_CSUM|GRE_KEY|GRE_SEQ|GRE_ROUTING|GRE_VERSION)) {
> -		if (flags&(GRE_VERSION|GRE_ROUTING))
> -			return;
> -		if (flags&GRE_KEY) {
> -			grehlen += 4;
> -			if (flags&GRE_CSUM)
> -				grehlen += 4;
> -		}
> -	}
> +	int dev_type;
>  
> -	/* If only 8 bytes returned, keyed message will be dropped here */
> -	if (skb_headlen(skb) < grehlen)
> -		return;
> -
> -	if (flags & GRE_KEY)
> -		key = *(((__be32 *)p) + (grehlen / 4) - 1);
> -
> -	switch (type) {
> -	default:
> -	case ICMP_PARAMETERPROB:
> -		return;
> -
> -	case ICMP_DEST_UNREACH:
> -		switch (code) {
> -		case ICMP_SR_FAILED:
> -		case ICMP_PORT_UNREACH:
> -			/* Impossible event. */
> -			return;
> -		default:
> -			/* All others are translated to HOST_UNREACH.
> -			   rfc2003 contains "deep thoughts" about NET_UNREACH,
> -			   I believe they are just ether pollution. --ANK
> -			 */
> -			break;
> -		}
> -		break;
> -	case ICMP_TIME_EXCEEDED:
> -		if (code != ICMP_EXC_TTL)
> -			return;
> -		break;
> -
> -	case ICMP_REDIRECT:
> -		break;
> +	if (tpi->proto == htons(ETH_P_TEB)) {
> +		dev_type = ARPHRD_ETHER;
> +		itn = net_generic(net, ipgre_tap_net_id);
> +	} else {
> +		dev_type = ARPHRD_IPGRE;
> +		itn = net_generic(net, ipgre_net_id);
>  	}
>  
> -	t = ipgre_tunnel_lookup(skb->dev, iph->daddr, iph->saddr,
> -				flags, key, p[1]);
> -
> +	t = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
> +				iph->daddr, iph->saddr, tpi->key);
>  	if (t == NULL)
> -		return;
> -
> -	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
> -		ipv4_update_pmtu(skb, dev_net(skb->dev), info,
> -				 t->parms.link, 0, IPPROTO_GRE, 0);
> -		return;
> -	}
> -	if (type == ICMP_REDIRECT) {
> -		ipv4_redirect(skb, dev_net(skb->dev), t->parms.link, 0,
> -			      IPPROTO_GRE, 0);
> -		return;
> -	}
> -	if (t->parms.iph.daddr == 0 ||
> -	    ipv4_is_multicast(t->parms.iph.daddr))
> -		return;
> -
> -	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
> -		return;
> +		return 1;
>  
> -	if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
> -		t->err_count++;
> -	else
> -		t->err_count = 1;
> -	t->err_time = jiffies;
> -}
> -
> -static inline u8
> -ipgre_ecn_encapsulate(u8 tos, const struct iphdr *old_iph, struct sk_buff *skb)
> -{
> -	u8 inner = 0;
> -	if (skb->protocol == htons(ETH_P_IP))
> -		inner = old_iph->tos;
> -	else if (skb->protocol == htons(ETH_P_IPV6))
> -		inner = ipv6_get_dsfield((const struct ipv6hdr *)old_iph);
> -	return INET_ECN_encapsulate(tos, inner);
> +	ip_tunnel_err(t, skb, info);
> +	return 0;
>  }
>  
> -static int ipgre_rcv(struct sk_buff *skb)
> +static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi)
>  {
> +	struct net *net = dev_net(skb->dev);
> +	struct ip_tunnel_net *itn;
>  	const struct iphdr *iph;
> -	u8     *h;
> -	__be16    flags;
> -	__sum16   csum = 0;
> -	__be32 key = 0;
> -	u32    seqno = 0;
>  	struct ip_tunnel *tunnel;
> -	int    offset = 4;
> -	__be16 gre_proto;
> -	int    err;
> -
> -	if (!pskb_may_pull(skb, 16))
> -		goto drop;
> +	int dev_type;
> +	int err;
>  
> -	iph = ip_hdr(skb);
> -	h = skb->data;
> -	flags = *(__be16 *)h;
> -
> -	if (flags&(GRE_CSUM|GRE_KEY|GRE_ROUTING|GRE_SEQ|GRE_VERSION)) {
> -		/* - Version must be 0.
> -		   - We do not support routing headers.
> -		 */
> -		if (flags&(GRE_VERSION|GRE_ROUTING))
> -			goto drop;
> -
> -		if (flags&GRE_CSUM) {
> -			switch (skb->ip_summed) {
> -			case CHECKSUM_COMPLETE:
> -				csum = csum_fold(skb->csum);
> -				if (!csum)
> -					break;
> -				/* fall through */
> -			case CHECKSUM_NONE:
> -				skb->csum = 0;
> -				csum = __skb_checksum_complete(skb);
> -				skb->ip_summed = CHECKSUM_COMPLETE;
> -			}
> -			offset += 4;
> -		}
> -		if (flags&GRE_KEY) {
> -			key = *(__be32 *)(h + offset);
> -			offset += 4;
> -		}
> -		if (flags&GRE_SEQ) {
> -			seqno = ntohl(*(__be32 *)(h + offset));
> -			offset += 4;
> -		}
> +	if (!pskb_may_pull(skb, 16))  {
> +		kfree_skb(skb);
> +		return -1;
>  	}
>  
> -	gre_proto = *(__be16 *)(h + 2);
> -
> -	tunnel = ipgre_tunnel_lookup(skb->dev,
> -				     iph->saddr, iph->daddr, flags, key,
> -				     gre_proto);
> -	if (tunnel) {
> -		struct pcpu_tstats *tstats;
> -
> -		secpath_reset(skb);
> -
> -		skb->protocol = gre_proto;
> -		/* WCCP version 1 and 2 protocol decoding.
> -		 * - Change protocol to IP
> -		 * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header
> -		 */
> -		if (flags == 0 && gre_proto == htons(ETH_P_WCCP)) {
> -			skb->protocol = htons(ETH_P_IP);
> -			if ((*(h + offset) & 0xF0) != 0x40)
> -				offset += 4;
> -		}
> -
> -		skb->mac_header = skb->network_header;
> -		__pskb_pull(skb, offset);
> -		skb_postpull_rcsum(skb, skb_transport_header(skb), offset);
> -		skb->pkt_type = PACKET_HOST;
> -#ifdef CONFIG_NET_IPGRE_BROADCAST
> -		if (ipv4_is_multicast(iph->daddr)) {
> -			/* Looped back packet, drop it! */
> -			if (rt_is_output_route(skb_rtable(skb)))
> -				goto drop;
> -			tunnel->dev->stats.multicast++;
> -			skb->pkt_type = PACKET_BROADCAST;
> -		}
> -#endif
> -
> -		if (((flags&GRE_CSUM) && csum) ||
> -		    (!(flags&GRE_CSUM) && tunnel->parms.i_flags&GRE_CSUM)) {
> -			tunnel->dev->stats.rx_crc_errors++;
> -			tunnel->dev->stats.rx_errors++;
> -			goto drop;
> -		}
> -		if (tunnel->parms.i_flags&GRE_SEQ) {
> -			if (!(flags&GRE_SEQ) ||
> -			    (tunnel->i_seqno && (s32)(seqno - tunnel->i_seqno) < 0)) {
> -				tunnel->dev->stats.rx_fifo_errors++;
> -				tunnel->dev->stats.rx_errors++;
> -				goto drop;
> -			}
> -			tunnel->i_seqno = seqno + 1;
> -		}
> -
> -		/* Warning: All skb pointers will be invalidated! */
> -		if (tunnel->dev->type == ARPHRD_ETHER) {
> -			if (!pskb_may_pull(skb, ETH_HLEN)) {
> -				tunnel->dev->stats.rx_length_errors++;
> -				tunnel->dev->stats.rx_errors++;
> -				goto drop;
> -			}
> -
> -			iph = ip_hdr(skb);
> -			skb->protocol = eth_type_trans(skb, tunnel->dev);
> -			skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
> -		}
> -
> -		__skb_tunnel_rx(skb, tunnel->dev);
> -
> -		skb_reset_network_header(skb);
> -		err = IP_ECN_decapsulate(iph, skb);
> -		if (unlikely(err)) {
> -			if (log_ecn_error)
> -				net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
> -						     &iph->saddr, iph->tos);
> -			if (err > 1) {
> -				++tunnel->dev->stats.rx_frame_errors;
> -				++tunnel->dev->stats.rx_errors;
> -				goto drop;
> -			}
> -		}
> -
> -		tstats = this_cpu_ptr(tunnel->dev->tstats);
> -		u64_stats_update_begin(&tstats->syncp);
> -		tstats->rx_packets++;
> -		tstats->rx_bytes += skb->len;
> -		u64_stats_update_end(&tstats->syncp);
> +	iph = ip_hdr(skb);
>  
> -		gro_cells_receive(&tunnel->gro_cells, skb);
> -		return 0;
> +	if (tpi->proto == htons(ETH_P_TEB)) {
> +		dev_type = ARPHRD_ETHER;
> +		itn = net_generic(net, ipgre_tap_net_id);
> +	} else {
> +		dev_type = ARPHRD_IPGRE;
> +		itn = net_generic(net, ipgre_net_id);

dev_type is set but unused; same in ipgre_err().
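
Both could then presumably be reduced to just picking the netns, e.g.:

	if (tpi->proto == htons(ETH_P_TEB))
		itn = net_generic(net, ipgre_tap_net_id);
	else
		itn = net_generic(net, ipgre_net_id);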

>  	}
> -	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
>  
> -drop:
> -	kfree_skb(skb);
> -	return 0;
> +	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
> +			iph->saddr, iph->daddr, tpi->key);
> +	if (tunnel)
> +		err = ip_tunnel_rcv(tunnel, skb, tpi, log_ecn_error);
> +	else
> +		err = 1;
> +
> +	return err;
>  }
>  
>  static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
>  {
> +	__be16 proto = skb->dev->type == ARPHRD_ETHER ?
> +			htons(ETH_P_TEB) : skb->protocol;
> +	struct tnl_ptk_info tpi;
>  	struct ip_tunnel *tunnel = netdev_priv(dev);
> -	const struct iphdr  *old_iph = ip_hdr(skb);
>  	const struct iphdr  *tiph;
> -	struct flowi4 fl4;
> -	u8     tos;
> -	__be16 df;
> -	struct rtable *rt;     			/* Route to the other host */
> -	struct net_device *tdev;		/* Device to other host */
> -	struct iphdr  *iph;			/* Our new IP header */
> -	unsigned int max_headroom;		/* The extra header space needed */
> +	struct iphdr  iph;			/* Our new IP header */
>  	int    gre_hlen;
> -	__be32 dst;
> -	int    mtu;
> -
> -	if (skb->ip_summed == CHECKSUM_PARTIAL &&
> -	    skb_checksum_help(skb))
> -		goto tx_error;
> -
> -	if (dev->type == ARPHRD_ETHER)
> -		IPCB(skb)->flags = 0;
> +	int err, tx_len;
>  
>  	if (dev->header_ops && dev->type == ARPHRD_IPGRE) {
>  		gre_hlen = 0;
>  		tiph = (const struct iphdr *)skb->data;
>  	} else {
> -		gre_hlen = tunnel->hlen;
> +		gre_hlen = tunnel->hlen + sizeof(struct iphdr);
>  		tiph = &tunnel->parms.iph;
>  	}
>  
> -	if ((dst = tiph->daddr) == 0) {
> -		/* NBMA tunnel */
> -
> -		if (skb_dst(skb) == NULL) {
> -			dev->stats.tx_fifo_errors++;
> -			goto tx_error;
> -		}
> -
> -		if (skb->protocol == htons(ETH_P_IP)) {
> -			rt = skb_rtable(skb);
> -			dst = rt_nexthop(rt, old_iph->daddr);
> -		}
> -#if IS_ENABLED(CONFIG_IPV6)
> -		else if (skb->protocol == htons(ETH_P_IPV6)) {
> -			const struct in6_addr *addr6;
> -			struct neighbour *neigh;
> -			bool do_tx_error_icmp;
> -			int addr_type;
> -
> -			neigh = dst_neigh_lookup(skb_dst(skb), &ipv6_hdr(skb)->daddr);
> -			if (neigh == NULL)
> -				goto tx_error;
> -
> -			addr6 = (const struct in6_addr *)&neigh->primary_key;
> -			addr_type = ipv6_addr_type(addr6);
> -
> -			if (addr_type == IPV6_ADDR_ANY) {
> -				addr6 = &ipv6_hdr(skb)->daddr;
> -				addr_type = ipv6_addr_type(addr6);
> -			}
> -
> -			if ((addr_type & IPV6_ADDR_COMPATv4) == 0)
> -				do_tx_error_icmp = true;
> -			else {
> -				do_tx_error_icmp = false;
> -				dst = addr6->s6_addr32[3];
> -			}
> -			neigh_release(neigh);
> -			if (do_tx_error_icmp)
> -				goto tx_error_icmp;
> -		}
> -#endif
> -		else
> -			goto tx_error;
> -	}
> -
> -	tos = tiph->tos;
> -	if (tos == 1) {
> -		tos = 0;
> -		if (skb->protocol == htons(ETH_P_IP))
> -			tos = old_iph->tos;
> -		else if (skb->protocol == htons(ETH_P_IPV6))
> -			tos = ipv6_get_dsfield((const struct ipv6hdr *)old_iph);
> -	}
> -
> -	rt = ip_route_output_gre(dev_net(dev), &fl4, dst, tiph->saddr,
> -				 tunnel->parms.o_key, RT_TOS(tos),
> -				 tunnel->parms.link);
> -	if (IS_ERR(rt)) {
> -		dev->stats.tx_carrier_errors++;
> -		goto tx_error;
> -	}
> -	tdev = rt->dst.dev;
> -
> -	if (tdev == dev) {
> -		ip_rt_put(rt);
> -		dev->stats.collisions++;
> -		goto tx_error;
> -	}
> -
> -	df = tiph->frag_off;
> -	if (df)
> -		mtu = dst_mtu(&rt->dst) - dev->hard_header_len - tunnel->hlen;
> -	else
> -		mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
> -
> -	if (skb_dst(skb))
> -		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
> -
> -	if (skb->protocol == htons(ETH_P_IP)) {
> -		df |= (old_iph->frag_off&htons(IP_DF));
> -
> -		if ((old_iph->frag_off&htons(IP_DF)) &&
> -		    mtu < ntohs(old_iph->tot_len)) {
> -			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
> -			ip_rt_put(rt);
> -			goto tx_error;
> -		}
> -	}
> -#if IS_ENABLED(CONFIG_IPV6)
> -	else if (skb->protocol == htons(ETH_P_IPV6)) {
> -		struct rt6_info *rt6 = (struct rt6_info *)skb_dst(skb);
> -
> -		if (rt6 && mtu < dst_mtu(skb_dst(skb)) && mtu >= IPV6_MIN_MTU) {
> -			if ((tunnel->parms.iph.daddr &&
> -			     !ipv4_is_multicast(tunnel->parms.iph.daddr)) ||
> -			    rt6->rt6i_dst.plen == 128) {
> -				rt6->rt6i_flags |= RTF_MODIFIED;
> -				dst_metric_set(skb_dst(skb), RTAX_MTU, mtu);
> -			}
> -		}
> -
> -		if (mtu >= IPV6_MIN_MTU && mtu < skb->len - tunnel->hlen + gre_hlen) {
> -			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
> -			ip_rt_put(rt);
> -			goto tx_error;
> -		}
> -	}
> -#endif
> -
> -	if (tunnel->err_count > 0) {
> -		if (time_before(jiffies,
> -				tunnel->err_time + IPTUNNEL_ERR_TIMEO)) {
> -			tunnel->err_count--;
> -
> -			dst_link_failure(skb);
> -		} else
> -			tunnel->err_count = 0;
> -	}
> -
> -	max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen + rt->dst.header_len;
> -
> -	if (skb_headroom(skb) < max_headroom || skb_shared(skb)||
> -	    (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
> -		struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
> -		if (max_headroom > dev->needed_headroom)
> -			dev->needed_headroom = max_headroom;
> -		if (!new_skb) {
> -			ip_rt_put(rt);
> -			dev->stats.tx_dropped++;
> -			dev_kfree_skb(skb);
> -			return NETDEV_TX_OK;
> -		}
> -		if (skb->sk)
> -			skb_set_owner_w(new_skb, skb->sk);
> +	err = ip_tunnel_build_iphdr(skb, dev, tiph, gre_hlen, &iph);
> +	if (err)
> +		return NETDEV_TX_OK;
> +
> +	tpi.flags = tunnel->parms.o_flags;
> +	tpi.proto = proto;
> +	tpi.key = tunnel->parms.o_key;
> +	tpi.seq =  htonl(tunnel->o_seqno);
> +	tpi.hdr_len = tunnel->hlen;
> +	if (skb_cow_head(skb, dev->needed_headroom)) {
> +		dev->stats.tx_dropped++;
>  		dev_kfree_skb(skb);
> -		skb = new_skb;
> -		old_iph = ip_hdr(skb);
> -	}
> -
> -	skb_reset_transport_header(skb);
> -	skb_push(skb, gre_hlen);
> -	skb_reset_network_header(skb);
> -	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
> -	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
> -			      IPSKB_REROUTED);
> -	skb_dst_drop(skb);
> -	skb_dst_set(skb, &rt->dst);
> -
> -	/*
> -	 *	Push down and install the IPIP header.
> -	 */
> -
> -	iph 			=	ip_hdr(skb);
> -	iph->version		=	4;
> -	iph->ihl		=	sizeof(struct iphdr) >> 2;
> -	iph->frag_off		=	df;
> -	iph->protocol		=	IPPROTO_GRE;
> -	iph->tos		=	ipgre_ecn_encapsulate(tos, old_iph, skb);
> -	iph->daddr		=	fl4.daddr;
> -	iph->saddr		=	fl4.saddr;
> -
> -	if ((iph->ttl = tiph->ttl) == 0) {
> -		if (skb->protocol == htons(ETH_P_IP))
> -			iph->ttl = old_iph->ttl;
> -#if IS_ENABLED(CONFIG_IPV6)
> -		else if (skb->protocol == htons(ETH_P_IPV6))
> -			iph->ttl = ((const struct ipv6hdr *)old_iph)->hop_limit;
> -#endif
> -		else
> -			iph->ttl = ip4_dst_hoplimit(&rt->dst);
> +		return NETDEV_TX_OK;
>  	}
>  
> -	((__be16 *)(iph + 1))[0] = tunnel->parms.o_flags;
> -	((__be16 *)(iph + 1))[1] = (dev->type == ARPHRD_ETHER) ?
> -				   htons(ETH_P_TEB) : skb->protocol;
> +	tx_len = ip_tunnel_xmit(skb, &iph, &tpi, gre_build_header);
> +	if (tx_len > 0) {
> +		struct pcpu_tstats *tstats = this_cpu_ptr(dev->tstats);
>  
> -	if (tunnel->parms.o_flags&(GRE_KEY|GRE_CSUM|GRE_SEQ)) {
> -		__be32 *ptr = (__be32 *)(((u8 *)iph) + tunnel->hlen - 4);
> +		u64_stats_update_begin(&tstats->syncp);
> +		tstats->tx_bytes += tx_len;
> +		tstats->tx_packets++;
> +		u64_stats_update_end(&tstats->syncp);
>  
> -		if (tunnel->parms.o_flags&GRE_SEQ) {
> +		if (tunnel->parms.o_flags&GRE_SEQ)
>  			++tunnel->o_seqno;
> -			*ptr = htonl(tunnel->o_seqno);
> -			ptr--;
> -		}
> -		if (tunnel->parms.o_flags&GRE_KEY) {
> -			*ptr = tunnel->parms.o_key;
> -			ptr--;
> -		}
> -		if (tunnel->parms.o_flags&GRE_CSUM) {
> -			*ptr = 0;
> -			*(__sum16 *)ptr = ip_compute_csum((void *)(iph+1), skb->len - sizeof(struct iphdr));
> -		}
> +	} else {
> +		dev->stats.tx_errors++;
> +		dev->stats.tx_aborted_errors++;
>  	}
>  
> -	iptunnel_xmit(skb, dev);
> -	return NETDEV_TX_OK;
> -
> -#if IS_ENABLED(CONFIG_IPV6)
> -tx_error_icmp:
> -	dst_link_failure(skb);
> -#endif
> -tx_error:
> -	dev->stats.tx_errors++;
> -	dev_kfree_skb(skb);
>  	return NETDEV_TX_OK;
>  }
>  
> -static int ipgre_tunnel_bind_dev(struct net_device *dev)
> +static int ipgre_get_ioctl_param(struct net_device *dev, struct ifreq *ifr,
> +				struct ip_tunnel_parm *p)
>  {
> -	struct net_device *tdev = NULL;
> -	struct ip_tunnel *tunnel;
> -	const struct iphdr *iph;
> -	int hlen = LL_MAX_HEADER;
> -	int mtu = ETH_DATA_LEN;
> -	int addend = sizeof(struct iphdr) + 4;
> +	BUILD_BUG_ON(TUNNEL_KEY  != GRE_KEY);
> +	BUILD_BUG_ON(TUNNEL_SEQ  != GRE_SEQ);
> +	BUILD_BUG_ON(TUNNEL_CSUM != GRE_CSUM);
>  
> -	tunnel = netdev_priv(dev);
> -	iph = &tunnel->parms.iph;
> +	if (copy_from_user(p, ifr->ifr_ifru.ifru_data, sizeof(*p)))
> +		return -EFAULT;
>  
> -	/* Guess output device to choose reasonable mtu and needed_headroom */
> -
> -	if (iph->daddr) {
> -		struct flowi4 fl4;
> -		struct rtable *rt;
> -
> -		rt = ip_route_output_gre(dev_net(dev), &fl4,
> -					 iph->daddr, iph->saddr,
> -					 tunnel->parms.o_key,
> -					 RT_TOS(iph->tos),
> -					 tunnel->parms.link);
> -		if (!IS_ERR(rt)) {
> -			tdev = rt->dst.dev;
> -			ip_rt_put(rt);
> -		}
> -
> -		if (dev->type != ARPHRD_ETHER)
> -			dev->flags |= IFF_POINTOPOINT;
> -	}
> -
> -	if (!tdev && tunnel->parms.link)
> -		tdev = __dev_get_by_index(dev_net(dev), tunnel->parms.link);
> -
> -	if (tdev) {
> -		hlen = tdev->hard_header_len + tdev->needed_headroom;
> -		mtu = tdev->mtu;
> -	}
> -	dev->iflink = tunnel->parms.link;
> -
> -	/* Precalculate GRE options length */
> -	if (tunnel->parms.o_flags&(GRE_CSUM|GRE_KEY|GRE_SEQ)) {
> -		if (tunnel->parms.o_flags&GRE_CSUM)
> -			addend += 4;
> -		if (tunnel->parms.o_flags&GRE_KEY)
> -			addend += 4;
> -		if (tunnel->parms.o_flags&GRE_SEQ)
> -			addend += 4;
> -	}
> -	dev->needed_headroom = addend + hlen;
> -	mtu -= dev->hard_header_len + addend;
> -
> -	if (mtu < 68)
> -		mtu = 68;
> -
> -	tunnel->hlen = addend;
> -
> -	return mtu;
> -}
> -
> -static int
> -ipgre_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
> -{
> -	int err = 0;
> -	struct ip_tunnel_parm p;
> -	struct ip_tunnel *t;
> -	struct net *net = dev_net(dev);
> -	struct ipgre_net *ign = net_generic(net, ipgre_net_id);
> -
> -	switch (cmd) {
> -	case SIOCGETTUNNEL:
> -		t = NULL;
> -		if (dev == ign->fb_tunnel_dev) {
> -			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
> -				err = -EFAULT;
> -				break;
> -			}
> -			t = ipgre_tunnel_locate(net, &p, 0);
> -		}
> -		if (t == NULL)
> -			t = netdev_priv(dev);
> -		memcpy(&p, &t->parms, sizeof(p));
> -		if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
> -			err = -EFAULT;
> -		break;
> -
> -	case SIOCADDTUNNEL:
> -	case SIOCCHGTUNNEL:
> -		err = -EPERM;
> -		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
> -			goto done;
> -
> -		err = -EFAULT;
> -		if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
> -			goto done;
> -
> -		err = -EINVAL;
> -		if (p.iph.version != 4 || p.iph.protocol != IPPROTO_GRE ||
> -		    p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF)) ||
> -		    ((p.i_flags|p.o_flags)&(GRE_VERSION|GRE_ROUTING)))
> -			goto done;
> -		if (p.iph.ttl)
> -			p.iph.frag_off |= htons(IP_DF);
> -
> -		if (!(p.i_flags&GRE_KEY))
> -			p.i_key = 0;
> -		if (!(p.o_flags&GRE_KEY))
> -			p.o_key = 0;
> -
> -		t = ipgre_tunnel_locate(net, &p, cmd == SIOCADDTUNNEL);
> -
> -		if (dev != ign->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
> -			if (t != NULL) {
> -				if (t->dev != dev) {
> -					err = -EEXIST;
> -					break;
> -				}
> -			} else {
> -				unsigned int nflags = 0;
> -
> -				t = netdev_priv(dev);
> -
> -				if (ipv4_is_multicast(p.iph.daddr))
> -					nflags = IFF_BROADCAST;
> -				else if (p.iph.daddr)
> -					nflags = IFF_POINTOPOINT;
> -
> -				if ((dev->flags^nflags)&(IFF_POINTOPOINT|IFF_BROADCAST)) {
> -					err = -EINVAL;
> -					break;
> -				}
> -				ipgre_tunnel_unlink(ign, t);
> -				synchronize_net();
> -				t->parms.iph.saddr = p.iph.saddr;
> -				t->parms.iph.daddr = p.iph.daddr;
> -				t->parms.i_key = p.i_key;
> -				t->parms.o_key = p.o_key;
> -				memcpy(dev->dev_addr, &p.iph.saddr, 4);
> -				memcpy(dev->broadcast, &p.iph.daddr, 4);
> -				ipgre_tunnel_link(ign, t);
> -				netdev_state_change(dev);
> -			}
> -		}
> -
> -		if (t) {
> -			err = 0;
> -			if (cmd == SIOCCHGTUNNEL) {
> -				t->parms.iph.ttl = p.iph.ttl;
> -				t->parms.iph.tos = p.iph.tos;
> -				t->parms.iph.frag_off = p.iph.frag_off;
> -				if (t->parms.link != p.link) {
> -					t->parms.link = p.link;
> -					dev->mtu = ipgre_tunnel_bind_dev(dev);
> -					netdev_state_change(dev);
> -				}
> -			}
> -			if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms, sizeof(p)))
> -				err = -EFAULT;
> -		} else
> -			err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
> -		break;
> -
> -	case SIOCDELTUNNEL:
> -		err = -EPERM;
> -		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
> -			goto done;
> -
> -		if (dev == ign->fb_tunnel_dev) {
> -			err = -EFAULT;
> -			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
> -				goto done;
> -			err = -ENOENT;
> -			if ((t = ipgre_tunnel_locate(net, &p, 0)) == NULL)
> -				goto done;
> -			err = -EPERM;
> -			if (t == netdev_priv(ign->fb_tunnel_dev))
> -				goto done;
> -			dev = t->dev;
> -		}
> -		unregister_netdevice(dev);
> -		err = 0;
> -		break;
> -
> -	default:
> -		err = -EINVAL;
> -	}
> -
> -done:
> -	return err;
> -}
> -
> -static int ipgre_tunnel_change_mtu(struct net_device *dev, int new_mtu)
> -{
> -	struct ip_tunnel *tunnel = netdev_priv(dev);
> -	if (new_mtu < 68 ||
> -	    new_mtu > 0xFFF8 - dev->hard_header_len - tunnel->hlen)
> +	if (p->iph.version != 4 || p->iph.protocol != IPPROTO_GRE ||
> +			p->iph.ihl != 5 || (p->iph.frag_off&htons(~IP_DF)) ||
> +			((p->i_flags|p->o_flags)&(GRE_VERSION|GRE_ROUTING))) {
>  		return -EINVAL;
> -	dev->mtu = new_mtu;
> +	}
> +	if (p->iph.ttl)
> +		p->iph.frag_off |= htons(IP_DF);
> +	if (!(p->i_flags&GRE_KEY))
> +		p->i_key = 0;
> +	if (!(p->o_flags&GRE_KEY))
> +		p->o_key = 0;
>  	return 0;
>  }
>  
> @@ -1285,180 +377,6 @@ static int ipgre_close(struct net_device *dev)
>  
>  #endif
>  
> -static const struct net_device_ops ipgre_netdev_ops = {
> -	.ndo_init		= ipgre_tunnel_init,
> -	.ndo_uninit		= ipgre_tunnel_uninit,
> -#ifdef CONFIG_NET_IPGRE_BROADCAST
> -	.ndo_open		= ipgre_open,
> -	.ndo_stop		= ipgre_close,
> -#endif
> -	.ndo_start_xmit		= ipgre_tunnel_xmit,
> -	.ndo_do_ioctl		= ipgre_tunnel_ioctl,
> -	.ndo_change_mtu		= ipgre_tunnel_change_mtu,
> -	.ndo_get_stats64	= ipgre_get_stats64,
> -};
> -
> -static void ipgre_dev_free(struct net_device *dev)
> -{
> -	struct ip_tunnel *tunnel = netdev_priv(dev);
> -
> -	gro_cells_destroy(&tunnel->gro_cells);
> -	free_percpu(dev->tstats);
> -	free_netdev(dev);
> -}
> -
> -#define GRE_FEATURES (NETIF_F_SG |		\
> -		      NETIF_F_FRAGLIST |	\
> -		      NETIF_F_HIGHDMA |		\
> -		      NETIF_F_HW_CSUM)
> -
> -static void ipgre_tunnel_setup(struct net_device *dev)
> -{
> -	dev->netdev_ops		= &ipgre_netdev_ops;
> -	dev->destructor 	= ipgre_dev_free;
> -
> -	dev->type		= ARPHRD_IPGRE;
> -	dev->needed_headroom 	= LL_MAX_HEADER + sizeof(struct iphdr) + 4;
> -	dev->mtu		= ETH_DATA_LEN - sizeof(struct iphdr) - 4;
> -	dev->flags		= IFF_NOARP;
> -	dev->iflink		= 0;
> -	dev->addr_len		= 4;
> -	dev->features		|= NETIF_F_NETNS_LOCAL;
> -	dev->priv_flags		&= ~IFF_XMIT_DST_RELEASE;
> -
> -	dev->features		|= GRE_FEATURES;
> -	dev->hw_features	|= GRE_FEATURES;
> -}
> -
> -static int ipgre_tunnel_init(struct net_device *dev)
> -{
> -	struct ip_tunnel *tunnel;
> -	struct iphdr *iph;
> -	int err;
> -
> -	tunnel = netdev_priv(dev);
> -	iph = &tunnel->parms.iph;
> -
> -	tunnel->dev = dev;
> -	strcpy(tunnel->parms.name, dev->name);
> -
> -	memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
> -	memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);
> -
> -	if (iph->daddr) {
> -#ifdef CONFIG_NET_IPGRE_BROADCAST
> -		if (ipv4_is_multicast(iph->daddr)) {
> -			if (!iph->saddr)
> -				return -EINVAL;
> -			dev->flags = IFF_BROADCAST;
> -			dev->header_ops = &ipgre_header_ops;
> -		}
> -#endif
> -	} else
> -		dev->header_ops = &ipgre_header_ops;
> -
> -	dev->tstats = alloc_percpu(struct pcpu_tstats);
> -	if (!dev->tstats)
> -		return -ENOMEM;
> -
> -	err = gro_cells_init(&tunnel->gro_cells, dev);
> -	if (err) {
> -		free_percpu(dev->tstats);
> -		return err;
> -	}
> -
> -	return 0;
> -}
> -
> -static void ipgre_fb_tunnel_init(struct net_device *dev)
> -{
> -	struct ip_tunnel *tunnel = netdev_priv(dev);
> -	struct iphdr *iph = &tunnel->parms.iph;
> -
> -	tunnel->dev = dev;
> -	strcpy(tunnel->parms.name, dev->name);
> -
> -	iph->version		= 4;
> -	iph->protocol		= IPPROTO_GRE;
> -	iph->ihl		= 5;
> -	tunnel->hlen		= sizeof(struct iphdr) + 4;
> -
> -	dev_hold(dev);
> -}
> -
> -
> -static const struct gre_protocol ipgre_protocol = {
> -	.handler     = ipgre_rcv,
> -	.err_handler = ipgre_err,
> -};
> -
> -static void ipgre_destroy_tunnels(struct ipgre_net *ign, struct list_head *head)
> -{
> -	int prio;
> -
> -	for (prio = 0; prio < 4; prio++) {
> -		int h;
> -		for (h = 0; h < HASH_SIZE; h++) {
> -			struct ip_tunnel *t;
> -
> -			t = rtnl_dereference(ign->tunnels[prio][h]);
> -
> -			while (t != NULL) {
> -				unregister_netdevice_queue(t->dev, head);
> -				t = rtnl_dereference(t->next);
> -			}
> -		}
> -	}
> -}
> -
> -static int __net_init ipgre_init_net(struct net *net)
> -{
> -	struct ipgre_net *ign = net_generic(net, ipgre_net_id);
> -	int err;
> -
> -	ign->fb_tunnel_dev = alloc_netdev(sizeof(struct ip_tunnel), "gre0",
> -					   ipgre_tunnel_setup);
> -	if (!ign->fb_tunnel_dev) {
> -		err = -ENOMEM;
> -		goto err_alloc_dev;
> -	}
> -	dev_net_set(ign->fb_tunnel_dev, net);
> -
> -	ipgre_fb_tunnel_init(ign->fb_tunnel_dev);
> -	ign->fb_tunnel_dev->rtnl_link_ops = &ipgre_link_ops;
> -
> -	if ((err = register_netdev(ign->fb_tunnel_dev)))
> -		goto err_reg_dev;
> -
> -	rcu_assign_pointer(ign->tunnels_wc[0],
> -			   netdev_priv(ign->fb_tunnel_dev));
> -	return 0;
> -
> -err_reg_dev:
> -	ipgre_dev_free(ign->fb_tunnel_dev);
> -err_alloc_dev:
> -	return err;
> -}
> -
> -static void __net_exit ipgre_exit_net(struct net *net)
> -{
> -	struct ipgre_net *ign;
> -	LIST_HEAD(list);
> -
> -	ign = net_generic(net, ipgre_net_id);
> -	rtnl_lock();
> -	ipgre_destroy_tunnels(ign, &list);
> -	unregister_netdevice_many(&list);
> -	rtnl_unlock();
> -}
> -
> -static struct pernet_operations ipgre_net_ops = {
> -	.init = ipgre_init_net,
> -	.exit = ipgre_exit_net,
> -	.id   = &ipgre_net_id,
> -	.size = sizeof(struct ipgre_net),
> -};
> -
>  static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[])
>  {
>  	__be16 flags;
> @@ -1501,15 +419,15 @@ out:
>  	return ipgre_tunnel_validate(tb, data);
>  }
>  
> -static void ipgre_netlink_parms(struct nlattr *data[],
> -				struct ip_tunnel_parm *parms)
> +static int ipgre_netlink_parms(struct ip_tunnel *itn, struct nlattr *data[],
> +			struct nlattr *tb[], struct ip_tunnel_parm *parms)
>  {
>  	memset(parms, 0, sizeof(*parms));
>  
>  	parms->iph.protocol = IPPROTO_GRE;
>  
>  	if (!data)
> -		return;
> +		return 0;
>  
>  	if (data[IFLA_GRE_LINK])
>  		parms->link = nla_get_u32(data[IFLA_GRE_LINK]);
> @@ -1540,148 +458,10 @@ static void ipgre_netlink_parms(struct nlattr *data[],
>  
>  	if (!data[IFLA_GRE_PMTUDISC] || nla_get_u8(data[IFLA_GRE_PMTUDISC]))
>  		parms->iph.frag_off = htons(IP_DF);
> -}
> -
> -static int ipgre_tap_init(struct net_device *dev)
> -{
> -	struct ip_tunnel *tunnel;
> -
> -	tunnel = netdev_priv(dev);
> -
> -	tunnel->dev = dev;
> -	strcpy(tunnel->parms.name, dev->name);
> -
> -	ipgre_tunnel_bind_dev(dev);
> -
> -	dev->tstats = alloc_percpu(struct pcpu_tstats);
> -	if (!dev->tstats)
> -		return -ENOMEM;
>  
>  	return 0;
>  }
>  
> -static const struct net_device_ops ipgre_tap_netdev_ops = {
> -	.ndo_init		= ipgre_tap_init,
> -	.ndo_uninit		= ipgre_tunnel_uninit,
> -	.ndo_start_xmit		= ipgre_tunnel_xmit,
> -	.ndo_set_mac_address 	= eth_mac_addr,
> -	.ndo_validate_addr	= eth_validate_addr,
> -	.ndo_change_mtu		= ipgre_tunnel_change_mtu,
> -	.ndo_get_stats64	= ipgre_get_stats64,
> -};
> -
> -static void ipgre_tap_setup(struct net_device *dev)
> -{
> -
> -	ether_setup(dev);
> -
> -	dev->netdev_ops		= &ipgre_tap_netdev_ops;
> -	dev->destructor 	= ipgre_dev_free;
> -
> -	dev->iflink		= 0;
> -	dev->features		|= NETIF_F_NETNS_LOCAL;
> -}
> -
> -static int ipgre_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[],
> -			 struct nlattr *data[])
> -{
> -	struct ip_tunnel *nt;
> -	struct net *net = dev_net(dev);
> -	struct ipgre_net *ign = net_generic(net, ipgre_net_id);
> -	int mtu;
> -	int err;
> -
> -	nt = netdev_priv(dev);
> -	ipgre_netlink_parms(data, &nt->parms);
> -
> -	if (ipgre_tunnel_find(net, &nt->parms, dev->type))
> -		return -EEXIST;
> -
> -	if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
> -		eth_hw_addr_random(dev);
> -
> -	mtu = ipgre_tunnel_bind_dev(dev);
> -	if (!tb[IFLA_MTU])
> -		dev->mtu = mtu;
> -
> -	/* Can use a lockless transmit, unless we generate output sequences */
> -	if (!(nt->parms.o_flags & GRE_SEQ))
> -		dev->features |= NETIF_F_LLTX;
> -
> -	err = register_netdevice(dev);
> -	if (err)
> -		goto out;
> -
> -	dev_hold(dev);
> -	ipgre_tunnel_link(ign, nt);
> -
> -out:
> -	return err;
> -}
> -
> -static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
> -			    struct nlattr *data[])
> -{
> -	struct ip_tunnel *t, *nt;
> -	struct net *net = dev_net(dev);
> -	struct ipgre_net *ign = net_generic(net, ipgre_net_id);
> -	struct ip_tunnel_parm p;
> -	int mtu;
> -
> -	if (dev == ign->fb_tunnel_dev)
> -		return -EINVAL;
> -
> -	nt = netdev_priv(dev);
> -	ipgre_netlink_parms(data, &p);
> -
> -	t = ipgre_tunnel_locate(net, &p, 0);
> -
> -	if (t) {
> -		if (t->dev != dev)
> -			return -EEXIST;
> -	} else {
> -		t = nt;
> -
> -		if (dev->type != ARPHRD_ETHER) {
> -			unsigned int nflags = 0;
> -
> -			if (ipv4_is_multicast(p.iph.daddr))
> -				nflags = IFF_BROADCAST;
> -			else if (p.iph.daddr)
> -				nflags = IFF_POINTOPOINT;
> -
> -			if ((dev->flags ^ nflags) &
> -			    (IFF_POINTOPOINT | IFF_BROADCAST))
> -				return -EINVAL;
> -		}
> -
> -		ipgre_tunnel_unlink(ign, t);
> -		t->parms.iph.saddr = p.iph.saddr;
> -		t->parms.iph.daddr = p.iph.daddr;
> -		t->parms.i_key = p.i_key;
> -		if (dev->type != ARPHRD_ETHER) {
> -			memcpy(dev->dev_addr, &p.iph.saddr, 4);
> -			memcpy(dev->broadcast, &p.iph.daddr, 4);
> -		}
> -		ipgre_tunnel_link(ign, t);
> -		netdev_state_change(dev);
> -	}
> -
> -	t->parms.o_key = p.o_key;
> -	t->parms.iph.ttl = p.iph.ttl;
> -	t->parms.iph.tos = p.iph.tos;
> -	t->parms.iph.frag_off = p.iph.frag_off;
> -
> -	if (t->parms.link != p.link) {
> -		t->parms.link = p.link;
> -		mtu = ipgre_tunnel_bind_dev(dev);
> -		if (!tb[IFLA_MTU])
> -			dev->mtu = mtu;
> -		netdev_state_change(dev);
> -	}
> -
> -	return 0;
> -}
>  
>  static size_t ipgre_get_size(const struct net_device *dev)
>  {
> @@ -1745,6 +525,94 @@ static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
>  	[IFLA_GRE_PMTUDISC]	= { .type = NLA_U8 },
>  };
>  
> +static int ip_gre_calc_hlen(struct ip_tunnel *tunnel)
> +{
> +	int addend = 4;
> +
> +	/* Precalculate GRE options length */
> +	if (tunnel->parms.o_flags & GRE_CSUM)
> +		addend += 4;
> +	if (tunnel->parms.o_flags & GRE_KEY)
> +		addend += 4;
> +	if (tunnel->parms.o_flags & GRE_SEQ)
> +		addend += 4;
> +	return addend;
> +}
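
Spelling out the arithmetic for other readers: the 4-byte base is the
flags+protocol word, and each of CSUM, KEY and SEQ adds one more 4-byte
section, so a tunnel with all three options set ends up with
hlen = 4 + 4 + 4 + 4 = 16 bytes.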
> +
> +#define GRE_FEATURES (NETIF_F_SG |		\
> +		      NETIF_F_FRAGLIST |	\
> +		      NETIF_F_HIGHDMA |		\
> +		      NETIF_F_HW_CSUM)
> +
> +static void __gre_tunnel_init(struct net_device *dev)
> +{
> +	struct ip_tunnel *tunnel;
> +
> +	tunnel = netdev_priv(dev);
> +	tunnel->hlen = ip_gre_calc_hlen(tunnel);
> +	tunnel->parms.iph.protocol = IPPROTO_GRE;
> +
> +	dev->needed_headroom	= LL_MAX_HEADER + sizeof(struct iphdr) + 4;
> +	dev->mtu		= ETH_DATA_LEN - sizeof(struct iphdr) - 4;
> +	dev->iflink		= 0;
> +
> +	dev->features		|= NETIF_F_NETNS_LOCAL | GRE_FEATURES;
> +	dev->hw_features	|= GRE_FEATURES;
> +}
> +
> +static int ipgre_tunnel_init(struct net_device *dev)
> +{
> +	struct ip_tunnel *tunnel = netdev_priv(dev);
> +	struct iphdr *iph;
> +
> +	__gre_tunnel_init(dev);
> +
> +	memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
> +	memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);
> +
> +	dev->type		= ARPHRD_IPGRE;
> +	dev->flags		= IFF_NOARP;
> +	dev->priv_flags		&= ~IFF_XMIT_DST_RELEASE;
> +	dev->addr_len		= 4;
> +
> +	iph = &tunnel->parms.iph;
> +	if (iph->daddr) {
> +#ifdef CONFIG_NET_IPGRE_BROADCAST
> +		if (ipv4_is_multicast(iph->daddr)) {
> +			if (!iph->saddr)
> +				return -EINVAL;
> +			dev->flags = IFF_BROADCAST;
> +			dev->header_ops = &ipgre_header_ops;
> +		}
> +#endif
> +	} else
> +		dev->header_ops = &ipgre_header_ops;
> +
> +	return ip_tunnel_init(dev);
> +}
> +
> +static int ipgre_tap_init(struct net_device *dev)
> +{
> +	__gre_tunnel_init(dev);
> +	return ip_tunnel_init(dev);
> +}
> +
> +static const struct net_device_ops ipgre_tap_netdev_ops;
> +static inline void ipgre_tap_tunnel_setup(struct net_device *dev)
> +{
> +	ether_setup(dev);
> +	dev->netdev_ops = &ipgre_tap_netdev_ops;
> +	ip_tunnel_setup(dev, ipgre_tap_net_id);
> +}
> +
> +static const struct net_device_ops ipgre_netdev_ops;
> +static inline void ipgre_tunnel_setup(struct net_device *dev)
> +{
> +	dev->netdev_ops = &ipgre_netdev_ops;
> +	ip_tunnel_setup(dev, ipgre_net_id);
> +}
> +
>  static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
>  	.kind		= "gre",
>  	.maxtype	= IFLA_GRE_MAX,
> @@ -1752,8 +620,9 @@ static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
>  	.priv_size	= sizeof(struct ip_tunnel),
>  	.setup		= ipgre_tunnel_setup,
>  	.validate	= ipgre_tunnel_validate,
> -	.newlink	= ipgre_newlink,
> -	.changelink	= ipgre_changelink,
> +	.newlink	= ip_tunnel_newlink,
> +	.changelink	= ip_tunnel_changelink,
> +	.dellink	= ip_tunnel_dellink,
>  	.get_size	= ipgre_get_size,
>  	.fill_info	= ipgre_fill_info,
>  };
> @@ -1763,17 +632,97 @@ static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
>  	.maxtype	= IFLA_GRE_MAX,
>  	.policy		= ipgre_policy,
>  	.priv_size	= sizeof(struct ip_tunnel),
> -	.setup		= ipgre_tap_setup,
> +	.setup		= ipgre_tap_tunnel_setup,
>  	.validate	= ipgre_tap_validate,
> -	.newlink	= ipgre_newlink,
> -	.changelink	= ipgre_changelink,
> +	.newlink	= ip_tunnel_newlink,
> +	.changelink	= ip_tunnel_changelink,
> +	.dellink	= ip_tunnel_dellink,
>  	.get_size	= ipgre_get_size,
>  	.fill_info	= ipgre_fill_info,
>  };
>  
> -/*
> - *	And now the modules code and kernel interface.
> - */
> +static const struct net_device_ops ipgre_netdev_ops = {
> +	.ndo_init		= ipgre_tunnel_init,
> +	.ndo_uninit		= ip_tunnel_uninit,
> +	.ndo_start_xmit		= ipgre_tunnel_xmit,
> +#ifdef CONFIG_NET_IPGRE_BROADCAST
> +	.ndo_open		= ipgre_open,
> +	.ndo_stop		= ipgre_close,
> +#endif
> +	.ndo_do_ioctl		= ip_tunnel_ioctl,
> +	.ndo_change_mtu		= ip_tunnel_change_mtu,
> +	.ndo_get_stats64	= ip_tunnel_get_stats64,
> +};
> +
> +static const struct net_device_ops ipgre_tap_netdev_ops = {
> +	.ndo_init		= ipgre_tap_init,
> +	.ndo_uninit		= ip_tunnel_uninit,
> +	.ndo_start_xmit		= ipgre_tunnel_xmit,
> +	.ndo_set_mac_address	= eth_mac_addr,
> +	.ndo_validate_addr	= eth_validate_addr,
> +	.ndo_change_mtu		= ip_tunnel_change_mtu,
> +	.ndo_get_stats64	= ip_tunnel_get_stats64,
> +};
> +
> +static const struct ip_tunnel_ops ipgre_tunnel_ops = {
> +	.parse_netlink_parms = ipgre_netlink_parms,
> +	.get_ioctl_param = ipgre_get_ioctl_param,
> +};
> +
> +static int __net_init ipgre_init_net(struct net *net)
> +{
> +	struct ip_tunnel_net *itn = net_generic(net, ipgre_net_id);
> +
> +	itn->ops = &ipgre_tunnel_ops;
> +	return ip_tunnel_init_net(net, ipgre_net_id, &ipgre_link_ops);
> +}
> +
> +static void __net_exit ipgre_exit_net(struct net *net)
> +{
> +	struct ip_tunnel_net *itn = net_generic(net, ipgre_net_id);
> +	ip_tunnel_delete_net(itn);
> +}
> +
> +static struct pernet_operations ipgre_net_ops = {
> +	.init = ipgre_init_net,
> +	.exit = ipgre_exit_net,
> +	.id   = &ipgre_net_id,
> +	.size = sizeof(struct ip_tunnel_net),
> +};
> +
> +static const struct ip_tunnel_ops ipgre_tap_tunnel_ops = {
> +	.parse_netlink_parms = ipgre_netlink_parms,
> +	.get_ioctl_param = ipgre_get_ioctl_param,
> +};
> +
> +static int __net_init ipgre_tap_init_net(struct net *net)
> +{
> +	struct ip_tunnel_net *itn = net_generic(net, ipgre_tap_net_id);
> +
> +	itn->ops = &ipgre_tap_tunnel_ops;
> +	return ip_tunnel_init_net(net, ipgre_tap_net_id, &ipgre_tap_ops);
> +}
> +
> +static void __net_exit ipgre_tap_exit_net(struct net *net)
> +{
> +	struct ip_tunnel_net *itn = net_generic(net, ipgre_tap_net_id);
> +	ip_tunnel_delete_net(itn);
> +}
> +
> +static struct pernet_operations ipgre_tap_net_ops = {
> +	.init = ipgre_tap_init_net,
> +	.exit = ipgre_tap_exit_net,
> +	.id   = &ipgre_tap_net_id,
> +	.size = sizeof(struct ip_tunnel_net),
> +};
> +
> +static struct ipt_protocol ipgre_protocol = {
> +	.handler	= ipgre_rcv,
> +	.err_handler	= ipgre_err,
> +	.portno		= 0,
> +	.priority	= 0,
> +	.type		= IPT_GRE,
> +};
>  
>  static int __init ipgre_init(void)
>  {
> @@ -1785,8 +734,12 @@ static int __init ipgre_init(void)
>  	if (err < 0)
>  		return err;
>  
> +	err = register_pernet_device(&ipgre_tap_net_ops);
> +	if (err < 0)
> +		goto pnet_tap_failed;
> +
>  	rtnl_lock();
> -	err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO);
> +	err = ipt_add_protocol(&ipgre_protocol);
>  	if (err < 0) {
>  		rtnl_unlock();
>  		pr_info("%s: can't add protocol\n", __func__);
> @@ -1802,16 +755,17 @@ static int __init ipgre_init(void)
>  	if (err < 0)
>  		goto tap_ops_failed;
>  
> -out:
> -	return err;
> +	return 0;
>  
>  tap_ops_failed:
>  	rtnl_link_unregister(&ipgre_link_ops);
>  rtnl_link_failed:
> -	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
> +	ipt_del_protocol(&ipgre_protocol);
>  add_proto_failed:
> +	unregister_pernet_device(&ipgre_tap_net_ops);
> +pnet_tap_failed:
>  	unregister_pernet_device(&ipgre_net_ops);
> -	goto out;
> +	return err;
>  }
>  
>  static void __exit ipgre_fini(void)
> @@ -1819,9 +773,9 @@ static void __exit ipgre_fini(void)
>  	rtnl_link_unregister(&ipgre_tap_ops);
>  	rtnl_link_unregister(&ipgre_link_ops);
>  	rtnl_lock();
> -	if (gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO) < 0)
> -		pr_info("%s: can't remove protocol\n", __func__);
> +	ipt_del_protocol(&ipgre_protocol);
>  	rtnl_unlock();
> +	unregister_pernet_device(&ipgre_tap_net_ops);
>  	unregister_pernet_device(&ipgre_net_ops);
>  }
>  
> diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
> new file mode 100644
> index 0000000..e5e2fde
> --- /dev/null
> +++ b/net/ipv4/ip_tunnel.c
> @@ -0,0 +1,1283 @@
> +/*
> + *
> + *	This program is free software; you can redistribute it and/or
> + *	modify it under the terms of the GNU General Public License
> + *	as published by the Free Software Foundation; either version
> + *	2 of the License, or (at your option) any later version.
> + *
> + */
> +
> +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
> +
> +#include <linux/capability.h>
> +#include <linux/module.h>
> +#include <linux/types.h>
> +#include <linux/kernel.h>
> +#include <linux/slab.h>
> +#include <linux/uaccess.h>
> +#include <linux/skbuff.h>
> +#include <linux/netdevice.h>
> +#include <linux/in.h>
> +#include <linux/tcp.h>
> +#include <linux/udp.h>
> +#include <linux/if_arp.h>
> +#include <linux/mroute.h>
> +#include <linux/init.h>
> +#include <linux/in6.h>
> +#include <linux/inetdevice.h>
> +#include <linux/igmp.h>
> +#include <linux/netfilter_ipv4.h>
> +#include <linux/etherdevice.h>
> +#include <linux/if_ether.h>
> +#include <linux/if_vlan.h>
> +
> +#include <net/sock.h>
> +#include <net/ip.h>
> +#include <net/icmp.h>
> +#include <net/protocol.h>
> +#include <net/ipip.h>
> +#include <net/arp.h>
> +#include <net/checksum.h>
> +#include <net/dsfield.h>
> +#include <net/inet_ecn.h>
> +#include <net/xfrm.h>
> +#include <net/net_namespace.h>
> +#include <net/netns/generic.h>
> +#include <net/rtnetlink.h>
> +
> +#if IS_ENABLED(CONFIG_IPV6)
> +#include <net/ipv6.h>
> +#include <net/ip6_fib.h>
> +#include <net/ip6_route.h>
> +#endif
> +
> +#define HASH(addr) ((__force u32)(addr) ^ ((__force u32)(addr) >> 4))
> +static int ip_tunnel_salt;
> +
> +static unsigned int ip_tunnel_hash(struct ip_tunnel_net *itn,
> +				   __be32 key, __be32 remote)
> +{
> +	unsigned int h = HASH(key);
> +
> +	if (!(itn->ops->flags & HASH_ON_KEY))
> +		h ^= HASH(remote);
> +
> +	return jhash_1word(h, ip_tunnel_salt) & (IPT_HASH_SIZE-1);
> +}
> +
> +/* often modified stats are per cpu, other are shared (netdev->stats) */
> +struct rtnl_link_stats64 *ip_tunnel_get_stats64(struct net_device *dev,
> +					   struct rtnl_link_stats64 *tot)
> +{
> +	int i;
> +
> +	for_each_possible_cpu(i) {
> +		const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
> +		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
> +		unsigned int start;
> +
> +		do {
> +			start = u64_stats_fetch_begin_bh(&tstats->syncp);
> +			rx_packets = tstats->rx_packets;
> +			tx_packets = tstats->tx_packets;
> +			rx_bytes = tstats->rx_bytes;
> +			tx_bytes = tstats->tx_bytes;
> +		} while (u64_stats_fetch_retry_bh(&tstats->syncp, start));
> +
> +		tot->rx_packets += rx_packets;
> +		tot->tx_packets += tx_packets;
> +		tot->rx_bytes   += rx_bytes;
> +		tot->tx_bytes   += tx_bytes;
> +	}
> +
> +	tot->multicast = dev->stats.multicast;
> +
> +	tot->rx_crc_errors = dev->stats.rx_crc_errors;
> +	tot->rx_fifo_errors = dev->stats.rx_fifo_errors;
> +	tot->rx_length_errors = dev->stats.rx_length_errors;
> +	tot->rx_frame_errors = dev->stats.rx_frame_errors;
> +	tot->rx_errors = dev->stats.rx_errors;
> +
> +	tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
> +	tot->tx_carrier_errors = dev->stats.tx_carrier_errors;
> +	tot->tx_dropped = dev->stats.tx_dropped;
> +	tot->tx_aborted_errors = dev->stats.tx_aborted_errors;
> +	tot->tx_errors = dev->stats.tx_errors;
> +
> +	tot->collisions  = dev->stats.collisions;
> +
> +	return tot;
> +}
> +EXPORT_SYMBOL(ip_tunnel_get_stats64);
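
Minor note: the u64_stats fetch/retry pair is what makes these 64-bit
counters safe to read against concurrent writers on 32-bit SMP; on
64-bit builds the seqcount compiles away and the loop body runs once.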
> +
> +static bool ip_tunnel_key_match(const struct ip_tunnel_parm *p,
> +				__be16 flags, __be32 key)
> +{
> +	if (p->i_flags & TUNNEL_KEY) {
> +		if (flags & TUNNEL_KEY)
> +			return key == p->i_key;
> +		else
> +			/* key expected, none present */
> +			return false;
> +	} else
> +		return !(flags & TUNNEL_KEY);
> +}
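
For reviewers skimming past: the rule above reduces to "a keyed tunnel
matches only packets carrying the same key, and a keyless tunnel
matches only keyless packets" — no cross-matching in either direction.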
> +
> +static struct ip_tunnel *__tunnel_lookup(struct ip_tunnel_net *itn,
> +				  int link, __be16 flags,
> +				  __be32 remote, __be32 local,
> +				  __be32 key, int hash,
> +				  struct ip_tunnel **cand)
> +{
> +	struct ip_tunnel *t;
> +
> +	for_each_ip_tunnel_rcu(t, itn->tunnels[hash]) {
> +		if (local != t->parms.iph.saddr ||
> +		    remote != t->parms.iph.daddr ||
> +		    !(t->dev->flags & IFF_UP))
> +			continue;
> +
> +		if (!remote &&
> +		    !(ipv4_is_multicast(local) && local == t->parms.iph.daddr))
> +			continue;
> +
> +		if (!ip_tunnel_key_match(&t->parms, flags, key))
> +			continue;
> +
> +		if (t->parms.link == link)
> +			return t;
> +		else
> +			*cand = t;
> +	}
> +	return NULL;
> +}
> +
> +/* Fallback tunnel: no source, no destination, no key, no options
> +
> +   Tunnel hash table:
> +   We require an exact key match, i.e. if a key is present in the packet
> +   it will match only a tunnel with the same key; if the key is absent,
> +   it will match only a keyless tunnel.
> +
> +   All keyless packets, if not matched against a configured keyless tunnel,
> +   will match the fallback tunnel.
> + */
> +
> +/* Given src, dst and key, find the appropriate tunnel for input. */
> +struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
> +				    int link, __be16 flags,
> +				    __be32 remote, __be32 local,
> +				    __be32 key)
> +{
> +	unsigned int h;
> +	struct ip_tunnel *t, *cand = NULL;
> +
> +	h = ip_tunnel_hash(itn, key, remote);
> +
> +	t = __tunnel_lookup(itn, link, flags, remote, local, key, h, &cand);
> +	if (t)
> +		return t;
> +
> +	t = __tunnel_lookup(itn, link, flags, remote, 0, key, h, &cand);
> +	if (t)
> +		return t;
> +
> +	t = __tunnel_lookup(itn, link, flags, 0, local, key, h, &cand);
> +	if (t)
> +		return t;
> +
> +	t = __tunnel_lookup(itn, link, flags, 0, 0, key, h, &cand);
> +	if (t)
> +		return t;
> +
> +	if (cand != NULL)
> +		return cand;
> +
> +	if (itn->fb_tunnel_dev && itn->fb_tunnel_dev->flags & IFF_UP)
> +		return netdev_priv(itn->fb_tunnel_dev);
> +
> +	return NULL;
> +}
> +EXPORT_SYMBOL(ip_tunnel_lookup);
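
So the precedence, if I read it right, is: exact (remote, local) pair,
then (remote, any), then (any, local), then fully wildcard, then a
tunnel that matched everything except parms.link, and finally the
fallback device when it is up. One thing I could not convince myself
of: for the third and fourth lookups remote == 0 is passed in, and the
!remote test in __tunnel_lookup() then skips every candidate unless
local happens to be the tunnel's multicast daddr — can a keyless
wildcard tunnel still match, or is that check meant to apply only to
multicast tunnels?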
> +
> +struct ip_tunnel *ip_tunnel_lookup_key(struct ip_tunnel_net *itn, __be32 key)
> +{
> +	struct ip_tunnel *t;
> +	unsigned int h = HASH(key);
> +
> +	h = jhash_1word(h, ip_tunnel_salt) & (IPT_HASH_SIZE-1);
> +	for_each_ip_tunnel_rcu(t, itn->tunnels[h]) {
> +		if (key == t->parms.i_key)
> +			return t;
> +	}
> +
> +	return NULL;
> +}
> +EXPORT_SYMBOL(ip_tunnel_lookup_key);
> +
> +static struct ip_tunnel __rcu **ip_bucket(struct ip_tunnel_net *itn,
> +					  struct ip_tunnel_parm *parms)
> +{
> +	unsigned int h = ip_tunnel_hash(itn, parms->i_key, parms->iph.daddr);
> +
> +	return &itn->tunnels[h];
> +}
> +
> +void ip_tunnel_link(struct ip_tunnel_net *itn, struct ip_tunnel *t)
> +{
> +	struct ip_tunnel __rcu **tp = ip_bucket(itn, &t->parms);
> +
> +	rcu_assign_pointer(t->next, rtnl_dereference(*tp));
> +	rcu_assign_pointer(*tp, t);
> +}
> +EXPORT_SYMBOL(ip_tunnel_link);
> +
> +void ip_tunnel_unlink(struct ip_tunnel_net *itn, struct ip_tunnel *t)
> +{
> +	struct ip_tunnel __rcu **tp;
> +	struct ip_tunnel *iter;
> +
> +	for (tp = ip_bucket(itn, &t->parms);
> +	     (iter = rtnl_dereference(*tp)) != NULL;
> +	     tp = &iter->next) {
> +		if (t == iter) {
> +			rcu_assign_pointer(*tp, t->next);
> +			break;
> +		}
> +	}
> +}
> +EXPORT_SYMBOL(ip_tunnel_unlink);
> +
> +struct ip_tunnel *ip_tunnel_find(struct ip_tunnel_net *itn,
> +				 struct ip_tunnel_parm *parms,
> +				 int type)
> +{
> +	__be32 remote = parms->iph.daddr;
> +	__be32 local = parms->iph.saddr;
> +	__be32 key = parms->i_key;
> +	int link = parms->link;
> +	struct ip_tunnel *t;
> +	struct ip_tunnel __rcu **tp;
> +
> +	for (tp = ip_bucket(itn, parms);
> +	     (t = rtnl_dereference(*tp)) != NULL;
> +	     tp = &t->next)
> +		if (local == t->parms.iph.saddr &&
> +		    remote == t->parms.iph.daddr &&
> +		    key == t->parms.i_key &&
> +		    link == t->parms.link &&
> +		    type == t->dev->type)
> +			break;
> +
> +	return t;
> +}
> +EXPORT_SYMBOL(ip_tunnel_find);
> +
> +static struct net_device *__ip_tunnel_create(struct net *net,
> +					  const struct rtnl_link_ops *ops,
> +					  struct ip_tunnel_parm *parms)
> +{
> +	int err;
> +	struct ip_tunnel *tunnel;
> +	struct net_device *dev;
> +	char name[IFNAMSIZ];
> +
> +	if (parms->name[0]) {
> +		strlcpy(name, parms->name, IFNAMSIZ);
> +	} else {
> +		strlcpy(name, ops->kind, IFNAMSIZ);
> +		strlcat(name, "%d", IFNAMSIZ);
> +	}
> +
> +	ASSERT_RTNL();
> +	dev = alloc_netdev(ops->priv_size, name, ops->setup);
> +	if (!dev) {
> +		err = -ENOMEM;
> +		goto failed;
> +	}
> +	dev_net_set(dev, net);
> +
> +	dev->rtnl_link_ops = ops;
> +
> +	tunnel = netdev_priv(dev);
> +	tunnel->parms = *parms;
> +
> +	err = register_netdevice(dev);
> +	if (err)
> +		goto failed_free;
> +
> +	return dev;
> +
> +failed_free:
> +	free_netdev(dev);
> +failed:
> +	return ERR_PTR(err);
> +}
> +
> +static struct ip_tunnel *ip_tunnel_create(struct net *net,
> +					  struct ip_tunnel_net *itn,
> +					  struct ip_tunnel_parm *parms)
> +{
> +	struct ip_tunnel *nt, *fbt;
> +	struct net_device *dev;
> +
> +	BUG_ON(!itn->fb_tunnel_dev);
> +	fbt = netdev_priv(itn->fb_tunnel_dev);
> +	dev = __ip_tunnel_create(net, itn->fb_tunnel_dev->rtnl_link_ops, parms);
> +	if (IS_ERR(dev))
> +		return NULL;
> +
> +	dev->mtu = ip_tunnel_bind_dev(dev);
> +
> +	nt = netdev_priv(dev);
> +	ip_tunnel_link(itn, nt);
> +	return nt;
> +}
> +
> +void ip_tunnel_err(struct ip_tunnel *t, struct sk_buff *skb, u32 info)
> +{
> +	const int code = icmp_hdr(skb)->code;
> +	const int type = icmp_hdr(skb)->type;
> +
> +	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
> +		ipv4_update_pmtu(skb, dev_net(skb->dev), info,
> +				 t->parms.link, 0, t->parms.iph.protocol, 0);
> +		return;
> +	}
> +	if (type == ICMP_REDIRECT) {
> +		ipv4_redirect(skb, dev_net(skb->dev), t->parms.link, 0,
> +			      t->parms.iph.protocol, 0);
> +		return;
> +	}
> +	if (t->parms.iph.daddr == 0 ||
> +	    ipv4_is_multicast(t->parms.iph.daddr))
> +		return;
> +
> +	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
> +		return;
> +
> +	if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
> +		t->err_count++;
> +	else
> +		t->err_count = 1;
> +	t->err_time = jiffies;
> +}
> +EXPORT_SYMBOL(ip_tunnel_err);
> +
> +int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
> +		  const struct tnl_ptk_info *tpi, bool log_ecn_error)
> +{
> +	int err;
> +	struct pcpu_tstats *tstats;
> +	const struct iphdr *iph = ip_hdr(skb);
> +
> +	secpath_reset(skb);
> +
> +	skb->protocol = tpi->proto;
> +
> +	skb->mac_header = skb->network_header;
> +	skb->pkt_type = PACKET_HOST;
> +
> +	__pskb_pull(skb, tpi->hdr_len);
> +	skb_postpull_rcsum(skb, skb_transport_header(skb), tpi->hdr_len);
> +#ifdef CONFIG_NET_IPGRE_BROADCAST
> +	if (ipv4_is_multicast(iph->daddr)) {
> +		/* Looped back packet, drop it! */
> +		if (rt_is_output_route(skb_rtable(skb)))
> +			goto drop;
> +		tunnel->dev->stats.multicast++;
> +		skb->pkt_type = PACKET_BROADCAST;
> +	}
> +#endif
> +
> +	if (!(tpi->flags & TUNNEL_CSUM) &&
> +	    (tunnel->parms.i_flags & TUNNEL_CSUM)) {
> +		tunnel->dev->stats.rx_crc_errors++;
> +		tunnel->dev->stats.rx_errors++;
> +		goto drop;
> +	}
> +
> +	if (tunnel->parms.i_flags & TUNNEL_SEQ) {
> +		if (!(tpi->flags & TUNNEL_SEQ) ||
> +		    (tunnel->i_seqno &&
> +		     (s32)(ntohl(tpi->seq) - tunnel->i_seqno) < 0)) {
> +			tunnel->dev->stats.rx_fifo_errors++;
> +			tunnel->dev->stats.rx_errors++;
> +			goto drop;
> +		}
> +		tunnel->i_seqno = ntohl(tpi->seq) + 1;
> +	}
> +
> +	/* Warning: All skb pointers will be invalidated! */
> +	if (tunnel->dev->type == ARPHRD_ETHER) {
> +		if (!pskb_may_pull(skb, ETH_HLEN)) {
> +			tunnel->dev->stats.rx_length_errors++;
> +			tunnel->dev->stats.rx_errors++;
> +			goto drop;
> +		}
> +
> +		iph = ip_hdr(skb);
> +		skb->protocol = eth_type_trans(skb, tunnel->dev);
> +		skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
> +	}
> +
> +	__skb_tunnel_rx(skb, tunnel->dev);
> +
> +	skb_reset_network_header(skb);
> +	iph = ip_hdr(skb);
> +	err = IP_ECN_decapsulate(iph, skb);
> +	if (unlikely(err)) {
> +		if (log_ecn_error)
> +			net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
> +					&iph->saddr, iph->tos);
> +		if (err > 1) {
> +			++tunnel->dev->stats.rx_frame_errors;
> +			++tunnel->dev->stats.rx_errors;
> +			goto drop;
> +		}
> +	}
> +
> +	tstats = this_cpu_ptr(tunnel->dev->tstats);
> +	u64_stats_update_begin(&tstats->syncp);
> +	tstats->rx_packets++;
> +	tstats->rx_bytes += skb->len;
> +	u64_stats_update_end(&tstats->syncp);
> +
> +	gro_cells_receive(&tunnel->gro_cells, skb);
> +
> +	return 0;
> +
> +drop:
> +	kfree_skb(skb);
> +	return 0;
> +}
> +EXPORT_SYMBOL(ip_tunnel_rcv);
> +
> +static void build_ip_header(struct iphdr  *iph, struct flowi4 *fl4, int proto,
> +			    __be16 df, u8 tos, u8 ttl)
> +{
> +	/* Push down and install the IP header. */
> +
> +	iph->version		=	4;
> +	iph->ihl		=	sizeof(struct iphdr) >> 2;
> +	iph->frag_off		=	df;
> +	iph->protocol		=	proto;
> +	iph->tos		=	tos;
> +	iph->daddr		=	fl4->daddr;
> +	iph->saddr		=	fl4->saddr;
> +	iph->ttl		=	ttl;
> +}
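
I assume leaving tot_len, id and check unset here is intentional:
__ip_local_out() fills in tot_len and the header checksum when
send_frags() calls ip_local_out(), and the id is picked via
ip_select_ident() in ip_tunnel_xmit() below.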
> +
> +static inline struct rtable *ip_route_output_tunnel(struct net *net,
> +						  struct flowi4 *fl4,
> +						  int proto,
> +						  __be32 daddr, __be32 saddr,
> +						  __be32 key, __u8 tos, int oif)
> +{
> +	memset(fl4, 0, sizeof(*fl4));
> +	fl4->flowi4_oif = oif;
> +	fl4->daddr = daddr;
> +	fl4->saddr = saddr;
> +	fl4->flowi4_tos = tos;
> +	fl4->flowi4_proto = proto;
> +	fl4->fl4_gre_key = key;
> +	return ip_route_output_key(net, fl4);
> +}
> +
> +int  ip_tunnel_build_iphdr(struct sk_buff *skb, struct net_device *dev,
> +			   const struct iphdr *tiph, int hlen,
> +			   struct iphdr  *niph)
> +{
> +	struct ip_tunnel *tunnel = netdev_priv(dev);
> +	const struct iphdr  *old_iph = ip_hdr(skb);
> +	struct flowi4 fl4;
> +	u8     tos, ttl;
> +	__be16 df;
> +	struct rtable *rt;		/* Route to the other host */
> +	struct net_device *tdev;	/* Device to other host */
> +	unsigned int max_headroom;	/* The extra header space needed */
> +	__be32 dst;
> +	int t_hlen = tunnel->hlen + sizeof(struct iphdr);
> +
> +	if (dev->type == ARPHRD_ETHER)
> +		IPCB(skb)->flags = 0;
> +
> +	dst = tiph->daddr;
> +	if (dst == 0) {
> +		/* NBMA tunnel */
> +
> +		if (skb_dst(skb) == NULL) {
> +			dev->stats.tx_fifo_errors++;
> +			goto tx_error;
> +		}
> +
> +		if (skb->protocol == htons(ETH_P_IP)) {
> +			rt = skb_rtable(skb);
> +			dst = rt_nexthop(rt, old_iph->daddr);
> +		}
> +#if IS_ENABLED(CONFIG_IPV6)
> +		else if (skb->protocol == htons(ETH_P_IPV6)) {
> +			const struct in6_addr *addr6;
> +			struct neighbour *neigh;
> +			bool do_tx_error_icmp;
> +			int addr_type;
> +
> +			neigh = dst_neigh_lookup(skb_dst(skb),
> +						 &ipv6_hdr(skb)->daddr);
> +			if (neigh == NULL)
> +				goto tx_error;
> +
> +			addr6 = (const struct in6_addr *)&neigh->primary_key;
> +			addr_type = ipv6_addr_type(addr6);
> +
> +			if (addr_type == IPV6_ADDR_ANY) {
> +				addr6 = &ipv6_hdr(skb)->daddr;
> +				addr_type = ipv6_addr_type(addr6);
> +			}
> +
> +			if ((addr_type & IPV6_ADDR_COMPATv4) == 0)
> +				do_tx_error_icmp = true;
> +			else {
> +				do_tx_error_icmp = false;
> +				dst = addr6->s6_addr32[3];
> +			}
> +			neigh_release(neigh);
> +			if (do_tx_error_icmp)
> +				goto tx_error_icmp;
> +		}
> +#endif
> +		else
> +			goto tx_error;
> +	}
> +
> +	tos = tiph->tos;
> +	if (tos == 1) {
> +		tos = 0;
> +		if (skb->protocol == htons(ETH_P_IP))
> +			tos = old_iph->tos;
> +		else if (skb->protocol == htons(ETH_P_IPV6))
> +			tos = ipv6_get_dsfield((const struct ipv6hdr *)old_iph);
> +	}
> +
> +	rt = ip_route_output_tunnel(dev_net(dev), &fl4,
> +				    tunnel->parms.iph.protocol,
> +				    dst, tiph->saddr,
> +				    tunnel->parms.o_key, RT_TOS(tos),
> +				    tunnel->parms.link);
> +	if (IS_ERR(rt)) {
> +		dev->stats.tx_carrier_errors++;
> +		goto tx_error;
> +	}
> +	tdev = rt->dst.dev;
> +
> +	if (tdev == dev) {
> +		ip_rt_put(rt);
> +		dev->stats.collisions++;
> +		goto tx_error;
> +	}
> +
> +	df = tiph->frag_off;
> +	if (dev->type != ARPHRD_ETHER) {
> +		int mtu;
> +
> +		if (df)
> +			mtu = dst_mtu(&rt->dst) - dev->hard_header_len - t_hlen;
> +		else
> +			mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
> +
> +		if (skb_dst(skb))
> +			skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
> +
> +		if (skb->protocol == htons(ETH_P_IP)) {
> +			df |= (old_iph->frag_off & htons(IP_DF));
> +
> +			if ((old_iph->frag_off & htons(IP_DF)) &&
> +			    mtu < ntohs(old_iph->tot_len)) {
> +				icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
> +				ip_rt_put(rt);
> +				goto tx_error;
> +			}
> +		}
> +#if IS_ENABLED(CONFIG_IPV6)
> +		else if (skb->protocol == htons(ETH_P_IPV6)) {
> +			struct rt6_info *rt6 = (struct rt6_info *)skb_dst(skb);
> +
> +			if (rt6 && mtu < dst_mtu(skb_dst(skb)) &&
> +				   mtu >= IPV6_MIN_MTU) {
> +				if ((tunnel->parms.iph.daddr &&
> +				!ipv4_is_multicast(tunnel->parms.iph.daddr)) ||
> +				rt6->rt6i_dst.plen == 128) {
> +					rt6->rt6i_flags |= RTF_MODIFIED;
> +					dst_metric_set(skb_dst(skb), RTAX_MTU, mtu);
> +				}
> +			}
> +
> +			if (mtu >= IPV6_MIN_MTU &&
> +			    mtu < skb->len - t_hlen + hlen) {
> +				icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
> +				ip_rt_put(rt);
> +				goto tx_error;
> +			}
> +		}
> +#endif
> +	}
> +
> +	if (tunnel->err_count > 0) {
> +		if (time_before(jiffies,
> +				tunnel->err_time + IPTUNNEL_ERR_TIMEO)) {
> +			tunnel->err_count--;
> +
> +			dst_link_failure(skb);
> +		} else
> +			tunnel->err_count = 0;
> +	}
> +
> +	tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
> +
> +	ttl = tiph->ttl;
> +	if (ttl == 0) {
> +		if (skb->protocol == htons(ETH_P_IP))
> +			ttl = old_iph->ttl;
> +#if IS_ENABLED(CONFIG_IPV6)
> +		else if (skb->protocol == htons(ETH_P_IPV6))
> +			ttl = ((const struct ipv6hdr *)old_iph)->hop_limit;
> +#endif
> +		else
> +			ttl = ip4_dst_hoplimit(&rt->dst);
> +	}
> +
> +	max_headroom = LL_RESERVED_SPACE(tdev) + hlen + rt->dst.header_len;
> +	if (max_headroom > dev->needed_headroom)
> +		dev->needed_headroom = max_headroom;
> +
> +	/* Push down and install the IP header. */
> +
> +	build_ip_header(niph, &fl4, tunnel->parms.iph.protocol, df, tos, ttl);
> +	skb_dst_drop(skb);
> +	skb_dst_set(skb, &rt->dst);
> +	return 0;
> +
> +#if IS_ENABLED(CONFIG_IPV6)
> +tx_error_icmp:
> +	dst_link_failure(skb);
> +#endif
> +tx_error:
> +	dev->stats.tx_errors++;
> +	dev_kfree_skb(skb);
> +	return -1;
> +}
> +EXPORT_SYMBOL(ip_tunnel_build_iphdr);
> +
> +static void free_linked_skbs(struct sk_buff *skb)
> +{
> +	while (skb) {
> +		struct sk_buff *next = skb->next;
> +		kfree_skb(skb);
> +		skb = next;
> +	}
> +}
> +
> +static int send_frags(struct sk_buff *skb,
> +		      int tunnel_hlen)
> +{
> +	int sent_len;
> +
> +	sent_len = 0;
> +	while (skb) {
> +		struct sk_buff *next = skb->next;
> +		int frag_len = skb->len - tunnel_hlen;
> +		int err;
> +
> +		skb->next = NULL;
> +		memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
> +
> +		err = ip_local_out(skb);
> +		skb = next;
> +		if (unlikely(net_xmit_eval(err)))
> +			goto free_frags;
> +		sent_len += frag_len;
> +	}
> +
> +	return sent_len;
> +
> +free_frags:
> +	/*
> +	 * There's no point in continuing to send fragments once one has been
> +	 * dropped, so just free the rest.  This may also help relieve the
> +	 * congestion that caused the first packet to be dropped.
> +	 */
> +	free_linked_skbs(skb);
> +	return sent_len;
> +}
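
Note that frag_len excludes the tunnel header, so what send_frags()
returns (and ip_tunnel_xmit() hands back to its caller) is payload
bytes rather than on-the-wire bytes — presumably what the caller wants
for its tx accounting, but worth a comment.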
> +
> +static struct sk_buff *handle_offloads(struct sk_buff *skb)
> +{
> +	int err;
> +
> +	if (skb_is_gso(skb)) {
> +		return skb;
> +	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
> +		/* Pages aren't locked and could change at any time.
> +		 * If this happens after we compute the checksum, the
> +		 * checksum will be wrong.  We linearize now to avoid
> +		 * this problem.
> +		 */
> +		err = __skb_linearize(skb);
> +		if (unlikely(err))
> +			goto error;
> +
> +		err = skb_checksum_help(skb);
> +		if (unlikely(err))
> +			goto error;
> +	}
> +
> +	skb->ip_summed = CHECKSUM_NONE;
> +
> +	return skb;
> +
> +error:
> +	kfree_skb(skb);
> +	return ERR_PTR(err);
> +}
> +
> +int ip_tunnel_xmit(struct sk_buff *skb,
> +		  const struct iphdr *tiph,
> +		  const struct tnl_ptk_info *tpi,
> +		  struct sk_buff *(*build_header)(struct sk_buff *skb,
> +					const struct tnl_ptk_info *tpi))
> +{
> +	int sent_len = 0;
> +	struct dst_entry *dst = skb_dst(skb);
> +	int tunnel_hlen = tpi->hdr_len + sizeof(struct iphdr);
> +
> +	skb = handle_offloads(skb);
> +	if (IS_ERR(skb))
> +		goto error;
> +
> +	while (skb) {
> +		struct iphdr *iph;
> +		struct sk_buff *next_skb = skb->next;
> +
> +		skb->next = NULL;
> +
> +		if (vlan_tx_tag_present(skb)) {
> +			skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb));
> +			if (unlikely(!skb))
> +				goto next;
> +		}
> +
> +		skb_push(skb, tunnel_hlen);
> +		skb_reset_network_header(skb);
> +		skb_set_transport_header(skb, sizeof(struct iphdr));
> +
> +		if (next_skb)
> +			skb_dst_set(skb, dst_clone(dst));
> +		else
> +			skb_dst_set(skb, dst);
> +
> +		/* Push IP header. */
> +		iph = ip_hdr(skb);
> +		*iph = *tiph;
> +
> +		ip_select_ident(iph, dst, NULL);
> +
> +		/* Push Tunnel header. */
> +		skb = (*build_header)(skb, tpi);
> +		if (!skb)
> +			goto next;
> +
> +		sent_len += send_frags(skb, tunnel_hlen);
> +
> +next:
> +		skb = next_skb;
> +	}
> +
> +	return sent_len;
> +error:
> +	dst_release(dst);
> +	return 0;
> +}
> +EXPORT_SYMBOL(ip_tunnel_xmit);
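
To check that I follow the intended split between the generic and the
protocol parts of the xmit path: a protocol module routes the packet
and composes the outer IP header with ip_tunnel_build_iphdr(), then
hands its own header constructor to ip_tunnel_xmit(). A minimal sketch
of the GRE side (not the actual ip_gre.c hunk — the tpi initialisation
here is my guess from the fields this file uses):

	static netdev_tx_t gre_dev_xmit(struct sk_buff *skb,
					struct net_device *dev)
	{
		struct ip_tunnel *tunnel = netdev_priv(dev);
		struct iphdr niph;
		struct tnl_ptk_info tpi = {
			.flags   = tunnel->parms.o_flags,
			.proto   = skb->protocol,
			.seq     = htonl(tunnel->o_seqno),
			.hdr_len = tunnel->hlen,
		};

		/* Routes the packet and fills in niph; frees skb on error. */
		if (ip_tunnel_build_iphdr(skb, dev, &tunnel->parms.iph,
					  tunnel->hlen, &niph) < 0)
			return NETDEV_TX_OK;

		/* Pushes niph plus the GRE header built by the callback. */
		ip_tunnel_xmit(skb, &niph, &tpi, gre_build_header);
		return NETDEV_TX_OK;
	}

Is that the shape you had in mind?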
> +
> +int ip_tunnel_bind_dev(struct net_device *dev)
> +{
> +	struct net_device *tdev = NULL;
> +	struct ip_tunnel *tunnel = netdev_priv(dev);
> +	const struct iphdr *iph;
> +	int hlen = LL_MAX_HEADER;
> +	int mtu = ETH_DATA_LEN;
> +	int t_hlen = tunnel->hlen + sizeof(struct iphdr);
> +
> +	iph = &tunnel->parms.iph;
> +
> +	/* Guess output device to choose reasonable mtu and needed_headroom */
> +
> +	if (iph->daddr) {
> +		struct flowi4 fl4;
> +		struct rtable *rt;
> +
> +		rt = ip_route_output_tunnel(dev_net(dev), &fl4,
> +					 tunnel->parms.iph.protocol,
> +					 iph->daddr, iph->saddr,
> +					 tunnel->parms.o_key,
> +					 RT_TOS(iph->tos),
> +					 tunnel->parms.link);
> +		if (!IS_ERR(rt)) {
> +			tdev = rt->dst.dev;
> +			ip_rt_put(rt);
> +		}
> +
> +	}
> +
> +	if (!tdev && tunnel->parms.link)
> +		tdev = __dev_get_by_index(dev_net(dev), tunnel->parms.link);
> +
> +	if (tdev) {
> +		hlen = tdev->hard_header_len + tdev->needed_headroom;
> +		mtu = tdev->mtu;
> +	}
> +	dev->iflink = tunnel->parms.link;
> +
> +	dev->needed_headroom = t_hlen + hlen;
> +	mtu -= (dev->hard_header_len + t_hlen);
> +
> +	if (mtu < 68)
> +		mtu = 68;
> +
> +	return mtu;
> +}
> +EXPORT_SYMBOL(ip_tunnel_bind_dev);
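
Worked example, assuming the underlying device is Ethernet with mtu
1500 and the tunnel is plain ARPHRD_IPGRE (hard_header_len 0) with key
and checksum enabled: hlen = 4 + 4 + 4 = 12, t_hlen = 12 + 20 = 32,
so the suggested mtu is 1500 - (0 + 32) = 1468, with 68 as the floor.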
> +
> +int ip_tunnel_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
> +{
> +	int err = 0;
> +	struct ip_tunnel_parm p;
> +	struct ip_tunnel *t;
> +	struct net *net = dev_net(dev);
> +	struct ip_tunnel *tunnel = netdev_priv(dev);
> +	struct ip_tunnel_net *itn = net_generic(net, tunnel->ipt_net_id);
> +
> +	BUG_ON(!itn->fb_tunnel_dev);
> +	switch (cmd) {
> +	case SIOCGETTUNNEL:
> +		t = NULL;
> +		if (dev == itn->fb_tunnel_dev) {
> +			err = itn->ops->get_ioctl_param(dev, ifr, &p);
> +			if (err)
> +				break;
> +
> +			t = ip_tunnel_find(itn, &p, itn->fb_tunnel_dev->type);
> +		}
> +		if (t == NULL)
> +			t = netdev_priv(dev);
> +		memcpy(&p, &t->parms, sizeof(p));
> +		if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
> +			err = -EFAULT;
> +		break;
> +
> +	case SIOCADDTUNNEL:
> +	case SIOCCHGTUNNEL:
> +		err = -EPERM;
> +		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
> +			goto done;
> +
> +		err = itn->ops->get_ioctl_param(dev, ifr, &p);
> +		if (err)
> +			break;
> +
> +		t = ip_tunnel_find(itn, &p, itn->fb_tunnel_dev->type);
> +
> +		if (!t && (cmd == SIOCADDTUNNEL))
> +			t = ip_tunnel_create(net, itn, &p);
> +
> +		if (dev != itn->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
> +			if (t != NULL) {
> +				if (t->dev != dev) {
> +					err = -EEXIST;
> +					break;
> +				}
> +			} else {
> +				unsigned int nflags = 0;
> +
> +				if (ipv4_is_multicast(p.iph.daddr))
> +					nflags = IFF_BROADCAST;
> +				else if (p.iph.daddr)
> +					nflags = IFF_POINTOPOINT;
> +
> +				if ((dev->flags ^ nflags) &
> +				    (IFF_POINTOPOINT | IFF_BROADCAST)) {
> +					err = -EINVAL;
> +					break;
> +				}
> +
> +				t = netdev_priv(dev);
> +				ip_tunnel_unlink(itn, t);
> +				synchronize_net();
> +
> +				t->parms.iph.saddr = p.iph.saddr;
> +				t->parms.iph.daddr = p.iph.daddr;
> +				t->parms.i_key = p.i_key;
> +				t->parms.o_key = p.o_key;
> +				memcpy(dev->dev_addr, &p.iph.saddr, 4);
> +				memcpy(dev->broadcast, &p.iph.daddr, 4);
> +				ip_tunnel_link(itn, t);
> +				netdev_state_change(dev);
> +			}
> +		}
> +
> +		if (t) {
> +			err = 0;
> +			if (cmd == SIOCCHGTUNNEL) {
> +				t->parms.iph.ttl = p.iph.ttl;
> +				t->parms.iph.tos = p.iph.tos;
> +				t->parms.iph.frag_off = p.iph.frag_off;
> +				if (t->parms.link != p.link) {
> +					t->parms.link = p.link;
> +					dev->mtu = ip_tunnel_bind_dev(dev);
> +					netdev_state_change(dev);
> +				}
> +			}
> +			if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms, sizeof(p)))
> +				err = -EFAULT;
> +		} else
> +			err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
> +		break;
> +
> +	case SIOCDELTUNNEL:
> +		err = -EPERM;
> +		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
> +			goto done;
> +
> +		if (dev == itn->fb_tunnel_dev) {
> +			err = itn->ops->get_ioctl_param(dev, ifr, &p);
> +			if (err)
> +				break;
> +
> +			err = -ENOENT;
> +			t = ip_tunnel_find(itn, &p, itn->fb_tunnel_dev->type);
> +			if (t == NULL)
> +				goto done;
> +			err = -EPERM;
> +			if (t == netdev_priv(itn->fb_tunnel_dev))
> +				goto done;
> +			dev = t->dev;
> +		}
> +		unregister_netdevice(dev);
> +		err = 0;
> +		break;
> +
> +	default:
> +		err = -EINVAL;
> +	}
> +
> +done:
> +	return err;
> +}
> +EXPORT_SYMBOL(ip_tunnel_ioctl);
> +
> +int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu)
> +{
> +	struct ip_tunnel *tunnel = netdev_priv(dev);
> +	int t_hlen = tunnel->hlen + sizeof(struct iphdr);
> +
> +	if (new_mtu < 68 ||
> +	    new_mtu > 0xFFF8 - dev->hard_header_len - t_hlen)
> +		return -EINVAL;
> +	dev->mtu = new_mtu;
> +	return 0;
> +}
> +EXPORT_SYMBOL(ip_tunnel_change_mtu);
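
(0xFFF8 being the maximum IP datagram size rounded down to an 8-byte
fragment boundary — the subtraction ensures the encapsulated packet
still fits in one outer datagram.)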
> +
> +void ip_tunnel_dev_free(struct net_device *dev)
> +{
> +	struct ip_tunnel *tunnel = netdev_priv(dev);
> +
> +	gro_cells_destroy(&tunnel->gro_cells);
> +	free_percpu(dev->tstats);
> +	free_netdev(dev);
> +}
> +EXPORT_SYMBOL(ip_tunnel_dev_free);
> +
> +void  ip_tunnel_dellink(struct net_device *dev, struct list_head *head)
> +{
> +	struct net *net = dev_net(dev);
> +	struct ip_tunnel *tunnel = netdev_priv(dev);
> +	struct ip_tunnel_net *itn;
> +
> +	itn = net_generic(net, tunnel->ipt_net_id);
> +
> +	if (itn->fb_tunnel_dev != dev) {
> +		ip_tunnel_unlink(itn, netdev_priv(dev));
> +		unregister_netdevice_queue(dev, head);
> +	}
> +}
> +EXPORT_SYMBOL(ip_tunnel_dellink);
> +
> +int __net_init ip_tunnel_init_net(struct net *net, int ipt_net_id,
> +				  struct rtnl_link_ops *ops)
> +{
> +	struct ip_tunnel_net *itn = net_generic(net, ipt_net_id);
> +	struct ip_tunnel_parm parms;
> +
> +	itn->tunnels = kcalloc(IPT_HASH_SIZE, sizeof(struct ip_tunnel __rcu *),
> +			       GFP_KERNEL);
> +	if (!itn->tunnels)
> +		return -ENOMEM;
> +
> +	if (!ops) {
> +		itn->fb_tunnel_dev = NULL;
> +		return 0;
> +	}
> +	memset(&parms, 0, sizeof(parms));
> +	rtnl_lock();
> +	itn->fb_tunnel_dev = __ip_tunnel_create(net, ops, &parms);
> +	if (IS_ERR(itn->fb_tunnel_dev)) {
> +		rtnl_unlock();
> +		kfree(itn->tunnels);
> +		return PTR_ERR(itn->fb_tunnel_dev);
> +	}
> +
> +	rtnl_unlock();
> +	return 0;
> +}
> +EXPORT_SYMBOL(ip_tunnel_init_net);
> +
> +static void ip_tunnel_destroy(struct ip_tunnel_net *itn, struct list_head *head)
> +{
> +	int h;
> +
> +	for (h = 0; h < IPT_HASH_SIZE; h++) {
> +		struct ip_tunnel *t;
> +
> +		t = rtnl_dereference(itn->tunnels[h]);
> +
> +		while (t != NULL) {
> +			unregister_netdevice_queue(t->dev, head);
> +			t = rtnl_dereference(t->next);
> +		}
> +	}
> +	if (itn->fb_tunnel_dev)
> +		unregister_netdevice_queue(itn->fb_tunnel_dev, head);
> +}
> +
> +void __net_exit ip_tunnel_delete_net(struct ip_tunnel_net *itn)
> +{
> +	LIST_HEAD(list);
> +
> +	rtnl_lock();
> +	ip_tunnel_destroy(itn, &list);
> +	unregister_netdevice_many(&list);
> +	rtnl_unlock();
> +	kfree(itn->tunnels);
> +}
> +EXPORT_SYMBOL(ip_tunnel_delete_net);
> +
> +int ip_tunnel_newlink(struct net *src_net, struct net_device *dev,
> +		      struct nlattr *tb[], struct nlattr *data[])
> +{
> +	struct ip_tunnel *nt;
> +	struct net *net = dev_net(dev);
> +	struct ip_tunnel_net *itn;
> +	int mtu;
> +	int err;
> +
> +	nt = netdev_priv(dev);
> +	itn = net_generic(net, nt->ipt_net_id);
> +	err = itn->ops->parse_netlink_parms(nt, data, tb, &nt->parms);
> +	if (err)
> +		return err;
> +
> +	if (ip_tunnel_find(itn, &nt->parms, dev->type))
> +		return -EEXIST;
> +
> +	err = register_netdevice(dev);
> +	if (err)
> +		goto out;
> +
> +	if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
> +		eth_hw_addr_random(dev);
> +
> +	mtu = ip_tunnel_bind_dev(dev);
> +	if (!tb[IFLA_MTU])
> +		dev->mtu = mtu;
> +
> +	ip_tunnel_link(itn, nt);
> +
> +out:
> +	return err;
> +}
> +EXPORT_SYMBOL(ip_tunnel_newlink);
> +
> +int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[],
> +			    struct nlattr *data[])
> +{
> +	struct ip_tunnel *t, *nt;
> +	struct net *net = dev_net(dev);
> +	struct ip_tunnel *tunnel = netdev_priv(dev);
> +	struct ip_tunnel_net *itn = net_generic(net, tunnel->ipt_net_id);
> +	struct ip_tunnel_parm p;
> +	int mtu;
> +	int err;
> +
> +	if (dev == itn->fb_tunnel_dev)
> +		return -EINVAL;
> +
> +	nt = netdev_priv(dev);
> +	err = itn->ops->parse_netlink_parms(nt, data, tb, &p);
> +	if (err)
> +		return err;
> +
> +	t = ip_tunnel_find(itn, &p, dev->type);
> +
> +	if (t) {
> +		if (t->dev != dev)
> +			return -EEXIST;
> +	} else {
> +		t = nt;
> +
> +		if (dev->type != ARPHRD_ETHER) {
> +			unsigned int nflags = 0;
> +
> +			if (ipv4_is_multicast(p.iph.daddr))
> +				nflags = IFF_BROADCAST;
> +			else if (p.iph.daddr)
> +				nflags = IFF_POINTOPOINT;
> +
> +			if ((dev->flags ^ nflags) &
> +			    (IFF_POINTOPOINT | IFF_BROADCAST))
> +				return -EINVAL;
> +		}
> +
> +		ip_tunnel_unlink(itn, t);
> +		t->parms.iph.saddr = p.iph.saddr;
> +		t->parms.iph.daddr = p.iph.daddr;
> +		t->parms.i_key = p.i_key;
> +		if (dev->type != ARPHRD_ETHER) {
> +			memcpy(dev->dev_addr, &p.iph.saddr, 4);
> +			memcpy(dev->broadcast, &p.iph.daddr, 4);
> +		}
> +		ip_tunnel_link(itn, t);
> +		netdev_state_change(dev);
> +	}
> +
> +	t->parms.o_key = p.o_key;
> +	t->parms.iph.ttl = p.iph.ttl;
> +	t->parms.iph.tos = p.iph.tos;
> +	t->parms.iph.frag_off = p.iph.frag_off;
> +
> +	if (t->parms.link != p.link) {
> +		t->parms.link = p.link;
> +		mtu = ip_tunnel_bind_dev(dev);
> +		if (!tb[IFLA_MTU])
> +			dev->mtu = mtu;
> +		netdev_state_change(dev);
> +	}
> +
> +	return 0;
> +}
> +EXPORT_SYMBOL(ip_tunnel_changelink);
> +
> +int ip_tunnel_init(struct net_device *dev)
> +{
> +	struct ip_tunnel *tunnel = netdev_priv(dev);
> +	struct iphdr *iph;
> +	int err;
> +
> +	dev->destructor	= ip_tunnel_dev_free;
> +	dev->tstats = alloc_percpu(struct pcpu_tstats);
> +	if (!dev->tstats)
> +		return -ENOMEM;
> +
> +	err = gro_cells_init(&tunnel->gro_cells, dev);
> +	if (err) {
> +		free_percpu(dev->tstats);
> +		return err;
> +	}
> +
> +	tunnel->dev = dev;
> +	strcpy(tunnel->parms.name, dev->name);
> +
> +	iph = &tunnel->parms.iph;
> +	iph->version		= 4;
> +	iph->ihl		= 5;
> +
> +	/* Can use a lockless transmit, unless we generate output sequences */
> +	if (!(tunnel->parms.o_flags & TUNNEL_SEQ))
> +		dev->features |= NETIF_F_LLTX;
> +
> +	return 0;
> +}
> +EXPORT_SYMBOL(ip_tunnel_init);
> +
> +void ip_tunnel_uninit(struct net_device *dev)
> +{
> +	struct net *net = dev_net(dev);
> +	struct ip_tunnel *tunnel = netdev_priv(dev);
> +	struct ip_tunnel_net *itn;
> +
> +	itn = net_generic(net, tunnel->ipt_net_id);
> +	/* fb_tunnel_dev will be unregistered in the net exit path. */
> +	if (itn->fb_tunnel_dev != dev)
> +		ip_tunnel_unlink(itn, netdev_priv(dev));
> +}
> +EXPORT_SYMBOL(ip_tunnel_uninit);
> +
> +struct hlist_head ipt_proto[IPT_HASH_BUCKETS];
> +EXPORT_SYMBOL(ipt_proto);
> +
> +int ipt_add_protocol(struct ipt_protocol *newp)
> +{
> +	struct hlist_head *head;
> +	struct ipt_protocol *ipt = NULL;
> +	struct hlist_node *n;
> +	ASSERT_RTNL();
> +
> +	head = ipt_hash_bucket(newp->type, newp->portno);
> +
> +	hlist_for_each_entry_rcu(ipt, n, head, node) {
> +		if (ipt->type != newp->type || ipt->portno != newp->portno)
> +			continue;
> +		if (ipt->priority > newp->priority) {
> +			hlist_add_before_rcu(&newp->node, &ipt->node);
> +			return 0;
> +		}
> +		if (ipt->priority == newp->priority)
> +			return -EEXIST;
> +	}
> +	if (ipt)
> +		hlist_add_after_rcu(&newp->node, &ipt->node);
> +	else
> +		hlist_add_head_rcu(&newp->node, head);
> +	return 0;
> +}
> +EXPORT_SYMBOL_GPL(ipt_add_protocol);
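
If I read the demux right, two modules can now claim the same protocol
as long as their priorities differ (equal priorities are rejected, and
a lower number sorts earlier in the bucket, so I assume it is tried
first on receive). A hypothetical second consumer — every name below
is invented for illustration — would register roughly like this:

	static struct ipt_protocol ovs_gre_protocol = {
		.handler	= ovs_gre_rcv,	/* hypothetical rx hook */
		.err_handler	= ovs_gre_err,	/* hypothetical err hook */
		.portno		= 0,
		.priority	= 1,	/* must differ from ip_gre's 0 */
		.type		= IPT_GRE,
	};

	rtnl_lock();
	err = ipt_add_protocol(&ovs_gre_protocol);
	rtnl_unlock();

Given that, who wins when both ip_gre and a priority-1 handler accept
the packet — does the receive path stop at the first handler that
returns success?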
> +
> +void ipt_del_protocol(struct ipt_protocol *proto)
> +{
> +	ASSERT_RTNL();
> +
> +	hlist_del_rcu(&proto->node);
> +	synchronize_rcu();
> +}
> +EXPORT_SYMBOL_GPL(ipt_del_protocol);
> +
> +static int __init ip_tunnel_mod_init(void)
> +{
> +	pr_info("IP_Tunnel init\n");
> +	get_random_bytes(&ip_tunnel_salt, sizeof(ip_tunnel_salt));
> +	return 0;
> +}
> +
> +static void __exit ip_tunnel_fini(void)
> +{
> +}
> +
> +module_init(ip_tunnel_mod_init);
> +module_exit(ip_tunnel_fini);
> +MODULE_LICENSE("GPL");
> diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
> index fb828e9..1b19ba6 100644
> --- a/net/ipv6/ip6_tunnel.c
> +++ b/net/ipv6/ip6_tunnel.c
> @@ -47,6 +47,7 @@
>  
>  #include <net/icmp.h>
>  #include <net/ip.h>
> +#include <net/ipip.h>
>  #include <net/ipv6.h>
>  #include <net/ip6_route.h>
>  #include <net/addrconf.h>
> -- 
> 1.7.1
> 
> _______________________________________________
> dev mailing list
> dev at openvswitch.org
> http://openvswitch.org/mailman/listinfo/dev
> 

-- 
yamahata


