[ovs-dev] [PATCH] datapath: Integration with upstream kernel tunneling.

Pravin B Shelar pshelar at nicira.com
Sat Mar 30 16:20:07 UTC 2013


The following patch restructures OVS tunneling to make use of the kernel
tunneling API. By doing this, the tunneling code is simplified, as most
of the protocol processing on send and receive is pushed into the kernel
tunneling layer. This way we can share most protocol-related code between
openvswitch tunneling and Linux tunnel devices.

Signed-off-by: Pravin B Shelar <pshelar at nicira.com>
---
 acinclude.m4                                    |    4 +
 datapath/datapath.c                             |   20 ++-
 datapath/linux/Modules.mk                       |    9 +-
 datapath/linux/compat/gre.c                     |  375 +++++++++++++++++++++++
 datapath/linux/compat/include/linux/netdevice.h |   26 ++
 datapath/linux/compat/include/linux/skbuff.h    |    7 +-
 datapath/linux/compat/include/net/gre.h         |  118 +++++++
 datapath/linux/compat/include/net/ip_tunnels.h  |   69 +++++
 datapath/linux/compat/include/net/vxlan.h       |   45 +++
 datapath/linux/compat/vxlan.c                   |  311 +++++++++++++++++++
 datapath/tunnel.c                               |  307 +++++++++----------
 datapath/tunnel.h                               |   42 +---
 datapath/vport-gre.c                            |  279 +++++++----------
 datapath/vport-lisp.c                           |  258 +++++++---------
 datapath/vport-vxlan.c                          |  277 +++++-------------
 datapath/vport.c                                |   15 +
 datapath/vport.h                                |    3 +
 17 files changed, 1448 insertions(+), 717 deletions(-)
 create mode 100644 datapath/linux/compat/gre.c
 create mode 100644 datapath/linux/compat/include/net/gre.h
 create mode 100644 datapath/linux/compat/include/net/ip_tunnels.h
 create mode 100644 datapath/linux/compat/include/net/vxlan.h
 create mode 100644 datapath/linux/compat/vxlan.c

diff --git a/acinclude.m4 b/acinclude.m4
index 19a47dd..c30855f 100644
--- a/acinclude.m4
+++ b/acinclude.m4
@@ -254,6 +254,8 @@ AC_DEFUN([OVS_CHECK_LINUX_COMPAT], [
   OVS_GREP_IFELSE([$KSRC/include/linux/skbuff.h], [consume_skb])
   OVS_GREP_IFELSE([$KSRC/include/linux/skbuff.h], [skb_frag_page])
   OVS_GREP_IFELSE([$KSRC/include/linux/skbuff.h], [skb_reset_mac_len])
+  OVS_GREP_IFELSE([$KSRC/include/linux/skbuff.h], [SKB_GSO_GRE])
+  OVS_GREP_IFELSE([$KSRC/include/linux/skbuff.h], [SKB_GSO_UDP_TUNNEL])
 
   OVS_GREP_IFELSE([$KSRC/include/linux/string.h], [kmemdup], [],
                   [OVS_GREP_IFELSE([$KSRC/include/linux/slab.h], [kmemdup])])
@@ -262,6 +264,8 @@ AC_DEFUN([OVS_CHECK_LINUX_COMPAT], [
                   [OVS_DEFINE([HAVE_BOOL_TYPE])])
   OVS_GREP_IFELSE([$KSRC/include/linux/types.h], [__wsum],
                   [OVS_DEFINE([HAVE_CSUM_TYPES])])
+  OVS_GREP_IFELSE([$KSRC/include/uapi/linux/types.h], [__wsum],
+                  [OVS_DEFINE([HAVE_CSUM_TYPES])])
 
   OVS_GREP_IFELSE([$KSRC/include/net/checksum.h], [csum_replace4])
   OVS_GREP_IFELSE([$KSRC/include/net/checksum.h], [csum_unfold])
diff --git a/datapath/datapath.c b/datapath/datapath.c
index 9cd4b0e..4d9a3b0 100644
--- a/datapath/datapath.c
+++ b/datapath/datapath.c
@@ -49,8 +49,10 @@
 #include <linux/rculist.h>
 #include <linux/dmi.h>
 #include <net/genetlink.h>
+#include <net/gre.h>
 #include <net/net_namespace.h>
 #include <net/netns/generic.h>
+#include <net/vxlan.h>
 
 #include "checksum.h"
 #include "datapath.h"
@@ -61,7 +63,7 @@
 #include "vport-internal_dev.h"
 
 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) || \
-    LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0)
+    LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
 #error Kernels before 2.6.18 or after 3.8 are not supported by this version of Open vSwitch.
 #endif
 
@@ -2349,10 +2351,18 @@ static int __init dp_init(void)
 	if (err)
 		goto error_genl_exec;
 
-	err = ovs_flow_init();
+	err = gre_compat_init();
 	if (err)
 		goto error_wq;
 
+	err = vxlan_compat_init();
+	if (err)
+		goto error_gre_compat;
+
+	err = ovs_flow_init();
+	if (err)
+		goto error_vxlan_compat;
+
 	err = ovs_vport_init();
 	if (err)
 		goto error_flow_exit;
@@ -2381,6 +2391,10 @@ error_vport_exit:
 	ovs_vport_exit();
 error_flow_exit:
 	ovs_flow_exit();
+error_vxlan_compat:
+	vxlan_compat_cleanup();
+error_gre_compat:
+	gre_compat_exit();
 error_wq:
 	ovs_workqueues_exit();
 error_genl_exec:
@@ -2398,6 +2412,8 @@ static void dp_cleanup(void)
 	rcu_barrier();
 	ovs_vport_exit();
 	ovs_flow_exit();
+	vxlan_compat_cleanup();
+	gre_compat_exit();
 	ovs_workqueues_exit();
 	genl_exec_exit();
 }
diff --git a/datapath/linux/Modules.mk b/datapath/linux/Modules.mk
index 4324336..a321a04 100644
--- a/datapath/linux/Modules.mk
+++ b/datapath/linux/Modules.mk
@@ -3,6 +3,7 @@ openvswitch_sources += \
 	linux/compat/dev-openvswitch.c \
 	linux/compat/exthdrs_core.c \
 	linux/compat/flex_array.c \
+	linux/compat/gre.c \
 	linux/compat/genetlink-openvswitch.c \
 	linux/compat/ip_output-openvswitch.c \
 	linux/compat/kmemdup.c \
@@ -11,7 +12,8 @@ openvswitch_sources += \
 	linux/compat/reciprocal_div.c \
 	linux/compat/skbuff-openvswitch.c \
 	linux/compat/time.c	\
-	linux/compat/workqueue.c
+	linux/compat/workqueue.c \
+	linux/compat/vxlan.c
 openvswitch_headers += \
 	linux/compat/include/asm/percpu.h \
 	linux/compat/include/linux/compiler.h \
@@ -61,12 +63,15 @@ openvswitch_headers += \
 	linux/compat/include/net/checksum.h \
 	linux/compat/include/net/dst.h \
 	linux/compat/include/net/genetlink.h \
+	linux/compat/include/net/gre.h \
 	linux/compat/include/net/inet_frag.h \
 	linux/compat/include/net/ip.h \
+	linux/compat/include/net/ip_tunnels.h \
 	linux/compat/include/net/ipv6.h \
 	linux/compat/include/net/net_namespace.h \
 	linux/compat/include/net/netlink.h \
 	linux/compat/include/net/protocol.h \
 	linux/compat/include/net/route.h \
 	linux/compat/include/net/sock.h \
-	linux/compat/include/net/netns/generic.h
+	linux/compat/include/net/netns/generic.h \
+	linux/compat/include/net/vxlan.h
diff --git a/datapath/linux/compat/gre.c b/datapath/linux/compat/gre.c
new file mode 100644
index 0000000..d2e6fe2
--- /dev/null
+++ b/datapath/linux/compat/gre.c
@@ -0,0 +1,375 @@
+
+#include <linux/module.h>
+#include <linux/if.h>
+#include <linux/icmp.h>
+#include <linux/kernel.h>
+#include <linux/kmod.h>
+#include <linux/skbuff.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/netdevice.h>
+#include <linux/if_tunnel.h>
+#include <linux/spinlock.h>
+#include <net/protocol.h>
+#include <net/gre.h>
+#include <net/icmp.h>
+#include <net/route.h>
+#include <net/xfrm.h>
+
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(3,9,0)
+static struct gre_cisco_protocol __rcu *gre_cisco_proto;
+static DEFINE_SPINLOCK(gre_proto_lock);
+
+static int gre_handle_csum_offloads(struct sk_buff *skb, bool gre_csum)
+{
+	if (skb->ip_summed == CHECKSUM_PARTIAL && gre_csum) {
+		int err;
+
+		err = skb_checksum_help(skb);
+		return err;
+	} else if (skb->ip_summed != CHECKSUM_PARTIAL)
+		skb->ip_summed = CHECKSUM_NONE;
+	return 0;
+}
+
+#ifdef HAVE_SKB_GSO_GRE
+struct sk_buff *gre_handle_offloads(struct sk_buff *skb, bool gre_csum)
+{
+	int err;
+
+	if (likely(!skb->encapsulation)) {
+		skb_reset_inner_headers(skb);
+		skb->encapsulation = 1;
+	}
+
+	if (skb_is_gso(skb)) {
+		err = skb_unclone(skb, GFP_ATOMIC);
+		if (unlikely(err))
+			goto error;
+		skb_shinfo(skb)->gso_type |= SKB_GSO_GRE;
+		return skb;
+	}
+	err = gre_handle_csum_offloads(skb, gre_csum);
+	if (err)
+		goto error;
+
+	return skb;
+error:
+	kfree_skb(skb);
+	return ERR_PTR(err);
+}
+
+static bool is_gre_gso(struct sk_buff *skb)
+{
+	return skb_shinfo(skb)->gso_type & SKB_GSO_GRE;
+}
+#else
+struct sk_buff *gre_handle_offloads(struct sk_buff *skb, bool gre_csum)
+{
+	int err;
+
+	if (skb_is_gso(skb)) {
+		struct sk_buff *nskb;
+
+		nskb = __skb_gso_segment(skb, 0, false);
+		if (IS_ERR(nskb)) {
+			kfree_skb(skb);
+			return nskb;
+		}
+
+		consume_skb(skb);
+		skb = nskb;
+		return skb;
+	}
+	err = gre_handle_csum_offloads(skb, gre_csum);
+	if (err)
+		goto error;
+
+	return skb;
+error:
+	kfree_skb(skb);
+	return ERR_PTR(err);
+
+}
+
+static bool is_gre_gso(struct sk_buff *skb)
+{
+	return false;
+}
+#endif
+
+void gre_build_header(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
+		      int hdr_len)
+{
+	struct gre_base_hdr *greh;
+
+	skb_push(skb, hdr_len);
+
+	greh = (struct gre_base_hdr *)skb->data;
+	greh->flags = tnl_flags_to_gre_flags(tpi->flags);
+	greh->protocol = tpi->proto;
+
+	if (tpi->flags&(TUNNEL_KEY|TUNNEL_CSUM|TUNNEL_SEQ)) {
+		__be32 *ptr = (__be32 *)(((u8 *)greh) + hdr_len - 4);
+
+		if (tpi->flags&TUNNEL_SEQ) {
+			*ptr = tpi->seq;
+			ptr--;
+		}
+		if (tpi->flags&TUNNEL_KEY) {
+			*ptr = tpi->key;
+			ptr--;
+		}
+		if (tpi->flags&TUNNEL_CSUM && !is_gre_gso(skb)) {
+			*(__sum16 *)ptr = 0;
+			*(__sum16 *)ptr = csum_fold(skb_checksum(skb, 0,
+								 skb->len, 0));
+		}
+	}
+}
+
+static __sum16 check_checksum(struct sk_buff *skb)
+{
+	__sum16 csum = 0;
+
+	switch (skb->ip_summed) {
+	case CHECKSUM_COMPLETE:
+		csum = csum_fold(skb->csum);
+
+		if (!csum)
+			break;
+		/* Fall through. */
+
+	case CHECKSUM_NONE:
+		skb->csum = 0;
+		csum = __skb_checksum_complete(skb);
+		skb->ip_summed = CHECKSUM_COMPLETE;
+		break;
+	}
+
+	return csum;
+}
+
+static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
+			    bool *csum_err)
+{
+	struct iphdr *iph = ip_hdr(skb);
+	struct gre_base_hdr *greh;
+	__be32 *options;
+	int hdr_len;
+
+	if (unlikely(!pskb_may_pull(skb, sizeof(struct gre_base_hdr))))
+		return -EINVAL;
+
+	greh = (struct gre_base_hdr *)((u8 *)iph + (iph->ihl << 2));
+	if (unlikely(greh->flags & (GRE_VERSION | GRE_ROUTING)))
+		return -EINVAL;
+
+	tpi->flags = gre_flags_to_tnl_flags(greh->flags);
+	hdr_len = ip_gre_calc_hlen(tpi->flags);
+
+	if (!pskb_may_pull(skb, hdr_len))
+		return -EINVAL;
+
+	tpi->proto = greh->protocol;
+
+	options = (__be32 *)(greh + 1);
+	if (greh->flags & GRE_CSUM) {
+		if (check_checksum(skb)) {
+			*csum_err = true;
+			return -EINVAL;
+		}
+		options++;
+	}
+
+	if (greh->flags & GRE_KEY) {
+		tpi->key = *options;
+		options++;
+	} else
+		tpi->key = 0;
+
+	if (unlikely(greh->flags & GRE_SEQ)) {
+		tpi->seq = *options;
+		options++;
+	} else
+		tpi->seq = 0;
+
+	/* WCCP version 1 and 2 protocol decoding.
+	 * - Change protocol to IP
+	 * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header
+	 */
+	if (greh->flags == 0 && tpi->proto == htons(ETH_P_WCCP)) {
+		tpi->proto = htons(ETH_P_IP);
+		if ((*(u8 *)options & 0xF0) != 0x40) {
+			hdr_len += 4;
+			if (!pskb_may_pull(skb, hdr_len))
+				return -EINVAL;
+		}
+	}
+	return 0;
+}
+
+static int gre_cisco_rcv(struct sk_buff *skb)
+{
+	struct tnl_ptk_info tpi;
+	bool csum_err = false;
+	struct gre_cisco_protocol *proto;
+
+	rcu_read_lock();
+	proto = rcu_dereference(gre_cisco_proto);
+	if (!proto)
+		goto drop;
+
+	if (parse_gre_header(skb, &tpi, &csum_err) < 0)
+		goto drop;
+	proto->handler(skb, &tpi);
+	rcu_read_unlock();
+	return 0;
+
+drop:
+	rcu_read_unlock();
+	kfree_skb(skb);
+	return 0;
+}
+
+int gre_cisco_register(struct gre_cisco_protocol *newp)
+{
+	int err;
+
+	spin_lock(&gre_proto_lock);
+	if (rcu_dereference(gre_cisco_proto)) {
+		err = -EEXIST;
+		goto out;
+	}
+	err = 0;
+	rcu_assign_pointer(gre_cisco_proto, newp);
+out:
+	spin_unlock(&gre_proto_lock);
+	return 0;
+}
+
+void gre_cisco_unregister(struct gre_cisco_protocol *proto)
+{
+	spin_lock(&gre_proto_lock);
+	rcu_assign_pointer(gre_cisco_proto, NULL);
+	spin_unlock(&gre_proto_lock);
+	synchronize_net();
+}
+
+static const struct gre_protocol ipgre_protocol = {
+	.handler	=	gre_cisco_rcv,
+};
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37)
+static const struct gre_protocol __rcu *gre_proto[GREPROTO_MAX] __read_mostly;
+
+int gre_add_protocol(const struct gre_protocol *proto, u8 version)
+{
+	if (version >= GREPROTO_MAX)
+		goto err_out;
+
+	spin_lock(&gre_proto_lock);
+	if (gre_proto[version])
+		goto err_out_unlock;
+
+	rcu_assign_pointer(gre_proto[version], proto);
+	spin_unlock(&gre_proto_lock);
+	return 0;
+
+err_out_unlock:
+	spin_unlock(&gre_proto_lock);
+err_out:
+	return -1;
+}
+
+int gre_del_protocol(const struct gre_protocol *proto, u8 version)
+{
+	if (version >= GREPROTO_MAX)
+		goto err_out;
+
+	spin_lock(&gre_proto_lock);
+	if (rcu_dereference_protected(gre_proto[version],
+				lockdep_is_held(&gre_proto_lock)) != proto)
+		goto err_out_unlock;
+	rcu_assign_pointer(gre_proto[version], NULL);
+	spin_unlock(&gre_proto_lock);
+	synchronize_rcu();
+	return 0;
+
+err_out_unlock:
+	spin_unlock(&gre_proto_lock);
+err_out:
+	return -1;
+}
+
+static int gre_rcv(struct sk_buff *skb)
+{
+	const struct gre_protocol *proto;
+	u8 ver;
+	int ret;
+
+	printk("pbs: %d %s\n",__LINE__,__func__);
+	if (!pskb_may_pull(skb, 12))
+		goto drop;
+
+	ver = skb->data[1]&0x7f;
+	if (ver >= GREPROTO_MAX)
+		goto drop;
+
+	rcu_read_lock();
+	proto = rcu_dereference(gre_proto[ver]);
+	if (!proto || !proto->handler)
+		goto drop_unlock;
+	ret = proto->handler(skb);
+	rcu_read_unlock();
+	return ret;
+
+drop_unlock:
+	rcu_read_unlock();
+drop:
+	kfree_skb(skb);
+	return NET_RX_DROP;
+}
+
+static const struct net_protocol net_gre_protocol = {
+        .handler     = gre_rcv,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32)
+        .netns_ok    = 1,
+#endif
+};
+
+int gre_compat_init(void)
+{
+        pr_info("GRE over IPv4 demultiplexor driver\n");
+
+        if (inet_add_protocol(&net_gre_protocol, IPPROTO_GRE) < 0) {
+                pr_err("can't add protocol\n");
+                return -EAGAIN;
+        }
+
+        return 0;
+}
+
+void gre_compat_exit(void)
+{
+        inet_del_protocol(&net_gre_protocol, IPPROTO_GRE);
+}
+
+#else
+int gre_compat_init(void)
+{
+	int err;
+
+	err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO);
+	if (err)
+		pr_warn("cannot register gre protocol handler\n");
+
+	return err;
+}
+
+void gre_compat_exit(void)
+{
+	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
+}
+#endif
+#endif
diff --git a/datapath/linux/compat/include/linux/netdevice.h b/datapath/linux/compat/include/linux/netdevice.h
index 71aad87..4ffdac4 100644
--- a/datapath/linux/compat/include/linux/netdevice.h
+++ b/datapath/linux/compat/include/linux/netdevice.h
@@ -167,4 +167,30 @@ static inline struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
 }
 #endif
 
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0)
+static inline struct sk_buff *rpl__skb_gso_segment(struct sk_buff *skb,
+						netdev_features_t features,
+						bool tx_path)
+{
+	struct sk_buff *nskb;
+	char cb[48];
+
+	/* From 3.9 kernel skb->cb is used by skb gso. Therefore
+	 * make copy of it to restore it. */
+
+	memcpy(cb, skb->cb, sizeof(skb->cb));
+	nskb = __skb_gso_segment(skb, 0, false);
+	if (IS_ERR(nskb))
+		return nskb;
+
+	skb = nskb;
+	while (nskb) {
+		memcpy(nskb->cb, cb, sizeof(nskb->cb));
+		nskb = nskb->next;
+	}
+	return skb;
+}
+#define __skb_gso_segment rpl__skb_gso_segment
+#endif
+
 #endif
diff --git a/datapath/linux/compat/include/linux/skbuff.h b/datapath/linux/compat/include/linux/skbuff.h
index d485b39..43c6d49 100644
--- a/datapath/linux/compat/include/linux/skbuff.h
+++ b/datapath/linux/compat/include/linux/skbuff.h
@@ -88,7 +88,12 @@ static inline int skb_clone_writable(struct sk_buff *skb, int len)
 	return false;
 }
 #endif
-
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37)
+static inline __u32 skb_get_rxhash(struct sk_buff *skb)
+{
+	return 0;
+}
+#endif
 #ifndef HAVE_SKB_DST_ACCESSOR_FUNCS
 static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
 {
diff --git a/datapath/linux/compat/include/net/gre.h b/datapath/linux/compat/include/net/gre.h
new file mode 100644
index 0000000..94a29c1
--- /dev/null
+++ b/datapath/linux/compat/include/net/gre.h
@@ -0,0 +1,118 @@
+#ifndef __LINUX_GRE_WRAPPER_H
+#define __LINUX_GRE_WRAPPER_H
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,37)
+#include_next <net/gre.h>
+#else
+#define GREPROTO_CISCO		0
+#define GREPROTO_PPTP		1
+#define GREPROTO_MAX		2
+#define GRE_IP_PROTO_MAX	2
+
+struct gre_protocol {
+	int  (*handler)(struct sk_buff *skb);
+	void (*err_handler)(struct sk_buff *skb, u32 info);
+};
+
+int gre_add_protocol(const struct gre_protocol *proto, u8 version);
+int gre_del_protocol(const struct gre_protocol *proto, u8 version);
+
+#endif
+
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(3,9,0)
+
+#include <linux/skbuff.h>
+#include <net/ip_tunnels.h>
+
+#define GRE_IP_PROTO_MAX	2
+
+struct gre_base_hdr {
+	__be16 flags;
+	__be16 protocol;
+};
+#define GRE_HEADER_SECTION 4
+
+#define MAX_GRE_PROTO_PRIORITY 255
+struct gre_cisco_protocol {
+	int (*handler)(struct sk_buff *skb, const struct tnl_ptk_info *tpi);
+	int (*err_handler)(struct sk_buff *skb, u32 info,
+			   const struct tnl_ptk_info *tpi);
+	u8 priority;
+};
+
+void gre_build_header(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
+		      int hdr_len);
+struct sk_buff *gre_handle_offloads(struct sk_buff *skb, bool gre_csum);
+
+int gre_cisco_register(struct gre_cisco_protocol *proto);
+void gre_cisco_unregister(struct gre_cisco_protocol *proto);
+int __init gre_compat_init(void);
+void gre_compat_exit(void);
+
+static inline int ip_gre_calc_hlen(__be16 o_flags)
+{
+	int addend = 4;
+
+	if (o_flags&TUNNEL_CSUM)
+		addend += 4;
+	if (o_flags&TUNNEL_KEY)
+		addend += 4;
+	if (o_flags&TUNNEL_SEQ)
+		addend += 4;
+	return addend;
+}
+
+static inline __be16 gre_flags_to_tnl_flags(__be16 flags)
+{
+	__be16 tflags = 0;
+
+	if (flags & GRE_CSUM)
+		tflags |= TUNNEL_CSUM;
+	if (flags & GRE_ROUTING)
+		tflags |= TUNNEL_ROUTING;
+	if (flags & GRE_KEY)
+		tflags |= TUNNEL_KEY;
+	if (flags & GRE_SEQ)
+		tflags |= TUNNEL_SEQ;
+	if (flags & GRE_STRICT)
+		tflags |= TUNNEL_STRICT;
+	if (flags & GRE_REC)
+		tflags |= TUNNEL_REC;
+	if (flags & GRE_VERSION)
+		tflags |= TUNNEL_VERSION;
+
+	return tflags;
+}
+
+static inline __be16 tnl_flags_to_gre_flags(__be16 tflags)
+{
+	__be16 flags = 0;
+
+	if (tflags & TUNNEL_CSUM)
+		flags |= GRE_CSUM;
+	if (tflags & TUNNEL_ROUTING)
+		flags |= GRE_ROUTING;
+	if (tflags & TUNNEL_KEY)
+		flags |= GRE_KEY;
+	if (tflags & TUNNEL_SEQ)
+		flags |= GRE_SEQ;
+	if (tflags & TUNNEL_STRICT)
+		flags |= GRE_STRICT;
+	if (tflags & TUNNEL_REC)
+		flags |= GRE_REC;
+	if (tflags & TUNNEL_VERSION)
+		flags |= GRE_VERSION;
+
+	return flags;
+}
+#else
+static inline int __init gre_compat_init(void)
+{
+	return 0;
+}
+static inline void gre_compat_exit(void)
+{
+
+}
+#endif
+#endif
diff --git a/datapath/linux/compat/include/net/ip_tunnels.h b/datapath/linux/compat/include/net/ip_tunnels.h
new file mode 100644
index 0000000..16667e9
--- /dev/null
+++ b/datapath/linux/compat/include/net/ip_tunnels.h
@@ -0,0 +1,69 @@
+#ifndef __NET_IP_TUNNELS_WRAPPER_H
+#define __NET_IP_TUNNELS_WRAPPER_H 1
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(3,9,0)
+#include_next <net/ip_tunnels.h>
+#else
+#include <linux/if_tunnel.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+#include <net/dsfield.h>
+#include <net/inet_ecn.h>
+#include <net/ip.h>
+#include <net/rtnetlink.h>
+
+#if 0
+#include <net/ipv6.h>
+#include <net/ip6_fib.h>
+#include <net/ip6_route.h>
+#endif
+
+#define TUNNEL_CSUM	__cpu_to_be16(0x01)
+#define TUNNEL_ROUTING	__cpu_to_be16(0x02)
+#define TUNNEL_KEY	__cpu_to_be16(0x04)
+#define TUNNEL_SEQ	__cpu_to_be16(0x08)
+#define TUNNEL_STRICT	__cpu_to_be16(0x10)
+#define TUNNEL_REC	__cpu_to_be16(0x20)
+#define TUNNEL_VERSION	__cpu_to_be16(0x40)
+#define TUNNEL_NO_KEY	__cpu_to_be16(0x80)
+
+struct tnl_ptk_info {
+	__be16 flags;
+	__be16 proto;
+	__be32 key;
+	__be32 seq;
+};
+
+#define PACKET_RCVD	0
+#define PACKET_REJECT	1
+
+static inline void tunnel_ip_select_ident(struct sk_buff *skb,
+					  const struct iphdr  *old_iph,
+					  struct dst_entry *dst)
+{
+	struct iphdr *iph = ip_hdr(skb);
+
+	/* Use inner packet iph-id if possible. */
+	if (skb->protocol == htons(ETH_P_IP) && old_iph->id)
+		iph->id	= old_iph->id;
+	else
+		__ip_select_ident(iph, dst,
+				  (skb_shinfo(skb)->gso_segs ?: 1) - 1);
+}
+
+static inline u16 tunnel_src_port(__u16 port_max, __u16 port_min,
+				  struct sk_buff *skb)
+{
+	unsigned int range = (port_max - port_min) + 1;
+	u32 hash;
+
+	hash = skb_get_rxhash(skb);
+	if (!hash)
+		hash = jhash(skb->data, 2 * ETH_ALEN,
+			     (__force u32) skb->protocol);
+
+	return (((u64) hash * range) >> 32) + port_min;
+}
+#endif
+#endif /* __NET_IP_TUNNELS_H */
diff --git a/datapath/linux/compat/include/net/vxlan.h b/datapath/linux/compat/include/net/vxlan.h
new file mode 100644
index 0000000..3ad26bb
--- /dev/null
+++ b/datapath/linux/compat/include/net/vxlan.h
@@ -0,0 +1,45 @@
+#ifndef __NET_IP_VXLAN_WRAPPER_H
+#define __NET_IP_VXLAN_WRAPPER_H 1
+
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/udp.h>
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(3,9,0)
+#include_next <net/vxlan.h>
+
+static inline int vxlan_compat_init(void)
+{
+	return 0;
+}
+static inline void vxlan_compat_cleanup(void)
+{
+
+}
+#else
+
+/* VXLAN protocol header */
+struct vxlanhdr {
+	__be32 vx_flags;
+	__be32 vx_vni;
+};
+
+#define VXLAN_HLEN (sizeof(struct udphdr) + sizeof(struct vxlanhdr))
+
+struct vxlan_port {
+	int (*vx_rcv)(struct vxlan_port *port, struct sk_buff *skb, __be32 key);
+	void *user_data;
+	struct socket *sock;
+	__be16 portno;
+};
+
+struct sk_buff *vxlan_build_header(const struct vxlan_port *port,
+				   __u16 src_port, struct sk_buff *skb,
+				   __be32 vni);
+int vxlan_add_handler(struct net *net, struct vxlan_port *);
+void vxlan_del_handler(struct net *net, const struct vxlan_port *port);
+
+int vxlan_compat_init(void);
+void vxlan_compat_cleanup(void);
+#endif
+#endif
diff --git a/datapath/linux/compat/vxlan.c b/datapath/linux/compat/vxlan.c
new file mode 100644
index 0000000..26413c1
--- /dev/null
+++ b/datapath/linux/compat/vxlan.c
@@ -0,0 +1,311 @@
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/skbuff.h>
+#include <linux/rculist.h>
+#include <linux/netdevice.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <linux/igmp.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+#include <linux/hash.h>
+#include <linux/ethtool.h>
+#include <net/arp.h>
+#include <net/ndisc.h>
+#include <net/ip.h>
+#include <net/ip_tunnels.h>
+#include <net/icmp.h>
+#include <net/udp.h>
+#include <net/rtnetlink.h>
+#include <net/route.h>
+#include <net/dsfield.h>
+#include <net/inet_ecn.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
+#include <net/vxlan.h>
+
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(3,9,0) && \
+    LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+
+#define VXLAN_FLAGS 0x08000000	/* struct vxlanhdr.vx_flags required value. */
+
+static int vxlan_net_id;
+
+static DEFINE_MUTEX(vxlan_mutex);
+
+#define MAX_VXLAN_PORTS	8
+struct vxlan_net {
+	struct vxlan_port __rcu *vxlan_ports[MAX_VXLAN_PORTS];
+};
+
+static struct pernet_operations vxlan_net_ops = {
+	.id   = &vxlan_net_id,
+	.size = sizeof(struct vxlan_net),
+};
+static unsigned int port_range;
+static int port_low;
+static int port_high;
+
+int vxlan_compat_init(void)
+{
+	inet_get_local_port_range(&port_low, &port_high);
+	port_range = (port_high - port_low) + 1;
+
+	return register_pernet_device(&vxlan_net_ops);
+}
+
+void vxlan_compat_cleanup(void)
+{
+	unregister_pernet_device(&vxlan_net_ops);
+}
+
+/* Callback from net/ipv4/udp.c to receive packets */
+static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
+{
+	struct vxlan_net *vn = net_generic(dev_net(skb->dev), vxlan_net_id);
+	struct vxlanhdr *vxh;
+	int i;
+
+	/* pop off outer UDP header */
+	__skb_pull(skb, sizeof(struct udphdr));
+
+	/* Need Vxlan and inner Ethernet header to be present */
+	if (!pskb_may_pull(skb, sizeof(struct vxlanhdr)))
+		goto error;
+
+	/* Drop packets with reserved bits set */
+	vxh = (struct vxlanhdr *) skb->data;
+	if (vxh->vx_flags != htonl(VXLAN_FLAGS) ||
+	    (vxh->vx_vni & htonl(0xff))) {
+		netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n",
+			   ntohl(vxh->vx_flags), ntohl(vxh->vx_vni));
+		goto error;
+	}
+
+	__skb_pull(skb, sizeof(struct vxlanhdr));
+
+	rcu_read_lock();
+	for (i = 0; i < MAX_VXLAN_PORTS; i++) {
+		struct vxlan_port *port = rcu_dereference(vn->vxlan_ports[i]);
+		int ret;
+
+		if (!port)
+			continue;
+		if (port->portno != udp_hdr(skb)->dest)
+			continue;
+
+		ret = port->vx_rcv(port, skb, vxh->vx_vni);
+		if (ret == PACKET_RCVD) {
+			rcu_read_unlock();
+			return 0;
+		}
+	}
+	rcu_read_unlock();
+error:
+	/*Put UDP header hack*/
+	__skb_push(skb, sizeof(struct udphdr));
+	return 1;
+}
+
+struct socket *vxlan_create_socket(struct net *net, __be16 portno)
+{
+	struct socket *sock;
+	struct sock *sk;
+	struct sockaddr_in vxlan_addr = {
+		.sin_family = AF_INET,
+		.sin_addr.s_addr = htonl(INADDR_ANY),
+	};
+	int rc;
+
+	/* Create UDP socket for encapsulation receive. */
+	rc = sock_create_kern(AF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
+	if (rc < 0) {
+		pr_debug("UDP socket create failed\n");
+		return ERR_PTR(rc);
+	}
+	/* Put in proper namespace */
+	sk = sock->sk;
+	sk_change_net(sk, net);
+
+	vxlan_addr.sin_port = portno;
+
+	rc = kernel_bind(sock, (struct sockaddr *) &vxlan_addr,
+			 sizeof(vxlan_addr));
+	if (rc < 0) {
+		pr_debug("bind for UDP socket %pI4:%u (%d)\n",
+			 &vxlan_addr.sin_addr, ntohs(vxlan_addr.sin_port), rc);
+		sk_release_kernel(sk);
+		return ERR_PTR(rc);
+	}
+
+	/* Disable multicast loopback */
+	inet_sk(sk)->mc_loop = 0;
+
+	/* Mark socket as an encapsulation socket. */
+	udp_sk(sk)->encap_type = 1;
+	udp_sk(sk)->encap_rcv = vxlan_udp_encap_recv;
+	udp_encap_enable();
+
+	return sock;
+}
+
+int vxlan_add_handler(struct net *net, struct vxlan_port *new)
+{
+	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
+	struct socket *sock = NULL;
+	int i, p = -1;
+	int err;
+
+	mutex_lock(&vxlan_mutex);
+	for (i = 0; i < MAX_VXLAN_PORTS; i++) {
+		struct vxlan_port *port = vn->vxlan_ports[i];
+
+		if (!port) {
+			if (p < 0)
+				p = i;
+			continue;
+		}
+		if (port->portno == new->portno)
+			sock = port->sock;
+	}
+
+	if (p < 0) {
+		err = -EBUSY;
+		goto out;
+	}
+
+	if (!sock) {
+		sock = vxlan_create_socket(net, new->portno);
+		if (IS_ERR(sock)) {
+			err = PTR_ERR(sock);
+			goto out;
+		}
+	}
+
+	new->sock = sock;
+	rcu_assign_pointer(vn->vxlan_ports[p], new);
+	err = 0;
+out:
+	mutex_unlock(&vxlan_mutex);
+	return err;
+}
+
+void vxlan_del_handler(struct net *net, const struct vxlan_port *del)
+{
+	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
+	bool inuse = false;
+	int i;
+
+	mutex_lock(&vxlan_mutex);
+
+	/* check if sock is still used. */
+	for (i = 0; i < MAX_VXLAN_PORTS; i++) {
+		struct vxlan_port *port = vn->vxlan_ports[i];
+
+		if (!port)
+			continue;
+
+		if (port == del) {
+			rcu_assign_pointer(vn->vxlan_ports[i], NULL);
+			synchronize_net();
+			continue;
+		}
+		if (port->portno == del->portno)
+			inuse = true;
+	}
+
+	if (!inuse)
+		sk_release_kernel(del->sock->sk);
+	mutex_unlock(&vxlan_mutex);
+}
+
+static void vxlan_sock_free(struct sk_buff *skb)
+{
+	sock_put(skb->sk);
+}
+
+/* On transmit, associate with the tunnel socket */
+static void vxlan_set_owner(const struct vxlan_port *port, struct sk_buff *skb)
+{
+	struct sock *sk = port->sock->sk;
+
+	skb_orphan(skb);
+	sock_hold(sk);
+	skb->sk = sk;
+	skb->destructor = vxlan_sock_free;
+}
+
+#ifdef HAVE_SKB_GSO_UDP_TUNNEL
+static struct sk_buff *handle_offloads(struct sk_buff *skb)
+{
+	if (likely(!skb->encapsulation)) {
+		skb_reset_inner_headers(skb);
+		skb->encapsulation = 1;
+	}
+
+	if (skb_is_gso(skb)) {
+		int err = skb_unclone(skb, GFP_ATOMIC);
+		if (unlikely(err)) {
+			kfree_skb(skb);
+			return ERR_PTR(err);
+		}
+
+		skb_shinfo(skb)->gso_type |= (SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP);
+	}
+
+	return skb;
+}
+#else
+static struct sk_buff *handle_offloads(struct sk_buff *skb)
+{
+	if (skb_is_gso(skb)) {
+		struct sk_buff *nskb;
+
+		nskb = __skb_gso_segment(skb, 0, false);
+		if (IS_ERR(nskb)) {
+			kfree_skb(skb);
+			return nskb;
+		}
+
+		consume_skb(skb);
+		skb = nskb;
+	}
+	return skb;
+}
+#endif
+
+struct sk_buff *vxlan_build_header(const struct vxlan_port *port,
+				   __u16 src_port, struct sk_buff *skb,
+				   __be32 vni)
+{
+	struct vxlanhdr *vxh;
+	struct udphdr *uh;
+
+	skb = handle_offloads(skb);
+	if (IS_ERR(skb))
+		return NULL;
+
+	vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
+	vxh->vx_flags = htonl(VXLAN_FLAGS);
+	vxh->vx_vni = vni;
+
+	__skb_push(skb, sizeof(*uh));
+	skb_reset_transport_header(skb);
+	uh = udp_hdr(skb);
+
+	uh->dest = port->portno;
+	uh->source = htons(src_port);
+
+	uh->len = htons(skb->len);
+	uh->check = 0;
+
+	vxlan_set_owner(port, skb);
+
+	return skb;
+}
+#endif
diff --git a/datapath/tunnel.c b/datapath/tunnel.c
index 057aaed..bd0ee7e 100644
--- a/datapath/tunnel.c
+++ b/datapath/tunnel.c
@@ -28,6 +28,7 @@
 #include <linux/workqueue.h>
 #include <linux/rculist.h>
 #include <net/route.h>
+#include <net/ip_tunnels.h>
 #include <net/xfrm.h>
 
 #include "checksum.h"
@@ -84,9 +85,8 @@ void ovs_tnl_rcv(struct vport *vport, struct sk_buff *skb)
 	ovs_vport_receive(vport, skb);
 }
 
-static struct rtable *find_route(struct net *net,
-		__be32 *saddr, __be32 daddr, u8 ipproto,
-		u8 tos, u32 skb_mark)
+static struct rtable *find_route(struct net *net, __be32 *saddr, __be32 daddr,
+				 u8 ipproto, u8 tos, u32 skb_mark)
 {
 	struct rtable *rt;
 	/* Tunnel configuration keeps DSCP part of TOS bits, But Linux
@@ -141,42 +141,10 @@ static bool need_linearize(const struct sk_buff *skb)
 	return false;
 }
 
-static struct sk_buff *handle_offloads(struct sk_buff *skb,
-				       const struct rtable *rt,
-				       int tunnel_hlen)
+static int ovs_skb_checksum(struct sk_buff *skb)
 {
-	int min_headroom;
-	int err;
-
-	min_headroom = LL_RESERVED_SPACE(rt_dst(rt).dev) + rt_dst(rt).header_len
-			+ tunnel_hlen
-			+ (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);
-
-	if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
-		int head_delta = SKB_DATA_ALIGN(min_headroom -
-						skb_headroom(skb) +
-						16);
-		err = pskb_expand_head(skb, max_t(int, head_delta, 0),
-					0, GFP_ATOMIC);
-		if (unlikely(err))
-			goto error_free;
-	}
-
-	forward_ip_summed(skb, true);
-
-	if (skb_is_gso(skb)) {
-		struct sk_buff *nskb;
-
-		nskb = __skb_gso_segment(skb, 0, false);
-		if (IS_ERR(nskb)) {
-			kfree_skb(skb);
-			err = PTR_ERR(nskb);
-			goto error;
-		}
-
-		consume_skb(skb);
-		skb = nskb;
-	} else if (get_ip_summed(skb) == OVS_CSUM_PARTIAL) {
+	int err = 0;
+	if (get_ip_summed(skb) == OVS_CSUM_PARTIAL) {
 		/* Pages aren't locked and could change at any time.
 		 * If this happens after we compute the checksum, the
 		 * checksum will be wrong.  We linearize now to avoid
@@ -192,182 +160,193 @@ static struct sk_buff *handle_offloads(struct sk_buff *skb,
 		if (unlikely(err))
 			goto error_free;
 	}
-
 	set_ip_summed(skb, OVS_CSUM_NONE);
 
-	return skb;
-
+	return 0;
 error_free:
 	kfree_skb(skb);
-error:
-	return ERR_PTR(err);
+	return err;
 }
 
-/* Compute source UDP port for outgoing packet.
- * Currently we use the flow hash.
- */
-u16 ovs_tnl_get_src_port(struct sk_buff *skb)
+static int tnl_send_out(struct vport *vport, struct sk_buff *skb,
+			int tunnel_hlen,
+			struct sk_buff *(*build_header)(const struct vport *,
+							struct sk_buff *,
+							int tunnel_hlen))
 {
-	int low;
-	int high;
-	unsigned int range;
-	u32 hash = OVS_CB(skb)->flow->hash;
+	int sent_len = 0;
 
-	inet_get_local_port_range(&low, &high);
-	range = (high - low) + 1;
-	return (((u64) hash * range) >> 32) + low;
+	while (skb) {
+		struct sk_buff *next_skb = skb->next;
+		int frag_len;
+		int err;
+
+		skb->next = NULL;
+		if (next_skb) {
+			struct iphdr *iph;
+			struct dst_entry *dst = skb_dst(skb);
+
+			skb_dst_set(next_skb, dst_clone(dst));
+			/* Prepare next skb from current skb. */
+			if (OVS_CB(skb)->tun_key->tun_flags & OVS_TNL_F_CSUM) {
+				struct sk_buff *skb2;
+
+				/* Build Tunnel header. */
+				skb2 = next_skb->next;
+				next_skb = build_header(vport, next_skb, tunnel_hlen);
+				if (!next_skb)
+					next_skb = skb2;
+			} else {
+				/* Copy Tunnel header. */
+				skb_push(next_skb, tunnel_hlen);
+				memcpy(next_skb->data, skb_transport_header(skb), tunnel_hlen);
+			}
+			/* Copy IP header. */
+			skb_push(next_skb, sizeof(struct iphdr));
+			memcpy(next_skb->data, skb->data, sizeof(struct iphdr));
+
+			/* Reset offset to outer header. */
+			skb_reset_network_header(next_skb);
+			skb_set_transport_header(next_skb, sizeof(struct iphdr));
+
+			/* Update IP identification. */
+			iph = ip_hdr(next_skb);
+			iph->id = htons(ntohs(ip_hdr(skb)->id) + 1);
+
+			next_skb->local_df = 1;
+		}
+
+		if (!skb_is_gso(skb)) {
+			err = ovs_skb_checksum(skb);
+			if (err)
+				goto next;
+			set_ip_summed(skb, OVS_CSUM_NONE);
+		}
+
+		frag_len = skb->len;
+		memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
+		err = ip_local_out(skb);
+		if (unlikely(net_xmit_eval(err)))
+			goto next;
+
+		sent_len += frag_len;
+
+next:
+		skb = next_skb;
+	}
+	return sent_len;
 }
 
-int ovs_tnl_send(struct vport *vport, struct sk_buff *skb)
+int ovs_tnl_send(struct vport *vport, struct sk_buff *skb, u8 ipproto,
+		 int tunnel_hlen,
+		 struct sk_buff *(*build_header)(const struct vport *,
+						 struct sk_buff *,
+						 int tunnel_hlen))
 {
-	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
+	struct iphdr *inner_iph;
+	struct iphdr *iph;
 	struct rtable *rt;
 	__be32 saddr;
 	int sent_len = 0;
-	int tunnel_hlen;
+	int min_headroom;
 
 	if (unlikely(!OVS_CB(skb)->tun_key))
 		goto error_free;
 
+	if (unlikely(vlan_deaccel_tag(skb)))
+		goto error;
+
 	/* Route lookup */
 	saddr = OVS_CB(skb)->tun_key->ipv4_src;
 	rt = find_route(ovs_dp_get_net(vport->dp),
 			&saddr,
 			OVS_CB(skb)->tun_key->ipv4_dst,
-			tnl_vport->tnl_ops->ipproto,
+			ipproto,
 			OVS_CB(skb)->tun_key->ipv4_tos,
 			skb_get_mark(skb));
 	if (IS_ERR(rt))
 		goto error_free;
 
+	skb_dst_drop(skb);
+	skb_dst_set(skb, &rt_dst(rt));
+
 	/* Offloading */
-	tunnel_hlen = tnl_vport->tnl_ops->hdr_len(OVS_CB(skb)->tun_key);
-	tunnel_hlen += sizeof(struct iphdr);
+	min_headroom = LL_RESERVED_SPACE(rt_dst(rt).dev) + rt_dst(rt).header_len
+			+ tunnel_hlen + sizeof(struct iphdr)
+			+ (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);
 
-	skb = handle_offloads(skb, rt, tunnel_hlen);
-	if (IS_ERR(skb)) {
-		skb = NULL;
-		goto err_free_rt;
+	if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
+		int err;
+		int head_delta = SKB_DATA_ALIGN(min_headroom -
+						skb_headroom(skb) +
+						16);
+		err = pskb_expand_head(skb, max_t(int, head_delta, 0),
+					0, GFP_ATOMIC);
+		if (unlikely(err))
+			goto error_free;
 	}
+	forward_ip_summed(skb, true);
+
+	/* Push Tunnel header. */
+	skb = build_header(vport, skb, tunnel_hlen);
+	if (!skb)
+		goto error;
+
+	inner_iph = ip_hdr(skb);
 
 	/* Reset SKB */
 	nf_reset(skb);
 	secpath_reset(skb);
-	skb_dst_drop(skb);
 	skb_clear_rxhash(skb);
 
-	while (skb) {
-		struct sk_buff *next_skb = skb->next;
-		struct iphdr *iph;
-		int frag_len;
-		int err;
-
-		skb->next = NULL;
-
-		if (unlikely(vlan_deaccel_tag(skb)))
-			goto next;
-
-		frag_len = skb->len;
-		skb_push(skb, tunnel_hlen);
-		skb_reset_network_header(skb);
-		skb_set_transport_header(skb, sizeof(struct iphdr));
-
-		if (next_skb)
-			skb_dst_set(skb, dst_clone(&rt_dst(rt)));
-		else
-			skb_dst_set(skb, &rt_dst(rt));
-
-		/* Push Tunnel header. */
-		tnl_vport->tnl_ops->build_header(vport, skb, tunnel_hlen);
-
-		/* Push IP header. */
-		iph = ip_hdr(skb);
-		iph->version	= 4;
-		iph->ihl	= sizeof(struct iphdr) >> 2;
-		iph->protocol	= tnl_vport->tnl_ops->ipproto;
-		iph->daddr	= OVS_CB(skb)->tun_key->ipv4_dst;
-		iph->saddr	= saddr;
-		iph->tos	= OVS_CB(skb)->tun_key->ipv4_tos;
-		iph->ttl	= OVS_CB(skb)->tun_key->ipv4_ttl;
-		iph->frag_off	= OVS_CB(skb)->tun_key->tun_flags &
-				  OVS_TNL_F_DONT_FRAGMENT ?  htons(IP_DF) : 0;
-		/*
-		 * Allow our local IP stack to fragment the outer packet even
-		 * if the DF bit is set as a last resort.  We also need to
-		 * force selection of an IP ID here with __ip_select_ident(),
-		 * as ip_select_ident() assumes a proper ID is not needed when
-		 * when the DF bit is set.
-		 */
-		skb->local_df = 1;
-		__ip_select_ident(iph, skb_dst(skb), 0);
-
-		memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
-
-		err = ip_local_out(skb);
-		if (unlikely(net_xmit_eval(err)))
-			goto next;
-
-		sent_len += frag_len;
+	skb_push(skb, sizeof(struct iphdr));
+	skb_reset_network_header(skb);
+	skb_set_transport_header(skb, sizeof(struct iphdr));
+
+	/* Push IP header. */
+	iph = ip_hdr(skb);
+	iph->version	= 4;
+	iph->ihl	= sizeof(struct iphdr) >> 2;
+	iph->protocol	= ipproto;
+	iph->daddr	= OVS_CB(skb)->tun_key->ipv4_dst;
+	iph->saddr	= saddr;
+	iph->tos	= OVS_CB(skb)->tun_key->ipv4_tos;
+	iph->ttl	= OVS_CB(skb)->tun_key->ipv4_ttl;
+	iph->frag_off	= OVS_CB(skb)->tun_key->tun_flags &
+			  OVS_TNL_F_DONT_FRAGMENT ?
+			  htons(IP_DF) : 0;
+	/*
+	 * Allow our local IP stack to fragment the outer packet even
+	 * if the DF bit is set as a last resort.  We also need to
+	 * force selection of an IP ID here with __ip_select_ident(),
+	 * as ip_select_ident() assumes a proper ID is not needed
+	 * when the DF bit is set.
+	 */
+	skb->local_df = 1;
+	__ip_select_ident(iph, skb_dst(skb), 0);
 
-next:
-		skb = next_skb;
-	}
+	tunnel_ip_select_ident(skb, inner_iph, &rt_dst(rt));
+	sent_len = tnl_send_out(vport, skb, tunnel_hlen, build_header);
 
 	if (unlikely(sent_len == 0))
 		ovs_vport_record_error(vport, VPORT_E_TX_DROPPED);
 
 	return sent_len;
 
-err_free_rt:
-	ip_rt_put(rt);
 error_free:
 	kfree_skb(skb);
+error:
 	ovs_vport_record_error(vport, VPORT_E_TX_ERROR);
 	return sent_len;
 }
 
-struct vport *ovs_tnl_create(const struct vport_parms *parms,
-			     const struct vport_ops *vport_ops,
-			     const struct tnl_ops *tnl_ops)
-{
-	struct vport *vport;
-	struct tnl_vport *tnl_vport;
-	int err;
-
-	vport = ovs_vport_alloc(sizeof(struct tnl_vport), vport_ops, parms);
-	if (IS_ERR(vport)) {
-		err = PTR_ERR(vport);
-		goto error;
-	}
-
-	tnl_vport = tnl_vport_priv(vport);
-
-	strcpy(tnl_vport->name, parms->name);
-	tnl_vport->tnl_ops = tnl_ops;
-
-	return vport;
-
-error:
-	return ERR_PTR(err);
-}
-
-static void free_port_rcu(struct rcu_head *rcu)
-{
-	struct tnl_vport *tnl_vport = container_of(rcu,
-						   struct tnl_vport, rcu);
-
-	ovs_vport_free(vport_from_priv(tnl_vport));
-}
-
-void ovs_tnl_destroy(struct vport *vport)
+u16 ovs_tunnel_source_port(struct sk_buff *skb)
 {
-	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
-
-	call_rcu(&tnl_vport->rcu, free_port_rcu);
-}
+	unsigned int range;
+	int low;
+	int high;
 
-const char *ovs_tnl_get_name(const struct vport *vport)
-{
-	const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
-	return tnl_vport->name;
+	inet_get_local_port_range(&low, &high);
+	range = (high - low) + 1;
+	return tunnel_src_port(high, low, skb);
 }
diff --git a/datapath/tunnel.h b/datapath/tunnel.h
index e853146..f666e9a 100644
--- a/datapath/tunnel.h
+++ b/datapath/tunnel.h
@@ -26,44 +26,12 @@
 #include "flow.h"
 #include "vport.h"
 
-struct tnl_ops {
-	u8 ipproto;		/* The IP protocol for the tunnel. */
-
-	/*
-	 * Returns the length of the tunnel header that will be added in
-	 * build_header() (i.e. excludes the IP header).
-	 */
-	int (*hdr_len)(const struct ovs_key_ipv4_tunnel *);
-	/*
-	* Builds header for given SKB.  Space will have already been
-	* allocated at the start of the packet equal
-	* to sizeof(struct iphdr) + value returned by hdr_len().
-	*/
-	void (*build_header)(const struct vport *, struct sk_buff *,
-			     int tunnel_hlen);
-};
-
-struct tnl_vport {
-	struct rcu_head rcu;
-
-	__be16 dst_port;
-	char name[IFNAMSIZ];
-	const struct tnl_ops *tnl_ops;
-};
-
-struct vport *ovs_tnl_create(const struct vport_parms *, const struct vport_ops *,
-			     const struct tnl_ops *);
-void ovs_tnl_destroy(struct vport *);
-
-const char *ovs_tnl_get_name(const struct vport *vport);
-int ovs_tnl_send(struct vport *vport, struct sk_buff *skb);
+int ovs_tnl_send(struct vport *vport, struct sk_buff *skb,
+		  u8 ipproto, int tunnel_hlen,
+		  struct sk_buff *(*build_header)(const struct vport *, struct sk_buff *,
+						  int tunnel_hlen));
 void ovs_tnl_rcv(struct vport *vport, struct sk_buff *skb);
-u16 ovs_tnl_get_src_port(struct sk_buff *skb);
-
-static inline struct tnl_vport *tnl_vport_priv(const struct vport *vport)
-{
-	return vport_priv(vport);
-}
+u16 ovs_tunnel_source_port(struct sk_buff *skb);
 
 static inline void tnl_tun_key_init(struct ovs_key_ipv4_tunnel *tun_key,
 				    const struct iphdr *iph, __be64 tun_id, u32 tun_flags)
diff --git a/datapath/vport-gre.c b/datapath/vport-gre.c
index 40b96cf..3a722a1 100644
--- a/datapath/vport-gre.c
+++ b/datapath/vport-gre.c
@@ -25,25 +25,16 @@
 #include <linux/if_vlan.h>
 #include <linux/in.h>
 
+#include <net/gre.h>
 #include <net/icmp.h>
 #include <net/ip.h>
+#include <net/ip_tunnels.h>
 #include <net/protocol.h>
 
 #include "datapath.h"
 #include "tunnel.h"
 #include "vport.h"
 
-/*
- * The GRE header is composed of a series of sections: a base and then a variable
- * number of options.
- */
-#define GRE_HEADER_SECTION 4
-
-struct gre_base_hdr {
-	__be16 flags;
-	__be16 protocol;
-};
-
 static int gre_hdr_len(const struct ovs_key_ipv4_tunnel *tun_key)
 {
 	int len = GRE_HEADER_SECTION;
@@ -87,52 +78,53 @@ static __be32 be64_get_high32(__be64 x)
 #endif
 }
 
-static void __gre_build_header(struct sk_buff *skb,
+static __be16 ovs_tnl_flags_to_gre_flags(u16 tun_flags, bool is_gre64)
+{
+	__be16 flags = 0;
+
+	if (tun_flags & OVS_TNL_F_CSUM)
+		flags |= TUNNEL_CSUM;
+	if (tun_flags & OVS_TNL_F_KEY)
+		flags |= TUNNEL_KEY;
+
+	if (is_gre64)
+		flags |= (TUNNEL_SEQ | TUNNEL_KEY);
+
+	return flags;
+}
+
+static struct sk_buff *__gre_build_header(struct sk_buff *skb,
 			       int tunnel_hlen,
 			       bool is_gre64)
 {
 	const struct ovs_key_ipv4_tunnel *tun_key = OVS_CB(skb)->tun_key;
-	__be32 *options = (__be32 *)(skb_network_header(skb) + tunnel_hlen
-			- GRE_HEADER_SECTION);
-	struct gre_base_hdr *greh = (struct gre_base_hdr *) skb_transport_header(skb);
-	greh->protocol = htons(ETH_P_TEB);
-	greh->flags = 0;
-
-	/* Work backwards over the options so the checksum is last. */
-	if (tun_key->tun_flags & OVS_TNL_F_KEY || is_gre64) {
-		greh->flags |= GRE_KEY;
-		if (is_gre64) {
-			/* Set higher 32 bits to seq. */
-			*options = be64_get_high32(tun_key->tun_id);
-			options--;
-			greh->flags |= GRE_SEQ;
-		}
-		*options = be64_get_low32(tun_key->tun_id);
-		options--;
-	}
+	struct tnl_ptk_info tpi;
 
-	if (tun_key->tun_flags & OVS_TNL_F_CSUM) {
-		greh->flags |= GRE_CSUM;
-		*options = 0;
-		*(__sum16 *)options = csum_fold(skb_checksum(skb,
-						skb_transport_offset(skb),
-						skb->len - skb_transport_offset(skb),
-						0));
-	}
+	skb = gre_handle_offloads(skb, (tun_key->tun_flags & OVS_TNL_F_CSUM));
+	if (IS_ERR(skb))
+		return NULL;
+
+	tpi.flags = ovs_tnl_flags_to_gre_flags(tun_key->tun_flags, is_gre64);
+	tpi.proto = htons(ETH_P_TEB);
+	tpi.key = be64_get_low32(tun_key->tun_id);
+	tpi.seq = be64_get_high32(tun_key->tun_id);
+	gre_build_header(skb, &tpi, tunnel_hlen);
+
+	return skb;
 }
 
-static void gre_build_header(const struct vport *vport,
+static struct sk_buff *gre32_build_header(const struct vport *vport,
 			     struct sk_buff *skb,
 			     int tunnel_hlen)
 {
-	__gre_build_header(skb, tunnel_hlen, false);
+	return __gre_build_header(skb, tunnel_hlen, false);
 }
 
-static void gre64_build_header(const struct vport *vport,
+static struct sk_buff *gre64_build_header(const struct vport *vport,
 			       struct sk_buff *skb,
 			       int tunnel_hlen)
 {
-	__gre_build_header(skb, tunnel_hlen, true);
+	return __gre_build_header(skb, tunnel_hlen, true);
 }
 
 static __be64 key_to_tunnel_id(__be32 key, __be32 seq)
@@ -144,119 +136,48 @@ static __be64 key_to_tunnel_id(__be32 key, __be32 seq)
 #endif
 }
 
-static int parse_header(struct iphdr *iph, __be16 *flags, __be64 *tun_id,
-			bool *is_gre64)
+static int map_gre_ptk_info(const struct tnl_ptk_info *tpi, u32 *tnl_flags,
+			    __be64 *tun_id, bool *is_gre64)
 {
-	/* IP and ICMP protocol handlers check that the IHL is valid. */
-	struct gre_base_hdr *greh = (struct gre_base_hdr *)((u8 *)iph + (iph->ihl << 2));
-	__be32 *options = (__be32 *)(greh + 1);
-	int hdr_len;
-
-	*flags = greh->flags;
-
-	if (unlikely(greh->flags & (GRE_VERSION | GRE_ROUTING)))
-		return -EINVAL;
+	int hdr_len = GRE_HEADER_SECTION;
 
-	if (unlikely(greh->protocol != htons(ETH_P_TEB)))
-		return -EINVAL;
-
-	hdr_len = GRE_HEADER_SECTION;
-
-	if (greh->flags & GRE_CSUM) {
+	*tnl_flags = 0;
+	if (tpi->flags & TUNNEL_CSUM) {
 		hdr_len += GRE_HEADER_SECTION;
-		options++;
+		*tnl_flags |= OVS_TNL_F_CSUM;
 	}
 
-	if (greh->flags & GRE_KEY) {
-		__be32 seq;
-		__be32 gre_key;
-
-		gre_key = *options;
+	if (tpi->flags & TUNNEL_KEY) {
 		hdr_len += GRE_HEADER_SECTION;
-		options++;
-
-		if (greh->flags & GRE_SEQ) {
-			seq = *options;
-			*is_gre64 = true;
-		} else {
-			seq = 0;
-			*is_gre64 = false;
-		}
-		*tun_id = key_to_tunnel_id(gre_key, seq);
+		*tun_id = key_to_tunnel_id(tpi->key, tpi->seq);
+		*tnl_flags |= OVS_TNL_F_KEY;
 	} else {
 		*tun_id = 0;
-		/* Ignore GRE seq if there is no key present. */
-		*is_gre64 = false;
 	}
 
-	if (greh->flags & GRE_SEQ)
+	if (tpi->flags & TUNNEL_SEQ) {
 		hdr_len += GRE_HEADER_SECTION;
-
-	return hdr_len;
-}
-
-static bool check_checksum(struct sk_buff *skb)
-{
-	struct iphdr *iph = ip_hdr(skb);
-	struct gre_base_hdr *greh = (struct gre_base_hdr *)(iph + 1);
-	__sum16 csum = 0;
-
-	if (greh->flags & GRE_CSUM) {
-		switch (skb->ip_summed) {
-		case CHECKSUM_COMPLETE:
-			csum = csum_fold(skb->csum);
-
-			if (!csum)
-				break;
-			/* Fall through. */
-
-		case CHECKSUM_NONE:
-			skb->csum = 0;
-			csum = __skb_checksum_complete(skb);
-			skb->ip_summed = CHECKSUM_COMPLETE;
-			break;
-		}
+		*is_gre64 = true;
+	} else {
+		*is_gre64 = false;
 	}
 
-	return (csum == 0);
-}
-
-static u32 gre_flags_to_tunnel_flags(__be16 gre_flags, bool is_gre64)
-{
-	u32 tunnel_flags = 0;
-
-	if (gre_flags & GRE_KEY || is_gre64)
-		tunnel_flags = OVS_TNL_F_KEY;
-
-	if (gre_flags & GRE_CSUM)
-		tunnel_flags |= OVS_TNL_F_CSUM;
-
-	return tunnel_flags;
+	return hdr_len;
 }
 
 /* Called with rcu_read_lock and BH disabled. */
-static int gre_rcv(struct sk_buff *skb)
+static int gre_rcv(struct sk_buff *skb,
+		   const struct tnl_ptk_info *tpi)
 {
-	struct ovs_net *ovs_net;
+	struct ovs_net *ovs_net = net_generic(dev_net(skb->dev), ovs_net_id);
 	struct vport *vport;
 	int hdr_len;
-	struct iphdr *iph;
 	struct ovs_key_ipv4_tunnel tun_key;
-	__be16 gre_flags;
 	u32 tnl_flags;
 	__be64 key;
 	bool is_gre64;
 
-	if (unlikely(!pskb_may_pull(skb, sizeof(struct gre_base_hdr) + ETH_HLEN)))
-		goto error;
-	if (unlikely(!check_checksum(skb)))
-		goto error;
-
-	hdr_len = parse_header(ip_hdr(skb), &gre_flags, &key, &is_gre64);
-	if (unlikely(hdr_len < 0))
-		goto error;
-
-	ovs_net = net_generic(dev_net(skb->dev), ovs_net_id);
+	hdr_len = map_gre_ptk_info(tpi, &tnl_flags, &key, &is_gre64);
 	if (is_gre64)
 		vport = rcu_dereference(ovs_net->vport_net.gre64_vport);
 	else
@@ -267,27 +188,23 @@ static int gre_rcv(struct sk_buff *skb)
 	if (unlikely(!pskb_may_pull(skb, hdr_len + ETH_HLEN)))
 		goto error;
 
-	iph = ip_hdr(skb);
-	tnl_flags = gre_flags_to_tunnel_flags(gre_flags, is_gre64);
-	tnl_tun_key_init(&tun_key, iph, key, tnl_flags);
+	tnl_tun_key_init(&tun_key, ip_hdr(skb), key, tnl_flags);
 	OVS_CB(skb)->tun_key = &tun_key;
 
 	__skb_pull(skb, hdr_len);
 	skb_postpull_rcsum(skb, skb_transport_header(skb), hdr_len + ETH_HLEN);
 
 	ovs_tnl_rcv(vport, skb);
-	return 0;
+	return PACKET_RCVD;
 
 error:
 	kfree_skb(skb);
-	return 0;
+	return PACKET_RCVD;
 }
 
-static const struct net_protocol gre_protocol_handlers = {
-	.handler	=	gre_rcv,
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32)
-	.netns_ok	=	1,
-#endif
+static struct gre_cisco_protocol ipgre_protocol = {
+	.handler        = gre_rcv,
+	.priority       = 1,
 };
 
 static bool inited;
@@ -300,7 +217,7 @@ static int gre_init(void)
 		return 0;
 
 	inited = true;
-	err = inet_add_protocol(&gre_protocol_handlers, IPPROTO_GRE);
+	err = gre_cisco_register(&ipgre_protocol);
 	if (err)
 		pr_warn("cannot register gre protocol handler\n");
 
@@ -314,16 +231,10 @@ static void gre_exit(void)
 
 	inited = false;
 
-	inet_del_protocol(&gre_protocol_handlers, IPPROTO_GRE);
+	gre_cisco_unregister(&ipgre_protocol);
 }
 
 /* GRE vport. */
-static const struct tnl_ops gre_tnl_ops = {
-	.ipproto	= IPPROTO_GRE,
-	.hdr_len	= gre_hdr_len,
-	.build_header	= gre_build_header,
-};
-
 static struct vport *gre_create(const struct vport_parms *parms)
 {
 	struct net *net = ovs_dp_get_net(parms->dp);
@@ -334,8 +245,11 @@ static struct vport *gre_create(const struct vport_parms *parms)
 	if (rtnl_dereference(ovs_net->vport_net.gre_vport))
 		return ERR_PTR(-EEXIST);
 
-	vport = ovs_tnl_create(parms, &ovs_gre_vport_ops, &gre_tnl_ops);
+	vport = ovs_vport_alloc(0, &ovs_gre_vport_ops, parms);
+	if (IS_ERR(vport))
+		return vport;
 
+	strncpy(ovs_net->vport_net.gre_name, parms->name, IFNAMSIZ);
 	rcu_assign_pointer(ovs_net->vport_net.gre_vport, vport);
 	return vport;
 }
@@ -348,7 +262,28 @@ static void gre_tnl_destroy(struct vport *vport)
 	ovs_net = net_generic(net, ovs_net_id);
 
 	rcu_assign_pointer(ovs_net->vport_net.gre_vport, NULL);
-	ovs_tnl_destroy(vport);
+	ovs_vport_deferred_destroy(vport);
+}
+
+static const char *gre_tnl_get_name(const struct vport *vport)
+{
+	struct net *net = ovs_dp_get_net(vport->dp);
+	struct ovs_net *ovs_net;
+
+	ovs_net = net_generic(net, ovs_net_id);
+
+	return ovs_net->vport_net.gre_name;
+}
+
+static int gre_tnl_send(struct vport *vport, struct sk_buff *skb)
+{
+	int hlen;
+
+	if (unlikely(!OVS_CB(skb)->tun_key))
+		return 0;
+
+	hlen = gre_hdr_len(OVS_CB(skb)->tun_key);
+	return ovs_tnl_send(vport, skb, IPPROTO_GRE, hlen, gre32_build_header);
 }
 
 const struct vport_ops ovs_gre_vport_ops = {
@@ -358,16 +293,11 @@ const struct vport_ops ovs_gre_vport_ops = {
 	.exit		= gre_exit,
 	.create		= gre_create,
 	.destroy	= gre_tnl_destroy,
-	.get_name	= ovs_tnl_get_name,
-	.send		= ovs_tnl_send,
+	.get_name	= gre_tnl_get_name,
+	.send		= gre_tnl_send,
 };
 
 /* GRE64 vport. */
-static const struct tnl_ops gre64_tnl_ops = {
-	.ipproto	= IPPROTO_GRE,
-	.hdr_len	= gre64_hdr_len,
-	.build_header	= gre64_build_header,
-};
 
 static struct vport *gre64_create(const struct vport_parms *parms)
 {
@@ -379,13 +309,15 @@ static struct vport *gre64_create(const struct vport_parms *parms)
 	if (rtnl_dereference(ovs_net->vport_net.gre64_vport))
 		return ERR_PTR(-EEXIST);
 
-	vport = ovs_tnl_create(parms, &ovs_gre64_vport_ops, &gre64_tnl_ops);
+	vport = ovs_vport_alloc(0, &ovs_gre64_vport_ops, parms);
+	if (IS_ERR(vport))
+		return vport;
 
+	strncpy(ovs_net->vport_net.gre64_name, parms->name, IFNAMSIZ);
 	rcu_assign_pointer(ovs_net->vport_net.gre64_vport, vport);
 	return vport;
 }
 
-
 static void gre64_tnl_destroy(struct vport *vport)
 {
 	struct net *net = ovs_dp_get_net(vport->dp);
@@ -394,7 +326,28 @@ static void gre64_tnl_destroy(struct vport *vport)
 	ovs_net = net_generic(net, ovs_net_id);
 
 	rcu_assign_pointer(ovs_net->vport_net.gre64_vport, NULL);
-	ovs_tnl_destroy(vport);
+	ovs_vport_deferred_destroy(vport);
+}
+
+static int gre64_tnl_send(struct vport *vport, struct sk_buff *skb)
+{
+	int hlen;
+
+	if (unlikely(!OVS_CB(skb)->tun_key))
+		return 0;
+
+	hlen = gre64_hdr_len(OVS_CB(skb)->tun_key);
+	return ovs_tnl_send(vport, skb, IPPROTO_GRE, hlen, gre64_build_header);
+}
+
+static const char *gre64_tnl_get_name(const struct vport *vport)
+{
+	struct net *net = ovs_dp_get_net(vport->dp);
+	struct ovs_net *ovs_net;
+
+	ovs_net = net_generic(net, ovs_net_id);
+
+	return ovs_net->vport_net.gre64_name;
 }
 
 const struct vport_ops ovs_gre64_vport_ops = {
@@ -404,6 +357,6 @@ const struct vport_ops ovs_gre64_vport_ops = {
 	.exit		= gre_exit,
 	.create		= gre64_create,
 	.destroy	= gre64_tnl_destroy,
-	.get_name	= ovs_tnl_get_name,
-	.send		= ovs_tnl_send,
+	.get_name	= gre64_tnl_get_name,
+	.send		= gre64_tnl_send,
 };
diff --git a/datapath/vport-lisp.c b/datapath/vport-lisp.c
index 1fff5ae..8d9e232 100644
--- a/datapath/vport-lisp.c
+++ b/datapath/vport-lisp.c
@@ -30,6 +30,7 @@
 
 #include <net/icmp.h>
 #include <net/ip.h>
+#include <net/ip_tunnels.h>
 #include <net/udp.h>
 
 #include "datapath.h"
@@ -94,11 +95,6 @@ struct lisphdr {
 
 #define LISP_HLEN (sizeof(struct udphdr) + sizeof(struct lisphdr))
 
-static inline int lisp_hdr_len(const struct ovs_key_ipv4_tunnel *tun_key)
-{
-	return LISP_HLEN;
-}
-
 /**
  * struct lisp_port - Keeps track of open UDP ports
  * @list: list element.
@@ -107,11 +103,18 @@ static inline int lisp_hdr_len(const struct ovs_key_ipv4_tunnel *tun_key)
  */
 struct lisp_port {
 	struct list_head list;
+	__be16 dst_port;
 	struct vport *vport;
 	struct socket *lisp_rcv_socket;
 	struct rcu_head rcu;
+	char name[IFNAMSIZ];
 };
 
+static inline struct lisp_port *lisp_vport_priv(const struct vport *vport)
+{
+	return vport_priv(vport);
+}
+
 static LIST_HEAD(lisp_ports);
 
 static struct lisp_port *lisp_find_port(struct net *net, __be16 port)
@@ -119,9 +122,7 @@ static struct lisp_port *lisp_find_port(struct net *net, __be16 port)
 	struct lisp_port *lisp_port;
 
 	list_for_each_entry_rcu(lisp_port, &lisp_ports, list) {
-		struct tnl_vport *tnl_vport = tnl_vport_priv(lisp_port->vport);
-
-		if (tnl_vport->dst_port == port &&
+		if (lisp_port->dst_port == port &&
 			net_eq(sock_net(lisp_port->lisp_rcv_socket->sk), net))
 			return lisp_port;
 	}
@@ -134,25 +135,6 @@ static inline struct lisphdr *lisp_hdr(const struct sk_buff *skb)
 	return (struct lisphdr *)(udp_hdr(skb) + 1);
 }
 
-static int lisp_tnl_send(struct vport *vport, struct sk_buff *skb)
-{
-	int tnl_len;
-	int network_offset = skb_network_offset(skb);
-
-	/* We only encapsulate IPv4 and IPv6 packets */
-	switch (skb->protocol) {
-	case htons(ETH_P_IP):
-	case htons(ETH_P_IPV6):
-		/* Pop off "inner" Ethernet header */
-		skb_pull(skb, network_offset);
-		tnl_len = ovs_tnl_send(vport, skb);
-		return tnl_len > 0 ? tnl_len + network_offset : tnl_len;
-	default:
-		kfree_skb(skb);
-		return 0;
-	}
-}
-
 /* Convert 64 bit tunnel ID to 24 bit Instance ID. */
 static void tunnel_id_to_instance_id(__be64 tun_id, __u8 *iid)
 {
@@ -180,19 +162,36 @@ static __be64 instance_id_to_tunnel_id(__u8 *iid)
 #endif
 }
 
-static void lisp_build_header(const struct vport *vport,
-			      struct sk_buff *skb,
-			      int tunnel_hlen)
+static struct sk_buff *lisp_build_header(const struct vport *vport,
+					 struct sk_buff *skb,
+					 int tunnel_hlen)
 {
-	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
-	struct udphdr *udph = udp_hdr(skb);
-	struct lisphdr *lisph = (struct lisphdr *)(udph + 1);
+	struct lisp_port *lisp_port = lisp_vport_priv(vport);
+	struct udphdr *udph;
+	struct lisphdr *lisph;
 	const struct ovs_key_ipv4_tunnel *tun_key = OVS_CB(skb)->tun_key;
 
-	udph->dest = tnl_vport->dst_port;
-	udph->source = htons(ovs_tnl_get_src_port(skb));
+	if (skb_is_gso(skb)) {
+		struct sk_buff *nskb;
+
+		nskb = __skb_gso_segment(skb, 0, false);
+		if (IS_ERR(nskb)) {
+			kfree_skb(skb);
+			return nskb;
+		}
+
+		consume_skb(skb);
+		skb = nskb;
+	}
+
+	skb_push(skb, LISP_HLEN);
+	udph = (struct udphdr *) skb->data;
+	lisph = (struct lisphdr *)(udph + 1);
+
+	udph->dest = lisp_port->dst_port;
+	udph->source = htons(ovs_tunnel_source_port(skb));
 	udph->check = 0;
-	udph->len = htons(skb->len - skb_transport_offset(skb));
+	udph->len = htons(skb->len);
 
 	lisph->nonce_present = 0;	/* We don't support echo nonce algorithm */
 	lisph->locator_status_bits_present = 1;	/* Set LSB */
@@ -207,6 +206,28 @@ static void lisp_build_header(const struct vport *vport,
 
 	tunnel_id_to_instance_id(tun_key->tun_id, &lisph->u2.word2.instance_id[0]);
 	lisph->u2.word2.locator_status_bits = 1;
+
+	return skb;
+}
+
+static int lisp_tnl_send(struct vport *vport, struct sk_buff *skb)
+{
+	int tnl_len;
+	int network_offset = skb_network_offset(skb);
+
+	/* We only encapsulate IPv4 and IPv6 packets */
+	switch (skb->protocol) {
+	case htons(ETH_P_IP):
+	case htons(ETH_P_IPV6):
+		/* Pop off "inner" Ethernet header */
+		skb_pull(skb, network_offset);
+		tnl_len = ovs_tnl_send(vport, skb, IPPROTO_UDP,
+				       LISP_HLEN, lisp_build_header);
+		return tnl_len > 0 ? tnl_len + network_offset : tnl_len;
+	default:
+		kfree_skb(skb);
+		return 0;
+	}
 }
 
 /* Called with rcu_read_lock and BH disabled. */
@@ -270,162 +291,117 @@ out:
 	return 0;
 }
 
+static int lisp_get_options(const struct vport *vport, struct sk_buff *skb)
+{
+	struct lisp_port *lisp_port = lisp_vport_priv(vport);
+
+	if (nla_put_u16(skb, OVS_TUNNEL_ATTR_DST_PORT, ntohs(lisp_port->dst_port)))
+		return -EMSGSIZE;
+	return 0;
+}
+
+static void lisp_tnl_destroy(struct vport *vport)
+{
+	struct lisp_port *lisp_port = lisp_vport_priv(vport);
+
+	list_del_rcu(&lisp_port->list);
+	/* Release socket */
+	sk_release_kernel(lisp_port->lisp_rcv_socket->sk);
+	ovs_vport_deferred_destroy(vport);
+}
+
 /* Arbitrary value.  Irrelevant as long as it's not 0 since we set the handler. */
 #define UDP_ENCAP_LISP 1
-static int lisp_socket_init(struct lisp_port *lisp_port, struct net *net)
+static struct socket *lisp_socket_init(struct net *net, __be32 dst_port)
 {
 	int err;
 	struct sockaddr_in sin;
-	struct tnl_vport *tnl_vport = tnl_vport_priv(lisp_port->vport);
+	struct socket *socket;
 
 	err = sock_create_kern(AF_INET, SOCK_DGRAM, 0,
-			       &lisp_port->lisp_rcv_socket);
+			       &socket);
 	if (err)
 		goto error;
 
 	/* release net ref. */
-	sk_change_net(lisp_port->lisp_rcv_socket->sk, net);
+	sk_change_net(socket->sk, net);
 
 	sin.sin_family = AF_INET;
 	sin.sin_addr.s_addr = htonl(INADDR_ANY);
-	sin.sin_port = tnl_vport->dst_port;
+	sin.sin_port = dst_port;
 
-	err = kernel_bind(lisp_port->lisp_rcv_socket, (struct sockaddr *)&sin,
+	err = kernel_bind(socket, (struct sockaddr *)&sin,
 			  sizeof(struct sockaddr_in));
 	if (err)
 		goto error_sock;
 
-	udp_sk(lisp_port->lisp_rcv_socket->sk)->encap_type = UDP_ENCAP_LISP;
-	udp_sk(lisp_port->lisp_rcv_socket->sk)->encap_rcv = lisp_rcv;
+	udp_sk(socket->sk)->encap_type = UDP_ENCAP_LISP;
+	udp_sk(socket->sk)->encap_rcv = lisp_rcv;
 
 	udp_encap_enable();
 
-	return 0;
+	return socket;
 
 error_sock:
-	sk_release_kernel(lisp_port->lisp_rcv_socket->sk);
+	sk_release_kernel(socket->sk);
 error:
 	pr_warn("cannot register lisp protocol handler: %d\n", err);
-	return err;
-}
-
-
-static void free_port_rcu(struct rcu_head *rcu)
-{
-	struct lisp_port *lisp_port = container_of(rcu,
-			struct lisp_port, rcu);
-
-	kfree(lisp_port);
-}
-
-static void lisp_tunnel_release(struct lisp_port *lisp_port)
-{
-	if (!lisp_port)
-		return;
-	list_del_rcu(&lisp_port->list);
-	/* Release socket */
-	sk_release_kernel(lisp_port->lisp_rcv_socket->sk);
-	call_rcu(&lisp_port->rcu, free_port_rcu);
+	return ERR_PTR(err);
 }
 
-static int lisp_tunnel_setup(struct net *net, struct vport *vport,
-			     struct nlattr *options)
+static struct vport *lisp_tnl_create(const struct vport_parms *parms)
 {
-	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
+	struct nlattr *options = parms->options;
+	struct net *net = ovs_dp_get_net(parms->dp);
 	struct lisp_port *lisp_port;
+	struct vport *vport;
 	struct nlattr *a;
-	int err;
-	u16 dst_port;
+	__be16 dst_port;
+	int err = 0;
 
-	if (!options) {
-		err = -EINVAL;
-		goto out;
-	}
+	if (!options)
+		return ERR_PTR(-EINVAL);
+	vport = ovs_vport_alloc(sizeof(*lisp_port),
+			&ovs_lisp_vport_ops, parms);
+	if (IS_ERR(vport))
+		return vport;
 
 	a = nla_find_nested(options, OVS_TUNNEL_ATTR_DST_PORT);
 	if (a && nla_len(a) == sizeof(u16)) {
-		dst_port = nla_get_u16(a);
+		dst_port = htons(nla_get_u16(a));
 	} else {
 		/* Require destination port from userspace. */
 		err = -EINVAL;
-		goto out;
+		goto error_free;
 	}
 
 	/* Verify if we already have a socket created for this port */
-	lisp_port = lisp_find_port(net, htons(dst_port));
+	lisp_port = lisp_find_port(net, dst_port);
 	if (lisp_port) {
 		err = -EEXIST;
-		goto out;
+		goto error_free;
 	}
 
-	/* Add a new socket for this port */
-	lisp_port = kzalloc(sizeof(struct lisp_port), GFP_KERNEL);
-	if (!lisp_port) {
-		err = -ENOMEM;
-		goto out;
-	}
+	lisp_port = lisp_vport_priv(vport);
+	lisp_port->lisp_rcv_socket = lisp_socket_init(net, dst_port);
+	if (IS_ERR(lisp_port->lisp_rcv_socket))
+		goto error_free;
 
-	tnl_vport->dst_port = htons(dst_port);
+	strncpy(lisp_port->name, parms->name, IFNAMSIZ);
+	lisp_port->dst_port = dst_port;
 	lisp_port->vport = vport;
 	list_add_tail_rcu(&lisp_port->list, &lisp_ports);
 
-	err = lisp_socket_init(lisp_port, net);
-	if (err)
-		goto error;
-
-	return 0;
-
-error:
-	list_del_rcu(&lisp_port->list);
-	kfree(lisp_port);
-out:
-	return err;
-}
-
-static int lisp_get_options(const struct vport *vport, struct sk_buff *skb)
-{
-	const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
-
-	if (nla_put_u16(skb, OVS_TUNNEL_ATTR_DST_PORT, ntohs(tnl_vport->dst_port)))
-		return -EMSGSIZE;
-	return 0;
-}
-
-static const struct tnl_ops ovs_lisp_tnl_ops = {
-	.ipproto	= IPPROTO_UDP,
-	.hdr_len	= lisp_hdr_len,
-	.build_header	= lisp_build_header,
-};
-
-static void lisp_tnl_destroy(struct vport *vport)
-{
-	struct lisp_port *lisp_port;
-	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
-
-	lisp_port = lisp_find_port(ovs_dp_get_net(vport->dp),
-				   tnl_vport->dst_port);
-
-	lisp_tunnel_release(lisp_port);
-	ovs_tnl_destroy(vport);
+	return vport;
+error_free:
+	ovs_vport_free(vport);
+	return ERR_PTR(err);
 }
 
-static struct vport *lisp_tnl_create(const struct vport_parms *parms)
+static const char *lisp_tnl_get_name(const struct vport *vport)
 {
-	struct vport *vport;
-	int err;
-
-	vport = ovs_tnl_create(parms, &ovs_lisp_vport_ops, &ovs_lisp_tnl_ops);
-	if (IS_ERR(vport))
-		return vport;
-
-	err = lisp_tunnel_setup(ovs_dp_get_net(parms->dp), vport,
-				parms->options);
-	if (err) {
-		ovs_tnl_destroy(vport);
-		return ERR_PTR(err);
-	}
-
-	return vport;
+	struct lisp_port *lisp_port = lisp_vport_priv(vport);
+	return lisp_port->name;
 }
 
 const struct vport_ops ovs_lisp_vport_ops = {
@@ -433,7 +409,7 @@ const struct vport_ops ovs_lisp_vport_ops = {
 	.flags		= VPORT_F_TUN_ID,
 	.create		= lisp_tnl_create,
 	.destroy	= lisp_tnl_destroy,
-	.get_name	= ovs_tnl_get_name,
+	.get_name	= lisp_tnl_get_name,
 	.get_options	= lisp_get_options,
 	.send		= lisp_tnl_send,
 };
diff --git a/datapath/vport-vxlan.c b/datapath/vport-vxlan.c
index 1850fc2..cf3064b 100644
--- a/datapath/vport-vxlan.c
+++ b/datapath/vport-vxlan.c
@@ -30,116 +30,58 @@
 
 #include <net/icmp.h>
 #include <net/ip.h>
+#include <net/ip_tunnels.h>
 #include <net/udp.h>
+#include <net/vxlan.h>
 
 #include "datapath.h"
 #include "tunnel.h"
 #include "vport.h"
 
-#define VXLAN_FLAGS 0x08000000  /* struct vxlanhdr.vx_flags required value. */
-
-/**
- * struct vxlanhdr - VXLAN header
- * @vx_flags: Must have the exact value %VXLAN_FLAGS.
- * @vx_vni: VXLAN Network Identifier (VNI) in top 24 bits, low 8 bits zeroed.
- */
-struct vxlanhdr {
-	__be32 vx_flags;
-	__be32 vx_vni;
+struct ovs_vxlan_port {
+	struct vxlan_port port;
+	char name[IFNAMSIZ];
 };
 
-#define VXLAN_HLEN (sizeof(struct udphdr) + sizeof(struct vxlanhdr))
-
-static inline int vxlan_hdr_len(const struct ovs_key_ipv4_tunnel *tun_key)
+static inline struct ovs_vxlan_port *vxlan_vport_priv(const struct vport *vport)
 {
-	return VXLAN_HLEN;
+	return vport_priv(vport);
 }
 
-/**
- * struct vxlan_port - Keeps track of open UDP ports
- * @list: list element.
- * @vport: vport for the tunnel.
- * @socket: The socket created for this port number.
- */
-struct vxlan_port {
-	struct list_head list;
-	struct vport *vport;
-	struct socket *vxlan_rcv_socket;
-	struct rcu_head rcu;
-};
-
-static LIST_HEAD(vxlan_ports);
-
-static struct vxlan_port *vxlan_find_port(struct net *net, __be16 port)
+static struct sk_buff *__vxlan_build_header(const struct vport *vport,
+					  struct sk_buff *skb,
+					  int tunnel_hlen)
 {
-	struct vxlan_port *vxlan_port;
-
-	list_for_each_entry_rcu(vxlan_port, &vxlan_ports, list) {
-		struct tnl_vport *tnl_vport = tnl_vport_priv(vxlan_port->vport);
-
-		if (tnl_vport->dst_port == port &&
-			net_eq(sock_net(vxlan_port->vxlan_rcv_socket->sk), net))
-			return vxlan_port;
-	}
-
-	return NULL;
-}
-
-static inline struct vxlanhdr *vxlan_hdr(const struct sk_buff *skb)
-{
-	return (struct vxlanhdr *)(udp_hdr(skb) + 1);
-}
-
-static void vxlan_build_header(const struct vport *vport,
-			       struct sk_buff *skb,
-			       int tunnel_hlen)
-{
-	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
-	struct udphdr *udph = udp_hdr(skb);
-	struct vxlanhdr *vxh = (struct vxlanhdr *)(udph + 1);
+	struct ovs_vxlan_port *ovs_vxlan_port = vxlan_vport_priv(vport);
 	const struct ovs_key_ipv4_tunnel *tun_key = OVS_CB(skb)->tun_key;
+	u16 source_port;
 
-	udph->dest = tnl_vport->dst_port;
-	udph->source = htons(ovs_tnl_get_src_port(skb));
-	udph->check = 0;
-	udph->len = htons(skb->len - skb_transport_offset(skb));
-
-	vxh->vx_flags = htonl(VXLAN_FLAGS);
-	vxh->vx_vni = htonl(be64_to_cpu(tun_key->tun_id) << 8);
+	source_port = ovs_tunnel_source_port(skb);
+	skb = vxlan_build_header(&ovs_vxlan_port->port, source_port, skb,
+			   (htonl(be64_to_cpu(tun_key->tun_id) << 8)));
+	return skb;
 }
 
 /* Called with rcu_read_lock and BH disabled. */
-static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
+static int vxlan_rcv(struct vxlan_port *port, struct sk_buff *skb, __be32 vni)
 {
-	struct vxlan_port *vxlan_vport;
-	struct vxlanhdr *vxh;
 	struct iphdr *iph;
 	struct ovs_key_ipv4_tunnel tun_key;
+	struct vport *vport = port->user_data;
 	__be64 key;
 
-	vxlan_vport = vxlan_find_port(dev_net(skb->dev), udp_hdr(skb)->dest);
-	if (unlikely(!vxlan_vport))
-		goto error;
-
-	if (unlikely(!pskb_may_pull(skb, VXLAN_HLEN + ETH_HLEN)))
-		goto error;
-
-	vxh = vxlan_hdr(skb);
-	if (unlikely(vxh->vx_flags != htonl(VXLAN_FLAGS) ||
-		     vxh->vx_vni & htonl(0xff)))
+	if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
 		goto error;
 
-	__skb_pull(skb, VXLAN_HLEN);
 	skb_postpull_rcsum(skb, skb_transport_header(skb), VXLAN_HLEN + ETH_HLEN);
 
-	key = cpu_to_be64(ntohl(vxh->vx_vni) >> 8);
-
-	/* Save outer tunnel values */
+	key = cpu_to_be64(ntohl(vni) >> 8);
 	iph = ip_hdr(skb);
+	/* Save outer tunnel values */
 	tnl_tun_key_init(&tun_key, iph, key, OVS_TNL_F_KEY);
 	OVS_CB(skb)->tun_key = &tun_key;
 
-	ovs_tnl_rcv(vxlan_vport->vport, skb);
+	ovs_tnl_rcv(vport, skb);
 	goto out;
 
 error:
@@ -148,77 +90,47 @@ out:
 	return 0;
 }
 
-/* Random value.  Irrelevant as long as it's not 0 since we set the handler. */
-#define UDP_ENCAP_VXLAN 1
-static int vxlan_socket_init(struct vxlan_port *vxlan_port, struct net *net)
+static int vxlan_get_options(const struct vport *vport, struct sk_buff *skb)
 {
-	int err;
-	struct sockaddr_in sin;
-	struct tnl_vport *tnl_vport = tnl_vport_priv(vxlan_port->vport);
-
-	err = sock_create_kern(AF_INET, SOCK_DGRAM, 0,
-			       &vxlan_port->vxlan_rcv_socket);
-	if (err)
-		goto error;
-
-	/* release net ref. */
-	sk_change_net(vxlan_port->vxlan_rcv_socket->sk, net);
-
-	sin.sin_family = AF_INET;
-	sin.sin_addr.s_addr = htonl(INADDR_ANY);
-	sin.sin_port = tnl_vport->dst_port;
-
-	err = kernel_bind(vxlan_port->vxlan_rcv_socket, (struct sockaddr *)&sin,
-			  sizeof(struct sockaddr_in));
-	if (err)
-		goto error_sock;
-
-	udp_sk(vxlan_port->vxlan_rcv_socket->sk)->encap_type = UDP_ENCAP_VXLAN;
-	udp_sk(vxlan_port->vxlan_rcv_socket->sk)->encap_rcv = vxlan_rcv;
+	struct ovs_vxlan_port *ovs_vxlan_port = vxlan_vport_priv(vport);
+	struct vxlan_port *vxlan_port;
 
-	udp_encap_enable();
+	vxlan_port = &ovs_vxlan_port->port;
 
+	if (nla_put_u16(skb, OVS_TUNNEL_ATTR_DST_PORT,
+			ntohs(vxlan_port->portno)))
+		return -EMSGSIZE;
 	return 0;
-
-error_sock:
-	sk_release_kernel(vxlan_port->vxlan_rcv_socket->sk);
-error:
-	pr_warn("cannot register vxlan protocol handler\n");
-	return err;
 }
 
-static void free_port_rcu(struct rcu_head *rcu)
-{
-	struct vxlan_port *vxlan_port = container_of(rcu,
-			struct vxlan_port, rcu);
-
-	kfree(vxlan_port);
-}
-
-static void vxlan_tunnel_release(struct vxlan_port *vxlan_port)
+static void vxlan_tnl_destroy(struct vport *vport)
 {
-	if (!vxlan_port)
-		return;
+	struct ovs_vxlan_port *ovs_vxlan_port = vxlan_vport_priv(vport);
+	struct vxlan_port *vxlan_port;
 
-	list_del_rcu(&vxlan_port->list);
-	/* Release socket */
-	sk_release_kernel(vxlan_port->vxlan_rcv_socket->sk);
-	call_rcu(&vxlan_port->rcu, free_port_rcu);
+	vxlan_port = &ovs_vxlan_port->port;
+	vxlan_del_handler(ovs_dp_get_net(vport->dp), vxlan_port);
+	ovs_vport_deferred_destroy(vport);
 }
 
-static int vxlan_tunnel_setup(struct net *net, struct vport *vport,
-			      struct nlattr *options)
+static struct vport *vxlan_tnl_create(const struct vport_parms *parms)
 {
+	struct nlattr *options = parms->options;
+	struct ovs_vxlan_port *ovs_vxlan_port;
 	struct vxlan_port *vxlan_port;
-	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
+	struct vport *vport;
+	struct net *net;
 	struct nlattr *a;
-	int err;
 	u16 dst_port;
+	int err;
 
-	if (!options) {
-		err = -EINVAL;
-		goto out;
-	}
+	if (!options)
+		return ERR_PTR(-EINVAL);
+
+	vport = ovs_vport_alloc(sizeof(struct ovs_vxlan_port),
+				&ovs_vxlan_vport_ops, parms);
+	if (IS_ERR(vport))
+		return vport;
 
 	a = nla_find_nested(options, OVS_TUNNEL_ATTR_DST_PORT);
 	if (a && nla_len(a) == sizeof(u16)) {
@@ -226,84 +138,35 @@ static int vxlan_tunnel_setup(struct net *net, struct vport *vport,
 	} else {
 		/* Require destination port from userspace. */
 		err = -EINVAL;
-		goto out;
-	}
-
-	/* Verify if we already have a socket created for this port */
-	vxlan_port = vxlan_find_port(net, htons(dst_port));
-	if (vxlan_port) {
-		err = -EEXIST;
-		goto out;
+		goto error_free;
 	}
-
-	/* Add a new socket for this port */
-	vxlan_port = kzalloc(sizeof(struct vxlan_port), GFP_KERNEL);
-	if (!vxlan_port) {
-		err = -ENOMEM;
-		goto out;
-	}
-
-	tnl_vport->dst_port = htons(dst_port);
-	vxlan_port->vport = vport;
-	list_add_tail_rcu(&vxlan_port->list, &vxlan_ports);
-
-	err = vxlan_socket_init(vxlan_port, net);
+	net = ovs_dp_get_net(parms->dp);
+	ovs_vxlan_port = vxlan_vport_priv(vport);
+	strlcpy(ovs_vxlan_port->name, parms->name, IFNAMSIZ);
+	vxlan_port = &ovs_vxlan_port->port;
+	vxlan_port->portno = htons(dst_port);
+	vxlan_port->vx_rcv = vxlan_rcv;
+	vxlan_port->user_data = vport;
+	err = vxlan_add_handler(net, vxlan_port);
 	if (err)
-		goto error;
-
-	return 0;
-
-error:
-	list_del_rcu(&vxlan_port->list);
-	kfree(vxlan_port);
-out:
-	return err;
-}
+		goto error_free;
 
-static int vxlan_get_options(const struct vport *vport, struct sk_buff *skb)
-{
-	const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
-
-	if (nla_put_u16(skb, OVS_TUNNEL_ATTR_DST_PORT, ntohs(tnl_vport->dst_port)))
-		return -EMSGSIZE;
-	return 0;
+	return vport;
+error_free:
+	ovs_vport_free(vport);
+	return ERR_PTR(err);
 }
 
-static const struct tnl_ops ovs_vxlan_tnl_ops = {
-	.ipproto	= IPPROTO_UDP,
-	.hdr_len	= vxlan_hdr_len,
-	.build_header	= vxlan_build_header,
-};
-
-static void vxlan_tnl_destroy(struct vport *vport)
+static const char *vxlan_tnl_get_name(const struct vport *vport)
 {
-	struct vxlan_port *vxlan_port;
-	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
-
-	vxlan_port = vxlan_find_port(ovs_dp_get_net(vport->dp),
-					 tnl_vport->dst_port);
-
-	vxlan_tunnel_release(vxlan_port);
-	ovs_tnl_destroy(vport);
+	struct ovs_vxlan_port *ovs_vxlan_port = vxlan_vport_priv(vport);
+	return ovs_vxlan_port->name;
 }
 
-static struct vport *vxlan_tnl_create(const struct vport_parms *parms)
+static int vxlan_tnl_send(struct vport *vport, struct sk_buff *skb)
 {
-	int err;
-	struct vport *vport;
-
-	vport = ovs_tnl_create(parms, &ovs_vxlan_vport_ops, &ovs_vxlan_tnl_ops);
-	if (IS_ERR(vport))
-		return vport;
-
-	err = vxlan_tunnel_setup(ovs_dp_get_net(parms->dp), vport,
-				 parms->options);
-	if (err) {
-		ovs_tnl_destroy(vport);
-		return ERR_PTR(err);
-	}
-
-	return vport;
+	return ovs_tnl_send(vport, skb, IPPROTO_UDP,
+			    VXLAN_HLEN, __vxlan_build_header);
 }
 
 const struct vport_ops ovs_vxlan_vport_ops = {
@@ -311,9 +174,9 @@ const struct vport_ops ovs_vxlan_vport_ops = {
 	.flags		= VPORT_F_TUN_ID,
 	.create		= vxlan_tnl_create,
 	.destroy	= vxlan_tnl_destroy,
-	.get_name	= ovs_tnl_get_name,
+	.get_name	= vxlan_tnl_get_name,
 	.get_options	= vxlan_get_options,
-	.send		= ovs_tnl_send,
+	.send		= vxlan_tnl_send,
 };
 #else
 #warning VXLAN tunneling will not be available on kernels before 2.6.26
diff --git a/datapath/vport.c b/datapath/vport.c
index d458a95..bfaf410 100644
--- a/datapath/vport.c
+++ b/datapath/vport.c
@@ -208,6 +208,21 @@ void ovs_vport_free(struct vport *vport)
 	kfree(vport);
 }
 
+static void free_vport_rcu(struct rcu_head *rcu)
+{
+	struct vport *vport = container_of(rcu, struct vport, rcu);
+
+	ovs_vport_free(vport);
+}
+
+void ovs_vport_deferred_destroy(struct vport *vport)
+{
+	if (!vport)
+		return;
+
+	call_rcu(&vport->rcu, free_vport_rcu);
+}
+
 /**
  *	ovs_vport_add - add vport device (for kernel callers)
  *
diff --git a/datapath/vport.h b/datapath/vport.h
index 074c6ee..77e34f0 100644
--- a/datapath/vport.h
+++ b/datapath/vport.h
@@ -33,6 +33,8 @@ struct vport_parms;
 struct vport_net {
 	struct vport __rcu *gre_vport;
 	struct vport __rcu *gre64_vport;
+	char gre_name[IFNAMSIZ];
+	char gre64_name[IFNAMSIZ];
 };
 
 int ovs_vport_init(void);
@@ -40,6 +42,7 @@ void ovs_vport_exit(void);
 
 struct vport *ovs_vport_add(const struct vport_parms *);
 void ovs_vport_del(struct vport *);
+void ovs_vport_deferred_destroy(struct vport *vport);
 
 struct vport *ovs_vport_locate(struct net *net, const char *name);
 
-- 
1.7.1




More information about the dev mailing list