[ovs-dev] [PATCH V2 04/41] gre: refactor the gre_fb_xmit

Greg Rose gvrose8192 at gmail.com
Fri May 18 01:57:17 UTC 2018


From: William Tu <u9012063 at gmail.com>

Upstream commit:
    commit 862a03c35ed76c50a562f7406ad23315f7862642
    Author: William Tu <u9012063 at gmail.com>
    Date:   Fri Aug 25 09:21:27 2017 -0700

    gre: refactor the gre_fb_xmit

    The patch refactors the gre_fb_xmit function by creating a
    prepare_fb_xmit function for the later ERSPAN collect_md mode patch.

    Signed-off-by: William Tu <u9012063 at gmail.com>
    Signed-off-by: David S. Miller <davem at davemloft.net>

Only the prepare_fb_xmit() function is pulled in.  Compatibility
issues prevent refactoring gre_fb_xmit() itself, but the
prepare_fb_xmit() function is needed by the subsequent patch.
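
For reference, the upstream refactor has gre_fb_xmit() delegate its
route lookup, dst-cache handling and headroom expansion to
prepare_fb_xmit().  The compat gre_fb_xmit() keeps its open-coded
version, so the following abridged sketch of the upstream call pattern
is illustrative only and is not part of this patch:

static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev,
			__be16 proto)
{
	struct ip_tunnel_info *tun_info = skb_tunnel_info(skb);
	const struct ip_tunnel_key *key = &tun_info->key;
	struct rtable *rt;
	struct flowi4 fl;
	int tunnel_hlen = gre_calc_hlen(key->tun_flags);

	/* Route lookup, dst cache and headroom expansion now live in
	 * prepare_fb_xmit(); it frees the skb itself on failure.
	 */
	rt = prepare_fb_xmit(skb, dev, &fl, tunnel_hlen);
	if (!rt)
		return;

	/* ... build the GRE header and hand off to iptunnel_xmit() ... */
}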

Cc: William Tu <u9012063 at gmail.com>
Signed-off-by: Greg Rose <gvrose8192 at gmail.com>
---
 datapath/linux/compat/ip_gre.c | 48 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 48 insertions(+)

diff --git a/datapath/linux/compat/ip_gre.c b/datapath/linux/compat/ip_gre.c
index 3bd6e92..e30e428 100644
--- a/datapath/linux/compat/ip_gre.c
+++ b/datapath/linux/compat/ip_gre.c
@@ -488,6 +488,54 @@ static struct rtable *gre_get_rt(struct sk_buff *skb,
 	return ip_route_output_key(net, fl);
 }
 
+static struct rtable *prepare_fb_xmit(struct sk_buff *skb,
+				      struct net_device *dev,
+				      struct flowi4 *fl,
+				      int tunnel_hlen)
+{
+	struct ip_tunnel_info *tun_info;
+	const struct ip_tunnel_key *key;
+	struct rtable *rt = NULL;
+	int min_headroom;
+	bool use_cache;
+	int err;
+
+	tun_info = skb_tunnel_info(skb);
+	key = &tun_info->key;
+	use_cache = ip_tunnel_dst_cache_usable(skb, tun_info);
+
+	if (use_cache)
+		rt = dst_cache_get_ip4(&tun_info->dst_cache, &fl->saddr);
+	if (!rt) {
+		rt = gre_get_rt(skb, dev, fl, key);
+		if (IS_ERR(rt))
+			goto err_free_skb;
+		if (use_cache)
+			dst_cache_set_ip4(&tun_info->dst_cache, &rt->dst,
+					  fl->saddr);
+	}
+
+	min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
+			+ tunnel_hlen + sizeof(struct iphdr);
+	if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
+		int head_delta = SKB_DATA_ALIGN(min_headroom -
+						skb_headroom(skb) +
+						16);
+		err = pskb_expand_head(skb, max_t(int, head_delta, 0),
+				       0, GFP_ATOMIC);
+		if (unlikely(err))
+			goto err_free_rt;
+	}
+	return rt;
+
+err_free_rt:
+	ip_rt_put(rt);
+err_free_skb:
+	kfree_skb(skb);
+	dev->stats.tx_dropped++;
+	return NULL;
+}
+
 netdev_tx_t rpl_gre_fb_xmit(struct sk_buff *skb)
 {
 	struct net_device *dev = skb->dev;
-- 
1.8.3.1


