[ovs-dev] [PATCH] netdev-offload-dpdk: Pass L4 proto-id to match in the L3 rte_flow_item

Sriharsha Basavapatna sriharsha.basavapatna at broadcom.com
Tue Oct 20 18:03:52 UTC 2020


As an optimization, the offload layer clears the L4 protocol mask in the
L3 item when an L4 item is also passed for matching. This can be
confusing for the PMD while parsing the headers, and the datapath flow
explicitly specifies this field to be matched. Such an optimization is
best left to the PMD. This patch restores passing the L4 protocol type
in the L3 match.
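
For illustration only (not part of the patch): a minimal sketch of the
kind of rte_flow pattern that results from this change for an IPv4/TCP
flow, where the IPv4 item keeps a fully masked next_proto_id even though
a TCP item follows. The helper name build_ipv4_tcp_pattern() is made up
for this example; the structures and enums are the standard DPDK
rte_flow API.

    #include <netinet/in.h>
    #include <rte_flow.h>

    /* Illustrative helper (not in the tree): builds IPv4 + TCP items,
     * keeping the L4 protocol match in the L3 item instead of clearing
     * its mask. */
    static void
    build_ipv4_tcp_pattern(struct rte_flow_item items[3],
                           struct rte_flow_item_ipv4 *ip_spec,
                           struct rte_flow_item_ipv4 *ip_mask,
                           struct rte_flow_item_tcp *tcp_spec,
                           struct rte_flow_item_tcp *tcp_mask)
    {
        ip_spec->hdr.next_proto_id = IPPROTO_TCP;
        ip_mask->hdr.next_proto_id = 0xff;   /* Kept; no longer zeroed. */

        items[0] = (struct rte_flow_item) {
            .type = RTE_FLOW_ITEM_TYPE_IPV4,
            .spec = ip_spec,
            .mask = ip_mask,
        };
        items[1] = (struct rte_flow_item) {
            .type = RTE_FLOW_ITEM_TYPE_TCP,
            .spec = tcp_spec,
            .mask = tcp_mask,
        };
        items[2] = (struct rte_flow_item) {
            .type = RTE_FLOW_ITEM_TYPE_END,
        };
    }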

Fixes: e8a2b5bf92bb ("netdev-dpdk: implement flow offload with rte flow")
Signed-off-by: Sriharsha Basavapatna <sriharsha.basavapatna at broadcom.com>
Acked-by: Eli Britstein <elibr at mellanox.com>

---
v3: Updated "Acked-by:" and rebased.

v2: Updated "Fixes:" tag with the correct commit id.
---

 lib/netdev-offload-dpdk.c | 23 -----------------------
 1 file changed, 23 deletions(-)

diff --git a/lib/netdev-offload-dpdk.c b/lib/netdev-offload-dpdk.c
index 4d19f93cd..786193e16 100644
--- a/lib/netdev-offload-dpdk.c
+++ b/lib/netdev-offload-dpdk.c
@@ -676,7 +676,6 @@ static int
 parse_flow_match(struct flow_patterns *patterns,
                  struct match *match)
 {
-    uint8_t *next_proto_mask = NULL;
     struct flow *consumed_masks;
     uint8_t proto = 0;
 
@@ -782,7 +781,6 @@ parse_flow_match(struct flow_patterns *patterns,
         /* Save proto for L4 protocol setup. */
         proto = spec->hdr.next_proto_id &
                 mask->hdr.next_proto_id;
-        next_proto_mask = &mask->hdr.next_proto_id;
     }
     /* If fragmented, then don't HW accelerate - for now. */
     if (match->wc.masks.nw_frag & match->flow.nw_frag) {
@@ -825,7 +823,6 @@ parse_flow_match(struct flow_patterns *patterns,
 
         /* Save proto for L4 protocol setup. */
         proto = spec->hdr.proto & mask->hdr.proto;
-        next_proto_mask = &mask->hdr.proto;
     }
 
     if (proto != IPPROTO_ICMP && proto != IPPROTO_UDP  &&
@@ -858,11 +855,6 @@ parse_flow_match(struct flow_patterns *patterns,
         consumed_masks->tcp_flags = 0;
 
         add_flow_pattern(patterns, RTE_FLOW_ITEM_TYPE_TCP, spec, mask);
-
-        /* proto == TCP and ITEM_TYPE_TCP, thus no need for proto match. */
-        if (next_proto_mask) {
-            *next_proto_mask = 0;
-        }
     } else if (proto == IPPROTO_UDP) {
         struct rte_flow_item_udp *spec, *mask;
 
@@ -879,11 +871,6 @@ parse_flow_match(struct flow_patterns *patterns,
         consumed_masks->tp_dst = 0;
 
         add_flow_pattern(patterns, RTE_FLOW_ITEM_TYPE_UDP, spec, mask);
-
-        /* proto == UDP and ITEM_TYPE_UDP, thus no need for proto match. */
-        if (next_proto_mask) {
-            *next_proto_mask = 0;
-        }
     } else if (proto == IPPROTO_SCTP) {
         struct rte_flow_item_sctp *spec, *mask;
 
@@ -900,11 +887,6 @@ parse_flow_match(struct flow_patterns *patterns,
         consumed_masks->tp_dst = 0;
 
         add_flow_pattern(patterns, RTE_FLOW_ITEM_TYPE_SCTP, spec, mask);
-
-        /* proto == SCTP and ITEM_TYPE_SCTP, thus no need for proto match. */
-        if (next_proto_mask) {
-            *next_proto_mask = 0;
-        }
     } else if (proto == IPPROTO_ICMP) {
         struct rte_flow_item_icmp *spec, *mask;
 
@@ -921,11 +903,6 @@ parse_flow_match(struct flow_patterns *patterns,
         consumed_masks->tp_dst = 0;
 
         add_flow_pattern(patterns, RTE_FLOW_ITEM_TYPE_ICMP, spec, mask);
-
-        /* proto == ICMP and ITEM_TYPE_ICMP, thus no need for proto match. */
-        if (next_proto_mask) {
-            *next_proto_mask = 0;
-        }
     }
 
     add_flow_pattern(patterns, RTE_FLOW_ITEM_TYPE_END, NULL, NULL);
-- 
2.25.0.rc2


