[ovs-dev] [PATCH 2/4] netdev-offload-dpdk: Support IPv4 fragmentation types

Finn, Emma emma.finn at intel.com
Thu Jan 14 09:09:39 UTC 2021



> -----Original Message-----
> From: dev <ovs-dev-bounces at openvswitch.org> On Behalf Of Eli Britstein
> Sent: Sunday 3 January 2021 11:55
> To: dev at openvswitch.org; Ilya Maximets <i.maximets at ovn.org>
> Cc: Eli Britstein <elibr at nvidia.com>
> Subject: [ovs-dev] [PATCH 2/4] netdev-offload-dpdk: Support IPv4
> fragmentation types
> 
> Support IPv4 fragmentation matching.
> 
> Signed-off-by: Eli Britstein <elibr at nvidia.com>
> ---
>  lib/netdev-offload-dpdk.c | 47 +++++++++++++++++++++++++++++++++++++--
>  1 file changed, 45 insertions(+), 2 deletions(-)
> 
> diff --git a/lib/netdev-offload-dpdk.c b/lib/netdev-offload-dpdk.c
> index 5bf486497..d1b754211 100644
> --- a/lib/netdev-offload-dpdk.c
> +++ b/lib/netdev-offload-dpdk.c
> @@ -210,12 +210,18 @@ dump_flow_pattern(struct ds *s, const struct rte_flow_item *item)
>      } else if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
>          const struct rte_flow_item_ipv4 *ipv4_spec = item->spec;
>          const struct rte_flow_item_ipv4 *ipv4_mask = item->mask;
> +        const struct rte_flow_item_ipv4 *ipv4_last = item->last;
> 
>          ds_put_cstr(s, "ipv4 ");
>          if (ipv4_spec) {
> +            ovs_be16 fragment_offset_mask;
> +
>              if (!ipv4_mask) {
>                  ipv4_mask = &rte_flow_item_ipv4_mask;
>              }
> +            if (!ipv4_last) {
> +                ipv4_last = &rte_flow_item_ipv4_mask;
> +            }
>              DUMP_PATTERN_ITEM(ipv4_mask->hdr.src_addr, false, "src", IP_FMT,
>                                IP_ARGS(ipv4_spec->hdr.src_addr),
>                                IP_ARGS(ipv4_mask->hdr.src_addr), IP_ARGS(0));
> @@ -231,6 +237,16 @@ dump_flow_pattern(struct ds *s, const struct rte_flow_item *item)
>              DUMP_PATTERN_ITEM(ipv4_mask->hdr.time_to_live, false, "ttl",
>                                "0x%"PRIx8, ipv4_spec->hdr.time_to_live,
>                                ipv4_mask->hdr.time_to_live, 0);
> +            fragment_offset_mask = ipv4_mask->hdr.fragment_offset ==
> +                                   htons(RTE_IPV4_HDR_OFFSET_MASK |
> +                                         RTE_IPV4_HDR_MF_FLAG)
> +                                   ? OVS_BE16_MAX
> +                                   : ipv4_mask->hdr.fragment_offset;
> +            DUMP_PATTERN_ITEM(fragment_offset_mask, item->last,
> +                              "fragment_offset", "0x%"PRIx16,
> +                              ntohs(ipv4_spec->hdr.fragment_offset),
> +                              ntohs(ipv4_mask->hdr.fragment_offset),
> +                              ntohs(ipv4_last->hdr.fragment_offset));
>          }
>          ds_put_cstr(s, "/ ");
>      } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
> @@ -764,7 +780,7 @@ parse_flow_match(struct flow_patterns *patterns,
> 
>      /* IP v4 */
>      if (match->flow.dl_type == htons(ETH_TYPE_IP)) {
> -        struct rte_flow_item_ipv4 *spec, *mask;
> +        struct rte_flow_item_ipv4 *spec, *mask, *last = NULL;
> 
>          spec = xzalloc(sizeof *spec);
>          mask = xzalloc(sizeof *mask);
> @@ -787,7 +803,34 @@ parse_flow_match(struct flow_patterns *patterns,
>          consumed_masks->nw_src = 0;
>          consumed_masks->nw_dst = 0;
> 
> -        add_flow_pattern(patterns, RTE_FLOW_ITEM_TYPE_IPV4, spec, mask, NULL);
> +        if (match->wc.masks.nw_frag & FLOW_NW_FRAG_ANY) {
> +            if (!(match->flow.nw_frag & FLOW_NW_FRAG_ANY)) {
> +                /* frag=no. */
> +                spec->hdr.fragment_offset = 0;

Hi Eli, 
Why not also set the mask below to zero when we are not dealing with fragmented packets?
As it stands, this breaks hardware offload of non-fragmented packets on XL710 devices:
the i40e PMD in DPDK expects this mask to be zero.
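To make the question concrete, the change being asked about would look roughly like
this against the two mask lines quoted just below (untested, for illustration only),
i.e. leaving fragment_offset out of the match entirely in the frag=no case:

    -                mask->hdr.fragment_offset = htons(RTE_IPV4_HDR_OFFSET_MASK |
    -                                                  RTE_IPV4_HDR_MF_FLAG);
    +                mask->hdr.fragment_offset = 0;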

Thanks,
Emma

> +                mask->hdr.fragment_offset = htons(RTE_IPV4_HDR_OFFSET_MASK |
> +                                                  RTE_IPV4_HDR_MF_FLAG);
> +            } else if (match->wc.masks.nw_frag & FLOW_NW_FRAG_LATER) {
> +                if (!(match->flow.nw_frag & FLOW_NW_FRAG_LATER)) {
> +                    /* frag=first. */
> +                    spec->hdr.fragment_offset = htons(RTE_IPV4_HDR_MF_FLAG);
> +                    mask->hdr.fragment_offset = htons(RTE_IPV4_HDR_OFFSET_MASK |
> +                                                      RTE_IPV4_HDR_MF_FLAG);
> +                } else {
> +                    /* frag=later. */
> +                    last = xzalloc(sizeof *last);
> +                    spec->hdr.fragment_offset = htons(1 << RTE_IPV4_HDR_FO_SHIFT);
> +                    mask->hdr.fragment_offset = htons(RTE_IPV4_HDR_OFFSET_MASK);
> +                    last->hdr.fragment_offset = htons(RTE_IPV4_HDR_OFFSET_MASK);
> +                }
> +            } else {
> +                VLOG_WARN_RL(&rl, "Unknown IPv4 frag (0x%x/0x%x)",
> +                             match->flow.nw_frag, match->wc.masks.nw_frag);
> +                return -1;
> +            }
> +            consumed_masks->nw_frag = 0;
> +        }
> +
> +        add_flow_pattern(patterns, RTE_FLOW_ITEM_TYPE_IPV4, spec, mask, last);
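
One note for other readers rather than a change request: because a "last" item is
passed only in the frag=later branch, rte_flow matches the masked fragment_offset
field as an inclusive spec..last range there instead of an exact value, which is how
a whole span of fragment offsets is covered by a single rule. Below is a small
standalone sketch of the frag=no / frag=first / frag=later split these patterns are
meant to encode; the constants and helper name are my own for illustration (assuming
the usual IPv4 layout: MF flag in bit 13, 13-bit fragment offset in bits 0-12), not
taken from the patch:

    #include <stdint.h>
    #include <stdio.h>
    #include <arpa/inet.h>

    #define MF_FLAG     0x2000   /* assumed equal to RTE_IPV4_HDR_MF_FLAG */
    #define OFFSET_MASK 0x1fff   /* assumed equal to RTE_IPV4_HDR_OFFSET_MASK */

    /* Classify an IPv4 fragment_offset field (network byte order, as it sits
     * in the header) the same way the rte_flow patterns above split traffic. */
    static const char *
    classify_frag(uint16_t frag_off_be)
    {
        uint16_t fo = ntohs(frag_off_be);

        if (fo & OFFSET_MASK) {
            return "later";   /* non-zero offset: covered by the spec..last range */
        } else if (fo & MF_FLAG) {
            return "first";   /* offset 0 with MF set */
        } else {
            return "no";      /* offset 0, MF clear: not a fragment */
        }
    }

    int
    main(void)
    {
        printf("%s %s %s\n",
               classify_frag(htons(0x0000)),   /* no    */
               classify_frag(htons(0x2000)),   /* first */
               classify_frag(htons(0x0001)));  /* later */
        return 0;
    }

Running it prints "no first later", which lines up with the three branches above.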
> 
>          /* Save proto for L4 protocol setup. */
>          proto = spec->hdr.next_proto_id &
> --
> 2.28.0.546.g385c171
> 
> _______________________________________________
> dev mailing list
> dev at openvswitch.org
> https://mail.openvswitch.org/mailman/listinfo/ovs-dev

