[ovs-dev] [PATCH V2 14/14] netdev-dpdk-offload: Add vxlan pattern matching function
Eli Britstein
elibr at nvidia.com
Wed Feb 10 15:27:02 UTC 2021
For VXLAN offload, matches should be done on the outer headers for the
tunnel properties as well as on the inner packet fields. Add a function
for parsing VXLAN tunnel matches.
Signed-off-by: Eli Britstein <elibr at nvidia.com>
Reviewed-by: Gaetan Rivet <gaetanr at nvidia.com>
---
 NEWS                      |   1 +
 lib/netdev-offload-dpdk.c | 169 +++++++++++++++++++++++++++++++++++++-
 2 files changed, 168 insertions(+), 2 deletions(-)
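
A note on the VNI handling (illustration only, not part of the patch):
struct rte_flow_item_vxlan carries the VNI as three network-order bytes,
while OVS keeps the tunnel ID in a 64-bit tun_id. The standalone sketch
below mimics the byte shuffling done by parse_vxlan_match() and
dump_flow_pattern() in the diff, using plain host integers in place of
OVS's ovs_be64/ntohll()/put_unaligned_be32() helpers; the VNI value and
the main() wrapper are made up for the example.

/* Illustration only: 24-bit VNI <-> 3-byte network-order vni[] field. */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
    uint64_t tun_id = 100;   /* VNI 100, host byte order for brevity. */
    uint8_t vni[3];          /* Laid out as in rte_flow_item_vxlan. */
    uint32_t be, tmp = 0;

    /* Tunnel ID -> 3-byte network-order VNI: the direction taken by
     * parse_vxlan_match() via put_unaligned_be32(). */
    be = htonl((uint32_t) (tun_id << 8));
    memcpy(vni, &be, sizeof vni);

    /* 3-byte network-order VNI -> host value: the direction taken by
     * dump_flow_pattern() when printing the vxlan item. */
    memcpy(&tmp, vni, sizeof vni);
    printf("vni %02x%02x%02x -> %u\n",
           (unsigned) vni[0], (unsigned) vni[1], (unsigned) vni[2],
           (unsigned) (ntohl(tmp) >> 8));
    return 0;
}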
diff --git a/NEWS b/NEWS
index 6850d5621..2ed67ea27 100644
--- a/NEWS
+++ b/NEWS
@@ -27,6 +27,7 @@ v2.15.0 - xx xxx xxxx
      * Removed support for vhost-user dequeue zero-copy.
      * Add support for DPDK 20.11.
      * Add hardware offload support for tunnel pop action (experimental).
+     * Add hardware offload support for VXLAN flows (experimental).
    - Userspace datapath:
      * Add the 'pmd' option to "ovs-appctl dpctl/dump-flows", which
        restricts a flow dump to a single PMD thread if set.
diff --git a/lib/netdev-offload-dpdk.c b/lib/netdev-offload-dpdk.c
index ad47d717f..6702548c3 100644
--- a/lib/netdev-offload-dpdk.c
+++ b/lib/netdev-offload-dpdk.c
@@ -366,6 +366,20 @@ dump_flow_pattern(struct ds *s,
                               ipv6_mask->hdr.hop_limits);
         }
         ds_put_cstr(s, "/ ");
+    } else if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
+        const struct rte_flow_item_vxlan *vxlan_spec = item->spec;
+        const struct rte_flow_item_vxlan *vxlan_mask = item->mask;
+
+        ds_put_cstr(s, "vxlan ");
+        if (vxlan_spec) {
+            if (!vxlan_mask) {
+                vxlan_mask = &rte_flow_item_vxlan_mask;
+            }
+            DUMP_PATTERN_ITEM(vxlan_mask->vni, "vni", "%"PRIu32,
+                              ntohl(*(ovs_be32 *) vxlan_spec->vni) >> 8,
+                              ntohl(*(ovs_be32 *) vxlan_mask->vni) >> 8);
+        }
+        ds_put_cstr(s, "/ ");
     } else {
         ds_put_format(s, "unknown rte flow pattern (%d)\n", item->type);
     }
@@ -783,7 +797,154 @@ free_flow_actions(struct flow_actions *actions)
 }
 
 static int
-parse_flow_match(struct flow_patterns *patterns,
+parse_tnl_ip_match(struct flow_patterns *patterns,
+                   struct match *match,
+                   uint8_t proto)
+{
+    struct flow *consumed_masks;
+
+    consumed_masks = &match->wc.masks;
+    /* IP v4 */
+    if (match->wc.masks.tunnel.ip_src || match->wc.masks.tunnel.ip_dst) {
+        struct rte_flow_item_ipv4 *spec, *mask;
+
+        spec = xzalloc(sizeof *spec);
+        mask = xzalloc(sizeof *mask);
+
+        spec->hdr.type_of_service = match->flow.tunnel.ip_tos;
+        spec->hdr.time_to_live = match->flow.tunnel.ip_ttl;
+        spec->hdr.next_proto_id = proto;
+        spec->hdr.src_addr = match->flow.tunnel.ip_src;
+        spec->hdr.dst_addr = match->flow.tunnel.ip_dst;
+
+        mask->hdr.type_of_service = match->wc.masks.tunnel.ip_tos;
+        mask->hdr.time_to_live = match->wc.masks.tunnel.ip_ttl;
+        mask->hdr.next_proto_id = UINT8_MAX;
+        mask->hdr.src_addr = match->wc.masks.tunnel.ip_src;
+        mask->hdr.dst_addr = match->wc.masks.tunnel.ip_dst;
+
+        consumed_masks->tunnel.ip_tos = 0;
+        consumed_masks->tunnel.ip_ttl = 0;
+        consumed_masks->tunnel.ip_src = 0;
+        consumed_masks->tunnel.ip_dst = 0;
+
+        add_flow_pattern(patterns, RTE_FLOW_ITEM_TYPE_IPV4, spec, mask);
+    } else if (!is_all_zeros(&match->wc.masks.tunnel.ipv6_src,
+                             sizeof(struct in6_addr)) ||
+               !is_all_zeros(&match->wc.masks.tunnel.ipv6_dst,
+                             sizeof(struct in6_addr))) {
+        /* IP v6 */
+        struct rte_flow_item_ipv6 *spec, *mask;
+
+        spec = xzalloc(sizeof *spec);
+        mask = xzalloc(sizeof *mask);
+
+        spec->hdr.proto = proto;
+        spec->hdr.hop_limits = match->flow.tunnel.ip_ttl;
+        spec->hdr.vtc_flow = htonl((uint32_t) match->flow.tunnel.ip_tos <<
+                                   RTE_IPV6_HDR_TC_SHIFT);
+        memcpy(spec->hdr.src_addr, &match->flow.tunnel.ipv6_src,
+               sizeof spec->hdr.src_addr);
+        memcpy(spec->hdr.dst_addr, &match->flow.tunnel.ipv6_dst,
+               sizeof spec->hdr.dst_addr);
+
+        mask->hdr.proto = UINT8_MAX;
+        mask->hdr.hop_limits = match->wc.masks.tunnel.ip_ttl;
+        mask->hdr.vtc_flow = htonl((uint32_t) match->wc.masks.tunnel.ip_tos <<
+                                   RTE_IPV6_HDR_TC_SHIFT);
+        memcpy(mask->hdr.src_addr, &match->wc.masks.tunnel.ipv6_src,
+               sizeof mask->hdr.src_addr);
+        memcpy(mask->hdr.dst_addr, &match->wc.masks.tunnel.ipv6_dst,
+               sizeof mask->hdr.dst_addr);
+
+        consumed_masks->tunnel.ip_tos = 0;
+        consumed_masks->tunnel.ip_ttl = 0;
+        memset(&consumed_masks->tunnel.ipv6_src, 0,
+               sizeof consumed_masks->tunnel.ipv6_src);
+        memset(&consumed_masks->tunnel.ipv6_dst, 0,
+               sizeof consumed_masks->tunnel.ipv6_dst);
+
+        add_flow_pattern(patterns, RTE_FLOW_ITEM_TYPE_IPV6, spec, mask);
+    } else {
+        VLOG_ERR_RL(&rl, "Tunnel L3 protocol is neither IPv4 nor IPv6");
+        return -1;
+    }
+
+    return 0;
+}
+
+static void
+parse_tnl_udp_match(struct flow_patterns *patterns,
+                    struct match *match)
+{
+    struct flow *consumed_masks;
+    struct rte_flow_item_udp *spec, *mask;
+
+    consumed_masks = &match->wc.masks;
+
+    spec = xzalloc(sizeof *spec);
+    mask = xzalloc(sizeof *mask);
+
+    spec->hdr.src_port = match->flow.tunnel.tp_src;
+    spec->hdr.dst_port = match->flow.tunnel.tp_dst;
+
+    mask->hdr.src_port = match->wc.masks.tunnel.tp_src;
+    mask->hdr.dst_port = match->wc.masks.tunnel.tp_dst;
+
+    consumed_masks->tunnel.tp_src = 0;
+    consumed_masks->tunnel.tp_dst = 0;
+
+    add_flow_pattern(patterns, RTE_FLOW_ITEM_TYPE_UDP, spec, mask);
+}
+
+static int
+parse_vxlan_match(struct flow_patterns *patterns,
+                  struct match *match)
+{
+    struct rte_flow_item_vxlan *vx_spec, *vx_mask;
+    struct flow *consumed_masks;
+    int ret;
+
+    ret = parse_tnl_ip_match(patterns, match, IPPROTO_UDP);
+    if (ret) {
+        return -1;
+    }
+    parse_tnl_udp_match(patterns, match);
+
+    consumed_masks = &match->wc.masks;
+    /* VXLAN */
+    vx_spec = xzalloc(sizeof *vx_spec);
+    vx_mask = xzalloc(sizeof *vx_mask);
+
+    put_unaligned_be32((ovs_be32 *) vx_spec->vni,
+                       htonl(ntohll(match->flow.tunnel.tun_id) << 8));
+    put_unaligned_be32((ovs_be32 *) vx_mask->vni,
+                       htonl(ntohll(match->wc.masks.tunnel.tun_id) << 8));
+
+    consumed_masks->tunnel.tun_id = 0;
+    consumed_masks->tunnel.flags = 0;
+
+    add_flow_pattern(patterns, RTE_FLOW_ITEM_TYPE_VXLAN, vx_spec, vx_mask);
+    return 0;
+}
+
+static int
+parse_flow_tnl_match(struct netdev *tnldev,
+                     struct flow_patterns *patterns,
+                     struct match *match)
+{
+    if (!netdev_vport_is_vport_class(tnldev->netdev_class)) {
+        return -1;
+    }
+    if (!strcmp(netdev_get_type(tnldev), "vxlan")) {
+        return parse_vxlan_match(patterns, match);
+    }
+    return -1;
+}
+
+static int
+parse_flow_match(struct netdev *netdev,
+                 struct flow_patterns *patterns,
                  struct match *match)
 {
     struct flow *consumed_masks;
@@ -793,6 +954,10 @@ parse_flow_match(struct flow_patterns *patterns,
 
     if (!flow_tnl_dst_is_set(&match->flow.tunnel)) {
         memset(&consumed_masks->tunnel, 0, sizeof consumed_masks->tunnel);
+    } else {
+        if (parse_flow_tnl_match(netdev, patterns, match)) {
+            return -1;
+        }
     }
 
     memset(&consumed_masks->in_port, 0, sizeof consumed_masks->in_port);
@@ -1636,7 +1801,7 @@ netdev_offload_dpdk_add_flow(struct netdev *netdev,
     struct ufid_to_rte_flow_data *flow_data = NULL;
     int err;
 
-    if (parse_flow_match(&patterns, match)) {
+    if (parse_flow_match(netdev, &patterns, match)) {
         VLOG_DBG_RL(&rl, "%s: matches of ufid "UUID_FMT" are not supported",
                     netdev_get_name(netdev), UUID_ARGS((struct uuid *) ufid));
         goto out;
--
2.28.0.546.g385c171
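
A note on the resulting pattern layout (illustration only, not part of the
patch): with this series, the rte_flow pattern for a VXLAN flow starts with
the outer items built by parse_flow_tnl_match() and continues with the
inner-packet items that parse_flow_match() already produced before this
change. Roughly, with specs and masks omitted (an omitted spec simply means
"match any"), and assuming an IPv4 outer and inner header for the example:

#include <rte_flow.h>

/* Approximate item sequence for an offloaded VXLAN flow (illustrative). */
static const struct rte_flow_item vxlan_items[] = {
    { .type = RTE_FLOW_ITEM_TYPE_IPV4 },  /* outer IP, parse_tnl_ip_match() */
    { .type = RTE_FLOW_ITEM_TYPE_UDP },   /* outer UDP, parse_tnl_udp_match() */
    { .type = RTE_FLOW_ITEM_TYPE_VXLAN }, /* VNI, parse_vxlan_match() */
    { .type = RTE_FLOW_ITEM_TYPE_ETH },   /* inner Ethernet, pre-existing code */
    { .type = RTE_FLOW_ITEM_TYPE_IPV4 },  /* inner IP, pre-existing code */
    { .type = RTE_FLOW_ITEM_TYPE_END },
};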