[ovs-dev] [PATCH ovs V4 10/24] netdev-tc-offloads: Implement netdev flow dump api using tc interface
Roi Dayan
roid at mellanox.com
Mon Mar 13 13:37:00 UTC 2017
From: Paul Blakey <paulb at mellanox.com>
Signed-off-by: Paul Blakey <paulb at mellanox.com>
Reviewed-by: Roi Dayan <roid at mellanox.com>
---
lib/netdev-tc-offloads.c | 161 +++++++++++++++++++++++++++++++++++++++++++--
1 files changed, 153 insertions(+), 8 deletions(-)
diff --git a/lib/netdev-tc-offloads.c b/lib/netdev-tc-offloads.c
index bdde965..4c4e43f 100644
--- a/lib/netdev-tc-offloads.c
+++ b/lib/netdev-tc-offloads.c
@@ -239,9 +239,20 @@ int
netdev_tc_flow_dump_create(struct netdev *netdev,
struct netdev_flow_dump **dump_out)
{
- struct netdev_flow_dump *dump = xzalloc(sizeof *dump);
+ struct netdev_flow_dump *dump;
+ int ifindex;
+ ifindex = netdev_get_ifindex(netdev);
+ if (ifindex < 0) {
+ VLOG_ERR_RL(&rl_err, "failed to get ifindex for %s: %s",
+ netdev_get_name(netdev), ovs_strerror(-ifindex));
+ return -ifindex;
+ }
+
+ dump = xzalloc(sizeof *dump);
+ dump->nl_dump = xzalloc(sizeof *dump->nl_dump);
dump->netdev = netdev_ref(netdev);
+ tc_dump_flower_start(ifindex, dump->nl_dump);
*dump_out = dump;
@@ -251,21 +262,155 @@ netdev_tc_flow_dump_create(struct netdev *netdev,
int
netdev_tc_flow_dump_destroy(struct netdev_flow_dump *dump)
{
+ nl_dump_done(dump->nl_dump);
netdev_close(dump->netdev);
+ free(dump->nl_dump);
free(dump);
+ return 0;
+}
+
+static int
+parse_tc_flower_to_match(struct tc_flower *flower,
+ struct match *match,
+ struct nlattr **actions,
+ struct dpif_flow_stats *stats,
+ struct ofpbuf *buf) {
+ size_t act_off;
+ struct tc_flower_key *key = &flower->key;
+ struct tc_flower_key *mask = &flower->mask;
+ odp_port_t outport = 0;
+
+ if (flower->ifindex_out) {
+ outport = netdev_hmap_port_get_byifidx(flower->ifindex_out);
+ if (!outport) {
+ return ENOENT;
+ }
+ }
+
+ ofpbuf_clear(buf);
+
+ match_init_catchall(match);
+ match_set_dl_type(match, key->eth_type);
+ match_set_dl_src_masked(match, key->src_mac, mask->src_mac);
+ match_set_dl_dst_masked(match, key->dst_mac, mask->dst_mac);
+ if (key->vlan_id || key->vlan_prio) {
+ match_set_dl_vlan(match, htons(key->vlan_id));
+ match_set_dl_vlan_pcp(match, key->vlan_prio);
+ match_set_dl_type(match, key->encap_eth_type);
+ }
+
+ if (key->ip_proto &&
+ (key->eth_type == htons(ETH_P_IP)
+ || key->eth_type == htons(ETH_P_IPV6))) {
+ match_set_nw_proto(match, key->ip_proto);
+ }
+ match_set_nw_src_masked(match, key->ipv4.ipv4_src, mask->ipv4.ipv4_src);
+ match_set_nw_dst_masked(match, key->ipv4.ipv4_dst, mask->ipv4.ipv4_dst);
+
+ match_set_ipv6_src_masked(match,
+ (const struct in6_addr *) key->ipv6.ipv6_src,
+ (const struct in6_addr *) mask->ipv6.ipv6_src);
+ match_set_ipv6_dst_masked(match,
+ (const struct in6_addr *) key->ipv6.ipv6_dst,
+ (const struct in6_addr *) mask->ipv6.ipv6_dst);
+
+ match_set_tp_dst_masked(match, key->dst_port, mask->dst_port);
+ match_set_tp_src_masked(match, key->src_port, mask->src_port);
+
+ if (flower->tunnel.tunnel) {
+ match_set_tun_id(match, flower->tunnel.id);
+ match_set_tun_src(match, flower->tunnel.ipv4_src);
+ match_set_tun_dst(match, flower->tunnel.ipv4_dst);
+ match_set_tp_dst(match, flower->tunnel.tp_dst);
+ }
+
+ act_off = nl_msg_start_nested(buf, OVS_FLOW_ATTR_ACTIONS);
+ {
+ if (flower->vlan_pop) {
+ nl_msg_put_flag(buf, OVS_ACTION_ATTR_POP_VLAN);
+ }
+
+ if (flower->vlan_push_id || flower->vlan_push_prio) {
+ struct ovs_action_push_vlan *push;
+ push = nl_msg_put_unspec_zero(buf, OVS_ACTION_ATTR_PUSH_VLAN,
+ sizeof *push);
+
+ push->vlan_tpid = htons(ETH_TYPE_VLAN);
+ push->vlan_tci = htons(flower->vlan_push_id
+ | (flower->vlan_push_prio << 13)
+ | VLAN_CFI);
+ }
+
+ if (flower->ifindex_out > 0) {
+ nl_msg_put_u32(buf, OVS_ACTION_ATTR_OUTPUT, odp_to_u32(outport));
+ }
+
+ if (flower->set.set) {
+ size_t set_offset = nl_msg_start_nested(buf, OVS_ACTION_ATTR_SET);
+ size_t tunnel_offset =
+ nl_msg_start_nested(buf, OVS_KEY_ATTR_TUNNEL);
+
+ nl_msg_put_be64(buf, OVS_TUNNEL_KEY_ATTR_ID, flower->set.id);
+ nl_msg_put_be32(buf, OVS_TUNNEL_KEY_ATTR_IPV4_SRC,
+ flower->set.ipv4_src);
+ nl_msg_put_be32(buf, OVS_TUNNEL_KEY_ATTR_IPV4_DST,
+ flower->set.ipv4_dst);
+ nl_msg_put_be16(buf, OVS_TUNNEL_KEY_ATTR_TP_DST,
+ flower->set.tp_dst);
+
+ nl_msg_end_nested(buf, tunnel_offset);
+ nl_msg_end_nested(buf, set_offset);
+ }
+ }
+ nl_msg_end_nested(buf, act_off);
+
+ *actions = ofpbuf_at_assert(buf, act_off, sizeof(struct nlattr));
+
+ if (stats) {
+ memset(stats, 0, sizeof *stats);
+ stats->n_packets = get_32aligned_u64(&flower->stats.n_packets);
+ stats->n_bytes = get_32aligned_u64(&flower->stats.n_bytes);
+ stats->used = flower->lastused;
+ }
return 0;
}
bool
-netdev_tc_flow_dump_next(struct netdev_flow_dump *dump OVS_UNUSED,
- struct match *match OVS_UNUSED,
- struct nlattr **actions OVS_UNUSED,
- struct dpif_flow_stats *stats OVS_UNUSED,
- ovs_u128 *ufid OVS_UNUSED,
- struct ofpbuf *rbuffer OVS_UNUSED,
- struct ofpbuf *wbuffer OVS_UNUSED)
+netdev_tc_flow_dump_next(struct netdev_flow_dump *dump,
+ struct match *match,
+ struct nlattr **actions,
+ struct dpif_flow_stats *stats,
+ ovs_u128 *ufid,
+ struct ofpbuf *rbuffer,
+ struct ofpbuf *wbuffer)
{
+ struct ofpbuf nl_flow;
+
+ while (nl_dump_next(dump->nl_dump, &nl_flow, rbuffer)) {
+ struct tc_flower flower;
+ ovs_u128 uf;
+
+ if (parse_netlink_to_tc_flower(&nl_flow, &flower)) {
+ continue;
+ }
+
+ if (parse_tc_flower_to_match(&flower, match, actions, stats,
+ wbuffer)) {
+ continue;
+ }
+
+ if (!find_ufid(flower.prio, flower.handle, dump->netdev, &uf)) {
+ continue;
+ }
+
+ *ufid = uf;
+ match->wc.masks.in_port.odp_port = u32_to_odp(UINT32_MAX);
+ match->flow.in_port.odp_port = dump->port;
+
+ return true;
+ }
+
return false;
}
--
1.7.1
More information about the dev mailing list