[ovs-dev] [PATCH V4 08/17] dpif-netdev: Update offloaded flows statistics
Ilya Maximets
i.maximets at ovn.org
Mon Dec 16 18:47:51 UTC 2019
On 16.12.2019 16:10, Eli Britstein wrote:
> From: Ophir Munk <ophirmu at mellanox.com>
>
> In case a flow is HW offloaded, packets do not reach the SW, thus not
> counted for statistics. Use netdev flow get API in order to update the
> statistics of flows by the HW statistics.
>
> Co-authored-by: Eli Britstein <elibr at mellanox.com>
> Signed-off-by: Ophir Munk <ophirmu at mellanox.com>
> Reviewed-by: Oz Shlomo <ozsh at mellanox.com>
> Signed-off-by: Eli Britstein <elibr at mellanox.com>
> ---
> lib/dpif-netdev.c | 81 ++++++++++++++++++++++++++++++++++++++++++++++---------
> 1 file changed, 68 insertions(+), 13 deletions(-)
Some issues of this patch:
1. stats are duplicated now in dpif-netdev. We already have them
in netdev-offload-dpdk, so we don't need to store them here too.
2. dpif_flow_attrs should be filled by the offload provider, not by
the datapath. netdev-offload-tc fills this information and it
will be lost in current implementation of dpif-netdev.
3. New 'dp_layer' types have to be documented in dpctl man.
Also, 'in_hw' doesn't look like a datapath layer name.
Suggesting to use 'dpdk' as netdev-offload-tc uses 'tc'.
We also could return specific dp_layer for partially offloaded
flows, i.e. 'ovs-partial'.
Suggesting the following incremental patch, where I tried to address the above
issues (didn't update the man page, only compile tested, made on top of the full set):
-----
Subject: [PATCH] dpif-netdev: Cleanup for stats/attrs of offloaded flows.
Signed-off-by: Ilya Maximets <i.maximets at ovn.org>
---
lib/dpif-netdev.c | 146 +++++++++++++++++++-------------------
lib/netdev-offload-dpdk.c | 35 +++++----
2 files changed, 94 insertions(+), 87 deletions(-)
diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
index 91fefb877..892b2d0f5 100644
--- a/lib/dpif-netdev.c
+++ b/lib/dpif-netdev.c
@@ -530,7 +530,6 @@ struct dp_netdev_flow {
/* Statistics. */
struct dp_netdev_flow_stats stats;
- struct dp_netdev_flow_stats hw_stats;
/* Actions. */
OVSRCU_TYPE(struct dp_netdev_actions *) actions;
@@ -3011,11 +3010,49 @@ dp_netdev_pmd_find_flow(const struct dp_netdev_pmd_thread *pmd,
return NULL;
}
+static bool
+dpif_netdev_get_flow_offload_status(const struct dp_netdev *dp,
+ const struct dp_netdev_flow *netdev_flow,
+ struct dpif_flow_stats *stats,
+ struct dpif_flow_attrs *attrs)
+{
+ struct nlattr *actions;
+ struct ofpbuf wbuffer;
+ struct netdev *netdev;
+ struct match match;
+
+ int ret = 0;
+
+ if (!netdev_is_flow_api_enabled()) {
+ return false;
+ }
+
+ netdev = netdev_ports_get(netdev_flow->flow.in_port.odp_port, dp->class);
+ if (!netdev) {
+ return false;
+ }
+ /* Taking a global 'port_mutex' to fulfill thread safety
+ * restrictions for the netdev-offload-dpdk module. */
+ ovs_mutex_lock(&dp->port_mutex);
+ ret = netdev_flow_get(netdev, &match, &actions, &netdev_flow->mega_ufid,
+ stats, attrs, &wbuffer);
+ ovs_mutex_unlock(&dp->port_mutex);
+ netdev_close(netdev);
+ if (ret) {
+ return false;
+ }
+
+ return true;
+}
+
static void
-get_dpif_flow_stats(const struct dp_netdev_flow *netdev_flow_,
- struct dpif_flow_stats *stats, bool hw)
+get_dpif_flow_status(const struct dp_netdev *dp,
+ const struct dp_netdev_flow *netdev_flow_,
+ struct dpif_flow_stats *stats,
+ struct dpif_flow_attrs *attrs)
{
- struct dp_netdev_flow_stats *flow_stats;
+ struct dpif_flow_stats offload_stats;
+ struct dpif_flow_attrs offload_attrs;
struct dp_netdev_flow *netdev_flow;
unsigned long long n;
long long used;
@@ -3023,16 +3060,29 @@ get_dpif_flow_stats(const struct dp_netdev_flow *netdev_flow_,
netdev_flow = CONST_CAST(struct dp_netdev_flow *, netdev_flow_);
- flow_stats = (hw) ? &netdev_flow->hw_stats : &netdev_flow->stats;
-
- atomic_read_relaxed(&flow_stats->packet_count, &n);
+ atomic_read_relaxed(&netdev_flow->stats.packet_count, &n);
stats->n_packets = n;
- atomic_read_relaxed(&flow_stats->byte_count, &n);
+ atomic_read_relaxed(&netdev_flow->stats.byte_count, &n);
stats->n_bytes = n;
- atomic_read_relaxed(&flow_stats->used, &used);
+ atomic_read_relaxed(&netdev_flow->stats.used, &used);
stats->used = used;
- atomic_read_relaxed(&flow_stats->tcp_flags, &flags);
+ atomic_read_relaxed(&netdev_flow->stats.tcp_flags, &flags);
stats->tcp_flags = flags;
+
+ if (dpif_netdev_get_flow_offload_status(dp, netdev_flow,
+ &offload_stats, &offload_attrs)) {
+ stats->n_packets += offload_stats.n_packets;
+ stats->n_bytes += offload_stats.n_bytes;
+ stats->used = MAX(stats->used, offload_stats.used);
+ stats->tcp_flags |= offload_stats.tcp_flags;
+ if (attrs) {
+ attrs->offloaded = offload_attrs.offloaded;
+ attrs->dp_layer = offload_attrs.dp_layer;
+ }
+ } else if (attrs) {
+ attrs->offloaded = false;
+ attrs->dp_layer = "ovs";
+ }
}
/* Converts to the dpif_flow format, using 'key_buf' and 'mask_buf' for
@@ -3040,12 +3090,11 @@ get_dpif_flow_stats(const struct dp_netdev_flow *netdev_flow_,
* 'mask_buf'. Actions will be returned without copying, by relying on RCU to
* protect them. */
static void
-dp_netdev_flow_to_dpif_flow(const struct dp_netdev_flow *netdev_flow,
+dp_netdev_flow_to_dpif_flow(const struct dp_netdev *dp,
+ const struct dp_netdev_flow *netdev_flow,
struct ofpbuf *key_buf, struct ofpbuf *mask_buf,
- struct dpif_flow *flow, bool terse, bool offloaded)
+ struct dpif_flow *flow, bool terse)
{
- struct dpif_flow_stats hw_stats;
-
if (terse) {
memset(flow, 0, sizeof *flow);
} else {
@@ -3085,14 +3134,8 @@ dp_netdev_flow_to_dpif_flow(const struct dp_netdev_flow *netdev_flow,
flow->ufid = netdev_flow->ufid;
flow->ufid_present = true;
flow->pmd_id = netdev_flow->pmd_id;
- get_dpif_flow_stats(netdev_flow, &flow->stats, false);
- get_dpif_flow_stats(netdev_flow, &hw_stats, true);
- flow->stats.n_packets += hw_stats.n_packets;
- flow->stats.n_bytes += hw_stats.n_bytes;
- flow->stats.used = MAX(flow->stats.used, hw_stats.used);
- flow->attrs.offloaded = offloaded;
- flow->attrs.dp_layer = flow->attrs.offloaded ? "in_hw" : "ovs";
+ get_dpif_flow_status(dp, netdev_flow, &flow->stats, &flow->attrs);
}
static int
@@ -3162,44 +3205,6 @@ dpif_netdev_flow_from_nlattrs(const struct nlattr *key, uint32_t key_len,
return 0;
}
-static bool
-dpif_netdev_offload_stats(struct dp_netdev_flow *netdev_flow,
- struct dp_netdev *dp)
-{
- struct dpif_flow_stats stats;
- struct dpif_flow_attrs attrs;
- struct nlattr *actions;
- struct ofpbuf wbuffer;
- struct netdev *netdev;
- struct match match;
-
- int ret = 0;
-
- if (!netdev_is_flow_api_enabled()) {
- return false;
- }
-
- netdev = netdev_ports_get(netdev_flow->flow.in_port.odp_port, dp->class);
- if (!netdev) {
- return false;
- }
- /* Taking a global 'port_mutex' to fulfill thread safety
- * restrictions for the netdev-offload-dpdk module. */
- ovs_mutex_lock(&dp->port_mutex);
- ret = netdev_flow_get(netdev, &match, &actions, &netdev_flow->mega_ufid,
- &stats, &attrs, &wbuffer);
- ovs_mutex_unlock(&dp->port_mutex);
- netdev_close(netdev);
- if (ret) {
- return false;
- }
- atomic_store_relaxed(&netdev_flow->hw_stats.packet_count, stats.n_packets);
- atomic_store_relaxed(&netdev_flow->hw_stats.byte_count, stats.n_bytes);
- atomic_store_relaxed(&netdev_flow->hw_stats.used, stats.used);
-
- return true;
-}
-
static int
dpif_netdev_flow_get(const struct dpif *dpif, const struct dpif_flow_get *get)
{
@@ -3233,9 +3238,8 @@ dpif_netdev_flow_get(const struct dpif *dpif, const struct dpif_flow_get *get)
netdev_flow = dp_netdev_pmd_find_flow(pmd, get->ufid, get->key,
get->key_len);
if (netdev_flow) {
- bool offloaded = dpif_netdev_offload_stats(netdev_flow, pmd->dp);
- dp_netdev_flow_to_dpif_flow(netdev_flow, get->buffer, get->buffer,
- get->flow, false, offloaded);
+ dp_netdev_flow_to_dpif_flow(dp, netdev_flow, get->buffer,
+ get->buffer, get->flow, false);
error = 0;
break;
} else {
@@ -3298,7 +3302,6 @@ dp_netdev_flow_add(struct dp_netdev_pmd_thread *pmd,
/* Do not allocate extra space. */
flow = xmalloc(sizeof *flow - sizeof flow->cr.flow.mf + mask.len);
memset(&flow->stats, 0, sizeof flow->stats);
- memset(&flow->hw_stats, 0, sizeof flow->hw_stats);
flow->dead = false;
flow->batch = NULL;
flow->mark = INVALID_FLOW_MARK;
@@ -3411,7 +3414,7 @@ flow_put_on_pmd(struct dp_netdev_pmd_thread *pmd,
put->actions, put->actions_len);
if (stats) {
- get_dpif_flow_stats(netdev_flow, stats, false);
+ get_dpif_flow_status(pmd->dp, netdev_flow, stats, NULL);
}
if (put->flags & DPIF_FP_ZERO_STATS) {
/* XXX: The userspace datapath uses thread local statistics
@@ -3530,7 +3533,7 @@ flow_del_on_pmd(struct dp_netdev_pmd_thread *pmd,
del->key_len);
if (netdev_flow) {
if (stats) {
- get_dpif_flow_stats(netdev_flow, stats, false);
+ get_dpif_flow_status(pmd->dp, netdev_flow, stats, NULL);
}
dp_netdev_pmd_remove_flow(pmd, netdev_flow);
} else {
@@ -3664,16 +3667,13 @@ dpif_netdev_flow_dump_next(struct dpif_flow_dump_thread *thread_,
= dpif_netdev_flow_dump_thread_cast(thread_);
struct dpif_netdev_flow_dump *dump = thread->dump;
struct dp_netdev_flow *netdev_flows[FLOW_DUMP_MAX_BATCH];
- bool offloaded[FLOW_DUMP_MAX_BATCH];
+ struct dpif_netdev *dpif = dpif_netdev_cast(thread->up.dpif);
+ struct dp_netdev *dp = get_dp_netdev(&dpif->dpif);
int n_flows = 0;
int i;
- memset(offloaded, 0, sizeof offloaded);
-
ovs_mutex_lock(&dump->mutex);
if (!dump->status) {
- struct dpif_netdev *dpif = dpif_netdev_cast(thread->up.dpif);
- struct dp_netdev *dp = get_dp_netdev(&dpif->dpif);
struct dp_netdev_pmd_thread *pmd = dump->cur_pmd;
int flow_limit = MIN(max_flows, FLOW_DUMP_MAX_BATCH);
@@ -3699,8 +3699,6 @@ dpif_netdev_flow_dump_next(struct dpif_flow_dump_thread *thread_,
netdev_flows[n_flows] = CONTAINER_OF(node,
struct dp_netdev_flow,
node);
- offloaded[n_flows] =
- dpif_netdev_offload_stats(netdev_flows[n_flows], pmd->dp);
}
/* When finishing dumping the current pmd thread, moves to
* the next. */
@@ -3732,8 +3730,8 @@ dpif_netdev_flow_dump_next(struct dpif_flow_dump_thread *thread_,
ofpbuf_use_stack(&key, keybuf, sizeof *keybuf);
ofpbuf_use_stack(&mask, maskbuf, sizeof *maskbuf);
- dp_netdev_flow_to_dpif_flow(netdev_flow, &key, &mask, f,
- dump->up.terse, offloaded[i]);
+ dp_netdev_flow_to_dpif_flow(dp, netdev_flow, &key, &mask, f,
+ dump->up.terse);
}
return n_flows;
diff --git a/lib/netdev-offload-dpdk.c b/lib/netdev-offload-dpdk.c
index 0ec840ce1..d1c066992 100644
--- a/lib/netdev-offload-dpdk.c
+++ b/lib/netdev-offload-dpdk.c
@@ -57,6 +57,7 @@ struct ufid_to_rte_flow_data {
ovs_u128 ufid;
struct rte_flow *rte_flow;
struct dpif_flow_stats stats;
+ bool actions_offloaded;
};
/* Find rte_flow with @ufid. */
@@ -77,7 +78,7 @@ ufid_to_rte_flow_data_find(const ovs_u128 *ufid)
static inline void
ufid_to_rte_flow_associate(const ovs_u128 *ufid,
- struct rte_flow *rte_flow)
+ struct rte_flow *rte_flow, bool actions_offloaded)
{
size_t hash = hash_bytes(ufid, sizeof *ufid, 0);
struct ufid_to_rte_flow_data *data = xzalloc(sizeof *data);
@@ -96,6 +97,7 @@ ufid_to_rte_flow_associate(const ovs_u128 *ufid,
data->ufid = *ufid;
data->rte_flow = rte_flow;
+ data->actions_offloaded = actions_offloaded;
cmap_insert(&ufid_to_rte_flow,
CONST_CAST(struct cmap_node *, &data->node), hash);
@@ -1130,6 +1132,7 @@ netdev_offload_dpdk_add_flow(struct netdev *netdev,
struct offload_info *info)
{
struct flow_patterns patterns = { .items = NULL, .cnt = 0 };
+ bool actions_offloaded = true;
struct rte_flow *flow;
int ret = 0;
@@ -1145,13 +1148,14 @@ netdev_offload_dpdk_add_flow(struct netdev *netdev,
* actions.
*/
flow = netdev_offload_dpdk_mark_rss(&patterns, netdev, info->flow_mark);
+ actions_offloaded = false;
}
if (!flow) {
ret = -1;
goto out;
}
- ufid_to_rte_flow_associate(ufid, flow);
+ ufid_to_rte_flow_associate(ufid, flow, actions_offloaded);
VLOG_DBG("%s: installed flow %p by ufid "UUID_FMT"\n",
netdev_get_name(netdev), flow, UUID_ARGS((struct uuid *)ufid));
@@ -1317,7 +1321,7 @@ netdev_offload_dpdk_flow_get(struct netdev *netdev,
struct nlattr **actions OVS_UNUSED,
const ovs_u128 *ufid,
struct dpif_flow_stats *stats,
- struct dpif_flow_attrs *attrs OVS_UNUSED,
+ struct dpif_flow_attrs *attrs,
struct ofpbuf *buf OVS_UNUSED)
{
struct rte_flow_query_count query = { .reset = 1 };
@@ -1331,18 +1335,23 @@ netdev_offload_dpdk_flow_get(struct netdev *netdev,
goto out;
}
- ret = netdev_dpdk_rte_flow_query_count(netdev, rte_flow_data->rte_flow,
- &query, &error);
- if (ret) {
- goto out;
- }
- rte_flow_data->stats.n_packets += (query.hits_set) ? query.hits : 0;
- rte_flow_data->stats.n_bytes += (query.bytes_set) ? query.bytes : 0;
- if (query.hits_set && query.hits) {
- rte_flow_data->stats.used = time_usec() / 1000;
+ attrs->offloaded = true;
+ if (rte_flow_data->actions_offloaded) {
+ attrs->dp_layer = "dpdk";
+ ret = netdev_dpdk_rte_flow_query_count(netdev, rte_flow_data->rte_flow,
+ &query, &error);
+ if (ret) {
+ goto out;
+ }
+ rte_flow_data->stats.n_packets += (query.hits_set) ? query.hits : 0;
+ rte_flow_data->stats.n_bytes += (query.bytes_set) ? query.bytes : 0;
+ if (query.hits_set && query.hits) {
+ rte_flow_data->stats.used = time_msec();
+ }
+ } else {
+ attrs->dp_layer = "ovs-partial";
}
memcpy(stats, &rte_flow_data->stats, sizeof *stats);
-
out:
return ret;
}
--
2.17.1
More information about the dev
mailing list