[ovs-dev] [PATCH V2 11/19] dpif-netdev: Read hw stats during flow_dump_next() call
Eli Britstein
elibr at mellanox.com
Mon Dec 2 08:41:45 UTC 2019
From: Ophir Munk <ophirmu at mellanox.com>
Use the netdev flow dump next API (netdev_flow_dump_next()) to update the
statistics of fully offloaded flows.
Co-authored-by: Eli Britstein <elibr at mellanox.com>
Signed-off-by: Ophir Munk <ophirmu at mellanox.com>
Reviewed-by: Oz Shlomo <ozsh at mellanox.com>
Signed-off-by: Eli Britstein <elibr at mellanox.com>
---
lib/dpif-netdev.c | 42 ++++++++++++++++++++++++++++++++++++++++--
1 file changed, 40 insertions(+), 2 deletions(-)
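
Note (illustration only, not part of the diff): once the hardware counters are
merged during the dump, any dpif flow dump consumer sees the combined stats and
the new attributes without further changes. Below is a minimal, hypothetical
sketch of such a consumer, assuming an already-opened 'struct dpif *' and the
generic dpif flow dump API (dpif_flow_dump_create()/_thread_create()/_next());
the helper name dump_offload_state() is made up for this example.

#include <inttypes.h>
#include <stdio.h>

#include "dpif.h"

/* Hypothetical helper, not in this patch: walks all datapath flows and
 * prints the offload state reported by dp_netdev_flow_to_dpif_flow().
 * Fully offloaded flows now show dp:in_hw, offloaded:yes, and stats that
 * already include the hardware hits read by dpif_netdev_offload_used(). */
static void
dump_offload_state(struct dpif *dpif)
{
    struct dpif_flow_dump *dump;
    struct dpif_flow_dump_thread *thread;
    struct dpif_flow f;

    dump = dpif_flow_dump_create(dpif, false, NULL); /* Not terse, no filter. */
    thread = dpif_flow_dump_thread_create(dump);

    while (dpif_flow_dump_next(thread, &f, 1) > 0) {
        printf("dp:%s offloaded:%s packets:%"PRIu64" bytes:%"PRIu64"\n",
               f.attrs.dp_layer, f.attrs.offloaded ? "yes" : "no",
               f.stats.n_packets, f.stats.n_bytes);
    }

    dpif_flow_dump_thread_destroy(thread);
    dpif_flow_dump_destroy(dump);
}
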
diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
index 5142bad1d..bfeb1e7b0 100644
--- a/lib/dpif-netdev.c
+++ b/lib/dpif-netdev.c
@@ -527,6 +527,7 @@ struct dp_netdev_flow {
 
     bool dead;
     uint32_t mark;               /* Unique flow mark assigned to a flow */
+    bool actions_offloaded;      /* true if flow is fully offloaded */
 
     /* Statistics. */
     struct dp_netdev_flow_stats stats;
@@ -2410,6 +2411,7 @@ dp_netdev_flow_offload_put(struct dp_flow_offload_item *offload)
         }
     }
     info.flow_mark = mark;
+    info.actions_offloaded = &flow->actions_offloaded;
 
     ovs_mutex_lock(&pmd->dp->port_mutex);
     port = dp_netdev_lookup_port(pmd->dp, in_port);
@@ -3073,8 +3075,8 @@ dp_netdev_flow_to_dpif_flow(const struct dp_netdev_flow *netdev_flow,
     flow->pmd_id = netdev_flow->pmd_id;
     get_dpif_flow_stats(netdev_flow, &flow->stats);
 
-    flow->attrs.offloaded = false;
-    flow->attrs.dp_layer = "ovs";
+    flow->attrs.offloaded = netdev_flow->actions_offloaded;
+    flow->attrs.dp_layer = flow->attrs.offloaded ? "in_hw" : "ovs";
 }
 
 static int
@@ -3244,6 +3246,7 @@ dp_netdev_flow_add(struct dp_netdev_pmd_thread *pmd,
     flow->dead = false;
     flow->batch = NULL;
     flow->mark = INVALID_FLOW_MARK;
+    flow->actions_offloaded = false;
     *CONST_CAST(unsigned *, &flow->pmd_id) = pmd->core_id;
     *CONST_CAST(struct flow *, &flow->flow) = match->flow;
     *CONST_CAST(ovs_u128 *, &flow->ufid) = *ufid;
@@ -3598,6 +3601,37 @@ dpif_netdev_flow_dump_thread_destroy(struct dpif_flow_dump_thread *thread_)
     free(thread);
 }
 
+static int
+dpif_netdev_offload_used(struct dp_netdev_flow *netdev_flow,
+                         struct dp_netdev_pmd_thread *pmd)
+{
+    struct netdev_flow_dump netdev_dump;
+    struct dpif_flow_stats stats;
+    ovs_u128 ufid;
+    bool has_next;
+
+    netdev_dump.port = netdev_flow->flow.in_port.odp_port;
+    netdev_dump.netdev = netdev_ports_get(netdev_dump.port, pmd->dp->class);
+    if (!netdev_dump.netdev) {
+        return -1;
+    }
+    /* get offloaded stats */
+    ufid = netdev_flow->mega_ufid;
+    has_next = netdev_flow_dump_next(&netdev_dump, NULL, NULL, &stats, NULL,
+                                     &ufid, NULL, NULL);
+    netdev_close(netdev_dump.netdev);
+    if (!has_next) {
+        return -1;
+    }
+    if (stats.n_packets) {
+        atomic_store_relaxed(&netdev_flow->stats.used, pmd->ctx.now / 1000);
+        non_atomic_ullong_add(&netdev_flow->stats.packet_count, stats.n_packets);
+        non_atomic_ullong_add(&netdev_flow->stats.byte_count, stats.n_bytes);
+    }
+
+    return 0;
+}
+
 static int
 dpif_netdev_flow_dump_next(struct dpif_flow_dump_thread *thread_,
                            struct dpif_flow *flows, int max_flows)
@@ -3638,6 +3672,10 @@ dpif_netdev_flow_dump_next(struct dpif_flow_dump_thread *thread_,
                 netdev_flows[n_flows] = CONTAINER_OF(node,
                                                      struct dp_netdev_flow,
                                                      node);
+                /* Read hardware stats in case of hardware offload */
+                if (netdev_flows[n_flows]->actions_offloaded) {
+                    dpif_netdev_offload_used(netdev_flows[n_flows], pmd);
+                }
             }
             /* When finishing dumping the current pmd thread, moves to
              * the next. */
--
2.14.5