[ovs-dev] [hwol RFC v1 5/5] dpif-netdev: Read hw stats during flow_dump_next() call

Ophir Munk ophirmu at mellanox.com
Mon Dec 31 19:45:57 UTC 2018


Flow stats are retrieved by the revalidator threads; for dpif-netdev
this is done through dpif_netdev_flow_dump_next(). When a flow is
fully offloaded, reading only the PMD SW stats yields no updates, so
the revalidator falsely considers the flow idle and deletes it once
the aging time expires.
This commit adds a new function, dp_netdev_offload_used(), which reads
the HW counters during the flow_dump_next() call. The new function
calls netdev_flow_stats_get(), which in turn reads the HW stats via
the DPDK rte_flow_query() API.
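
Below is a minimal sketch of how a netdev provider might back
netdev_flow_stats_get() with rte_flow_query(). The helper name
query_hw_flow_stats(), its parameters, and the assumption that the
offloaded flow carries an RTE_FLOW_ACTION_TYPE_COUNT action are
illustrative; only rte_flow_query() and struct rte_flow_query_count
are the actual DPDK API referenced by this patch:

    #include <stdint.h>
    #include <string.h>
    #include <rte_flow.h>

    /* Illustrative helper (not this patch's implementation): query the
     * HW counter of an offloaded flow on a given DPDK port. */
    static int
    query_hw_flow_stats(uint16_t port_id, struct rte_flow *flow,
                        uint64_t *n_packets, uint64_t *n_bytes)
    {
        /* Identify the COUNT action attached at offload time. */
        const struct rte_flow_action action = {
            .type = RTE_FLOW_ACTION_TYPE_COUNT,
        };
        struct rte_flow_query_count query;
        struct rte_flow_error error;

        memset(&query, 0, sizeof query);
        if (rte_flow_query(port_id, flow, &action, &query, &error)) {
            return -1;
        }
        *n_packets = query.hits_set ? query.hits : 0;
        *n_bytes = query.bytes_set ? query.bytes : 0;
        return 0;
    }

Stats returned this way are what dp_netdev_offload_used() folds into
the flow's packet/byte counters and "used" timestamp below.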

Signed-off-by: Ophir Munk <ophirmu at mellanox.com>
---
 lib/dpif-netdev.c | 44 +++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 41 insertions(+), 3 deletions(-)

diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
index 5d06036..dc6a989 100644
--- a/lib/dpif-netdev.c
+++ b/lib/dpif-netdev.c
@@ -3563,6 +3563,37 @@ dpif_netdev_flow_dump_thread_destroy(struct dpif_flow_dump_thread *thread_)
 }
 
 static int
+dp_netdev_offload_used(struct dp_netdev_flow *netdev_flow,
+                       struct dp_netdev_pmd_thread *pmd)
+{
+    int ret;
+    struct dp_netdev_port *port;
+    struct dpif_flow_stats stats;
+
+    odp_port_t in_port = netdev_flow->flow.in_port.odp_port;
+
+    ovs_mutex_lock(&pmd->dp->port_mutex);
+    port = dp_netdev_lookup_port(pmd->dp, in_port);
+    if (!port) {
+        ovs_mutex_unlock(&pmd->dp->port_mutex);
+        return -1;
+    }
+    /* Get the offloaded HW stats. */
+    ret = netdev_flow_stats_get(port->netdev, &netdev_flow->mega_ufid,
+                                &stats);
+    ovs_mutex_unlock(&pmd->dp->port_mutex);
+    if (ret) {
+        return -1;
+    }
+    atomic_store_relaxed(&netdev_flow->stats.used,
+                         pmd->ctx.now / 1000);
+    non_atomic_ullong_add(&netdev_flow->stats.packet_count, stats.n_packets);
+    non_atomic_ullong_add(&netdev_flow->stats.byte_count, stats.n_bytes);
+
+    return 0;
+}
+
+static int
 dpif_netdev_flow_dump_next(struct dpif_flow_dump_thread *thread_,
                            struct dpif_flow *flows, int max_flows)
 {
@@ -3594,14 +3625,21 @@ dpif_netdev_flow_dump_next(struct dpif_flow_dump_thread *thread_,
         do {
             for (n_flows = 0; n_flows < flow_limit; n_flows++) {
                 struct cmap_node *node;
+                struct dp_netdev_flow *flow;
 
                 node = cmap_next_position(&pmd->flow_table, &dump->flow_pos);
                 if (!node) {
                     break;
                 }
-                netdev_flows[n_flows] = CONTAINER_OF(node,
-                                                     struct dp_netdev_flow,
-                                                     node);
+                flow = netdev_flows[n_flows] = CONTAINER_OF(node,
+                                                    struct dp_netdev_flow,
+                                                    node);
+                /* Read HW offload stats when ufid equals mega_ufid. */
+                if (netdev_is_flow_api_enabled() &&
+                    !memcmp(&flow->ufid, &flow->mega_ufid,
+                            sizeof flow->ufid)) {
+                    dp_netdev_offload_used(flow, pmd);
+                }
             }
             /* When finishing dumping the current pmd thread, moves to
              * the next. */
-- 
1.8.3.1


