[ovs-dev] [PATCH v2 4/7] dpif-netdev: Count the processing cycles for an rxq.

Kevin Traynor ktraynor at redhat.com
Fri Jul 21 09:04:42 UTC 2017


Count the cycles used for processing an rxq during the pmd
optimization interval. As this is an in-flight counter and
pmds run independently, also store the total cycles used
during the last full interval.

Signed-off-by: Kevin Traynor <ktraynor at redhat.com>
---
 lib/dpif-netdev.c | 30 +++++++++++++++++++++++++-----
 1 file changed, 25 insertions(+), 5 deletions(-)
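
For illustration only, a minimal standalone sketch of the two-counter
scheme this patch relies on (the struct and function names below are
hypothetical, not OVS code): the in-flight counter accumulates while the
pmd polls the queue, and at each optimization interval it is rolled into
a "last" slot and reset, so the "last" slot always holds a complete
interval. This models the RXQ_CYCLES_PROC_CURR / RXQ_CYCLES_PROC_LAST
handling done by dp_netdev_pmd_try_optimize() in the diff below.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    struct rxq_cycles_model {
        uint64_t curr;   /* In-flight counter for the running interval. */
        uint64_t last;   /* Total from the last completed interval. */
    };

    static void
    rxq_model_add_cycles(struct rxq_cycles_model *q, uint64_t cycles)
    {
        q->curr += cycles;      /* Accumulate per polling iteration. */
    }

    static void
    rxq_model_roll_interval(struct rxq_cycles_model *q)
    {
        q->last = q->curr;      /* Publish the completed interval. */
        q->curr = 0;            /* Start a new measurement. */
    }

    int
    main(void)
    {
        struct rxq_cycles_model q = { 0, 0 };

        rxq_model_add_cycles(&q, 1200);
        rxq_model_add_cycles(&q, 800);
        rxq_model_roll_interval(&q);
        printf("last interval: %" PRIu64 " cycles\n", q.last);
        return 0;
    }

Because each pmd rolls its own queues' counters inside its optimization
interval, readers of the "last" value always see a full interval rather
than a partially accumulated one.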

diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
index 273db38..185de9b 100644
--- a/lib/dpif-netdev.c
+++ b/lib/dpif-netdev.c
@@ -678,5 +678,6 @@ static void pmd_load_cached_ports(struct dp_netdev_pmd_thread *pmd)
     OVS_REQUIRES(pmd->port_mutex);
 static inline void
-dp_netdev_pmd_try_optimize(struct dp_netdev_pmd_thread *pmd);
+dp_netdev_pmd_try_optimize(struct dp_netdev_pmd_thread *pmd,
+                           struct polled_queue *poll_list, int poll_cnt);
 static void
 dp_netdev_rxq_set_cycles(struct dp_netdev_rxq *rx,
@@ -3167,5 +3168,5 @@ port_reconfigure(struct dp_netdev_port *port)
 {
     struct netdev *netdev = port->netdev;
-    int i, err;
+    int i, err, last_nrxq;
 
     port->need_reconfigure = false;
@@ -3176,4 +3177,5 @@ port_reconfigure(struct dp_netdev_port *port)
         port->rxqs[i].rx = NULL;
     }
+    last_nrxq = port->n_rxq;
     port->n_rxq = 0;
 
@@ -3196,4 +3198,9 @@ port_reconfigure(struct dp_netdev_port *port)
     for (i = 0; i < netdev_n_rxq(netdev); i++) {
         port->rxqs[i].port = port;
+        if (i >= last_nrxq) {
+            /* Only reset cycle stats for new queues. */
+            dp_netdev_rxq_set_cycles(&port->rxqs[i], RXQ_CYCLES_PROC_CURR, 0);
+            dp_netdev_rxq_set_cycles(&port->rxqs[i], RXQ_CYCLES_PROC_LAST, 0);
+        }
         err = netdev_rxq_open(netdev, &port->rxqs[i].rx, i);
         if (err) {
@@ -3789,5 +3796,5 @@ reload:
                 dp_netdev_process_rxq_port(pmd, poll_list[i].rxq->rx,
                                            poll_list[i].port_no);
-            cycles_count_intermediate(pmd, NULL,
+            cycles_count_intermediate(pmd, poll_list[i].rxq,
                                       process_packets ? PMD_CYCLES_PROCESSING
                                                       : PMD_CYCLES_IDLE);
@@ -3800,5 +3807,5 @@ reload:
 
             coverage_try_clear();
-            dp_netdev_pmd_try_optimize(pmd);
+            dp_netdev_pmd_try_optimize(pmd, poll_list, poll_cnt);
             if (!ovsrcu_try_quiesce()) {
                 emc_cache_slow_sweep(&pmd->flow_cache);
@@ -5678,8 +5685,11 @@ dpcls_sort_subtable_vector(struct dpcls *cls)
 
 static inline void
-dp_netdev_pmd_try_optimize(struct dp_netdev_pmd_thread *pmd)
+dp_netdev_pmd_try_optimize(struct dp_netdev_pmd_thread *pmd,
+                           struct polled_queue *poll_list, int poll_cnt)
 {
     struct dpcls *cls;
     long long int now = time_msec();
+    int i;
+    uint64_t rxq_cyc_curr;
 
     if (now > pmd->next_optimization) {
@@ -5693,4 +5703,14 @@ dp_netdev_pmd_try_optimize(struct dp_netdev_pmd_thread *pmd)
             ovs_mutex_unlock(&pmd->flow_mutex);
         }
+
+        /* Get the cycles used to process each queue and store them. */
+        for (i = 0; i < poll_cnt; i++) {
+            rxq_cyc_curr = dp_netdev_rxq_get_cycles(poll_list[i].rxq,
+                                                    RXQ_CYCLES_PROC_CURR);
+            dp_netdev_rxq_set_cycles(poll_list[i].rxq, RXQ_CYCLES_PROC_LAST,
+                                     rxq_cyc_curr);
+            dp_netdev_rxq_set_cycles(poll_list[i].rxq, RXQ_CYCLES_PROC_CURR,
+                                     0);
+        }
         /* Start new measuring interval */
         pmd->next_optimization = now + PMD_OPTIMIZATION_INTERVAL;
-- 
1.8.3.1


