[ovs-dev] [PATCH 2/3] dpif-netdev: Add rxq idle cycle counters.

Kevin Traynor ktraynor at redhat.com
Wed Dec 13 13:27:16 UTC 2017


This commit lays the groundwork for displaying stats about the CPU
consumption of an rxq. Displaying only the raw processing cycles
measured for an rxq is not user friendly, so we also collect rxq idle
polling cycles. That will later allow us to show the percentage of a
pmd/core that an rxq is using.

Collecting this new information requires some new counters. The
existing stats counters are reworked and extended so that idle and
processing cycles can be grouped per rxq.
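
As a rough illustration of how the grouped counters could later be used
(not part of this patch; the helper name below is hypothetical), the
busy share of a pmd/core for an rxq could be derived from the stored
intervals:

    /* Hypothetical sketch: sum the stored idle and processing interval
     * counters for an rxq and return the fraction of polling time spent
     * processing packets. */
    static double
    rxq_busy_fraction(struct dp_netdev_rxq *rx)
    {
        uint64_t idle = 0, proc = 0;

        for (unsigned i = 0; i < PMD_RXQ_INTERVAL_MAX; i++) {
            idle += dp_netdev_rxq_get_intrvl_cycles(rx, PMD_CYCLES_IDLE, i);
            proc += dp_netdev_rxq_get_intrvl_cycles(rx,
                                                    PMD_CYCLES_PROCESSING, i);
        }

        return (idle + proc) ? (double) proc / (idle + proc) : 0.0;
    }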

Signed-off-by: Kevin Traynor <ktraynor at redhat.com>
---
 lib/dpif-netdev.c | 148 +++++++++++++++++++++++++++++++++++-------------------
 1 file changed, 95 insertions(+), 53 deletions(-)
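
A note on the interval handling (illustrative, see the last hunk below):
dp_netdev_rxq_set_intrvl_cycles() no longer advances the write index
itself, because the idle and processing counters must land in the same
interval slot. The index is now rotated explicitly once per interval:

    /* Store both counter types into the current interval slot, then
     * advance to the next slot. */
    dp_netdev_rxq_set_intrvl_cycles(rxq, PMD_CYCLES_IDLE, idle_cycles);
    dp_netdev_rxq_set_intrvl_cycles(rxq, PMD_CYCLES_PROCESSING, proc_cycles);
    dp_netdev_rxq_inc_intrvl(rxq);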

diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
index 2f669dc..9a1f38b 100644
--- a/lib/dpif-netdev.c
+++ b/lib/dpif-netdev.c
@@ -348,13 +348,4 @@ enum pmd_cycles_counter_type {
 };
 
-enum rxq_cycles_counter_type {
-    RXQ_CYCLES_PROC_CURR,       /* Cycles spent successfully polling and
-                                   processing packets during the current
-                                   interval. */
-    RXQ_CYCLES_PROC_HIST,       /* Total cycles of all intervals that are used
-                                   during rxq to pmd assignment. */
-    RXQ_N_CYCLES
-};
-
 #define XPS_TIMEOUT_MS 500LL
 
@@ -367,12 +358,14 @@ struct dp_netdev_rxq {
                                           queue doesn't need to be pinned to a
                                           particular core. */
-    unsigned intrvl_idx;               /* Write index for 'cycles_intrvl'. */
+    unsigned intrvl_idx;               /* Write index for 'intrvl_cycles'. */
     struct dp_netdev_pmd_thread *pmd;  /* pmd thread that polls this queue. */
 
-    /* Counters of cycles spent successfully polling and processing pkts. */
-    atomic_ullong cycles[RXQ_N_CYCLES];
-    /* We store PMD_RXQ_INTERVAL_MAX intervals of data for an rxq and then
-       sum them to yield the cycles used for an rxq. */
-    atomic_ullong cycles_intrvl[PMD_RXQ_INTERVAL_MAX];
+    /* curr_cycles is updated when an rxq is polled. */
+    atomic_ullong curr_cycles[PMD_N_CYCLES];
+    /* intrvl_cycles is the sum of curr_cycles for an interval
+       as defined by PMD_RXQ_INTERVAL_LEN. */
+    atomic_ullong intrvl_cycles[PMD_RXQ_INTERVAL_MAX][PMD_N_CYCLES];
+    /* total_cycles is the sum of processing cycles for an rxq. */
+    atomic_ullong total_cycles;
 };
 
@@ -706,15 +699,23 @@ dp_netdev_pmd_try_optimize(struct dp_netdev_pmd_thread *pmd,
                            struct polled_queue *poll_list, int poll_cnt);
 static void
-dp_netdev_rxq_set_cycles(struct dp_netdev_rxq *rx,
-                         enum rxq_cycles_counter_type type,
-                         unsigned long long cycles);
+dp_netdev_rxq_set_total_cycles(struct dp_netdev_rxq *rx,
+                               unsigned long long cycles);
 static uint64_t
-dp_netdev_rxq_get_cycles(struct dp_netdev_rxq *rx,
-                         enum rxq_cycles_counter_type type);
+dp_netdev_rxq_get_total_cycles(struct dp_netdev_rxq *rx);
+static void
+dp_netdev_rxq_set_curr_cycles(struct dp_netdev_rxq *rx,
+                              enum pmd_cycles_counter_type type,
+                              unsigned long long cycles);
+static uint64_t
+dp_netdev_rxq_get_curr_cycles(struct dp_netdev_rxq *rx,
+                              enum pmd_cycles_counter_type type);
 static void
 dp_netdev_rxq_set_intrvl_cycles(struct dp_netdev_rxq *rx,
-                           unsigned long long cycles);
+                                enum pmd_cycles_counter_type type,
+                                unsigned long long cycles);
 static uint64_t
-dp_netdev_rxq_get_intrvl_cycles(struct dp_netdev_rxq *rx, unsigned idx);
+dp_netdev_rxq_get_intrvl_cycles(struct dp_netdev_rxq *rx,
+                                enum pmd_cycles_counter_type type,
+                                unsigned idx);
 static void
 dpif_netdev_xps_revalidate_pmd(const struct dp_netdev_pmd_thread *pmd,
@@ -3177,41 +3178,65 @@ cycles_count_intermediate(struct dp_netdev_pmd_thread *pmd,
 
     non_atomic_ullong_add(&pmd->cycles.n[type], interval);
-    if (rxq && (type == PMD_CYCLES_PROCESSING)) {
-        /* Add to the amount of current processing cycles. */
-        non_atomic_ullong_add(&rxq->cycles[RXQ_CYCLES_PROC_CURR], interval);
+    if (rxq) {
+        /* Add to the amount of current cycles. */
+        non_atomic_ullong_add(&rxq->curr_cycles[type], interval);
     }
 }
 
 static void
-dp_netdev_rxq_set_cycles(struct dp_netdev_rxq *rx,
-                         enum rxq_cycles_counter_type type,
-                         unsigned long long cycles)
+dp_netdev_rxq_set_curr_cycles(struct dp_netdev_rxq *rx,
+                              enum pmd_cycles_counter_type type,
+                              unsigned long long cycles)
 {
-   atomic_store_relaxed(&rx->cycles[type], cycles);
+   atomic_store_relaxed(&rx->curr_cycles[type], cycles);
 }
 
 static uint64_t
-dp_netdev_rxq_get_cycles(struct dp_netdev_rxq *rx,
-                         enum rxq_cycles_counter_type type)
+dp_netdev_rxq_get_curr_cycles(struct dp_netdev_rxq *rx,
+                              enum pmd_cycles_counter_type type)
 {
-    unsigned long long processing_cycles;
-    atomic_read_relaxed(&rx->cycles[type], &processing_cycles);
-    return processing_cycles;
+    unsigned long long cycles;
+    atomic_read_relaxed(&rx->curr_cycles[type], &cycles);
+    return cycles;
+}
+
+static void
+dp_netdev_rxq_set_total_cycles(struct dp_netdev_rxq *rx,
+                               unsigned long long cycles)
+{
+   atomic_store_relaxed(&rx->total_cycles, cycles);
+}
+
+static uint64_t
+dp_netdev_rxq_get_total_cycles(struct dp_netdev_rxq *rx)
+{
+    unsigned long long cycles;
+    atomic_read_relaxed(&rx->total_cycles, &cycles);
+    return cycles;
 }
 
 static void
 dp_netdev_rxq_set_intrvl_cycles(struct dp_netdev_rxq *rx,
+                                enum pmd_cycles_counter_type type,
                                 unsigned long long cycles)
 {
-    unsigned int idx = rx->intrvl_idx++ % PMD_RXQ_INTERVAL_MAX;
-    atomic_store_relaxed(&rx->cycles_intrvl[idx], cycles);
+    unsigned int idx = rx->intrvl_idx % PMD_RXQ_INTERVAL_MAX;
+    atomic_store_relaxed(&rx->intrvl_cycles[idx][type], cycles);
 }
 
 static uint64_t
-dp_netdev_rxq_get_intrvl_cycles(struct dp_netdev_rxq *rx, unsigned idx)
+dp_netdev_rxq_get_intrvl_cycles(struct dp_netdev_rxq *rx,
+                                enum pmd_cycles_counter_type type,
+                                unsigned idx)
 {
-    unsigned long long processing_cycles;
-    atomic_read_relaxed(&rx->cycles_intrvl[idx], &processing_cycles);
-    return processing_cycles;
+    unsigned long long cycles;
+    atomic_read_relaxed(&rx->intrvl_cycles[idx][type],
+                        &cycles);
+    return cycles;
+}
+
+static void
+dp_netdev_rxq_inc_intrvl(struct dp_netdev_rxq *rx) {
+    rx->intrvl_idx++;
 }
 
@@ -3436,6 +3461,6 @@ compare_rxq_cycles(const void *a, const void *b)
     qb = *(struct dp_netdev_rxq **) b;
 
-    cycles_qa = dp_netdev_rxq_get_cycles(qa, RXQ_CYCLES_PROC_HIST);
-    cycles_qb = dp_netdev_rxq_get_cycles(qb, RXQ_CYCLES_PROC_HIST);
+    cycles_qa = dp_netdev_rxq_get_total_cycles(qa);
+    cycles_qb = dp_netdev_rxq_get_total_cycles(qb);
 
     if (cycles_qa != cycles_qb) {
@@ -3507,7 +3532,8 @@ rxq_scheduling(struct dp_netdev *dp, bool pinned) OVS_REQUIRES(dp->port_mutex)
                 /* Sum the queue intervals and store the cycle history. */
                 for (unsigned i = 0; i < PMD_RXQ_INTERVAL_MAX; i++) {
-                    cycle_hist += dp_netdev_rxq_get_intrvl_cycles(q, i);
+                    cycle_hist += dp_netdev_rxq_get_intrvl_cycles(q,
+                                      PMD_CYCLES_PROCESSING, i);
                 }
-                dp_netdev_rxq_set_cycles(q, RXQ_CYCLES_PROC_HIST, cycle_hist);
+                dp_netdev_rxq_set_total_cycles(q, cycle_hist);
 
                 /* Store the queue. */
@@ -3556,5 +3582,5 @@ rxq_scheduling(struct dp_netdev *dp, bool pinned) OVS_REQUIRES(dp->port_mutex)
                   netdev_rxq_get_name(rxqs[i]->rx),
                   netdev_rxq_get_queue_id(rxqs[i]->rx),
-                  dp_netdev_rxq_get_cycles(rxqs[i], RXQ_CYCLES_PROC_HIST));
+                  dp_netdev_rxq_get_total_cycles(rxqs[i]));
         }
     }
@@ -4052,6 +4078,8 @@ reload:
                 pmd->core_id, netdev_rxq_get_name(poll_list[i].rxq->rx),
                 netdev_rxq_get_queue_id(poll_list[i].rxq->rx));
-       /* Reset the rxq current cycles counter. */
-       dp_netdev_rxq_set_cycles(poll_list[i].rxq, RXQ_CYCLES_PROC_CURR, 0);
+       /* Reset the rxq current cycles counters. */
+       dp_netdev_rxq_set_curr_cycles(poll_list[i].rxq, PMD_CYCLES_IDLE, 0);
+       dp_netdev_rxq_set_curr_cycles(poll_list[i].rxq,
+                                     PMD_CYCLES_PROCESSING, 0);
     }
 
@@ -6005,11 +6033,25 @@ dp_netdev_pmd_try_optimize(struct dp_netdev_pmd_thread *pmd,
 
     if (now > pmd->rxq_next_cycle_store) {
-        /* Get the cycles that were used to process each queue and store. */
+        /* Get the cycles for each queue and store. */
         for (unsigned i = 0; i < poll_cnt; i++) {
-            uint64_t rxq_cyc_curr = dp_netdev_rxq_get_cycles(poll_list[i].rxq,
-                                                        RXQ_CYCLES_PROC_CURR);
-            dp_netdev_rxq_set_intrvl_cycles(poll_list[i].rxq, rxq_cyc_curr);
-            dp_netdev_rxq_set_cycles(poll_list[i].rxq, RXQ_CYCLES_PROC_CURR,
-                                     0);
+            struct dp_netdev_rxq *rxq = poll_list[i].rxq;
+            uint64_t idle_cycles, proc_cycles;
+
+            /* Get the current cycle counters. */
+            idle_cycles = dp_netdev_rxq_get_curr_cycles(
+                              rxq, PMD_CYCLES_IDLE);
+            proc_cycles = dp_netdev_rxq_get_curr_cycles(
+                              rxq, PMD_CYCLES_PROCESSING);
+
+            /* Store the current cycle counters into the next interval. */
+            dp_netdev_rxq_set_intrvl_cycles(rxq, PMD_CYCLES_IDLE,
+                                            idle_cycles);
+            dp_netdev_rxq_set_intrvl_cycles(rxq, PMD_CYCLES_PROCESSING,
+                                            proc_cycles);
+            dp_netdev_rxq_inc_intrvl(rxq);
+
+            /* Reset the current cycle counters. */
+            dp_netdev_rxq_set_curr_cycles(rxq, PMD_CYCLES_IDLE, 0);
+            dp_netdev_rxq_set_curr_cycles(rxq, PMD_CYCLES_PROCESSING, 0);
         }
         /* Start new measuring interval */
-- 
1.8.3.1


