[ovs-dev] [PATCH v3 3/6] dpif-netdev: Count the rxq processing cycles for an rxq.

Kevin Traynor <ktraynor@redhat.com>
Tue Aug 1 15:58:09 UTC 2017


Count the cycles used for processing an rxq during the
pmd rxq interval. As this is an in-flight counter and
pmds run independently, also store the total cycles used
during the last full interval.
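
The mechanism, roughly: each rxq carries two counters, the cycles
accumulated in the interval in progress (RXQ_CYCLES_PROC_CURR) and the
total from the last completed interval (RXQ_CYCLES_PROC_LAST). A minimal
standalone sketch of that roll-over pattern (illustrative names, not the
patch code; the patch itself uses relaxed atomics since other threads
read the stored value):

    #include <stdint.h>

    /* Illustrative model of the two per-rxq cycle counters. */
    struct rxq_cycles_model {
        uint64_t curr;  /* Cycles accumulated in the interval in progress. */
        uint64_t last;  /* Total cycles from the last full interval. */
    };

    /* Polling loop: attribute processing cycles to the rxq. */
    static void
    rxq_add_cycles(struct rxq_cycles_model *rxq, uint64_t cycles)
    {
        rxq->curr += cycles;
    }

    /* Once per PMD_RXQ_INTERVAL: publish CURR as LAST, restart CURR. */
    static void
    rxq_roll_interval(struct rxq_cycles_model *rxq)
    {
        rxq->last = rxq->curr;
        rxq->curr = 0;
    }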

Signed-off-by: Kevin Traynor <ktraynor@redhat.com>
---
 lib/dpif-netdev.c | 65 ++++++++++++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 60 insertions(+), 5 deletions(-)

diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
index 0a4daf9..25a521a 100644
--- a/lib/dpif-netdev.c
+++ b/lib/dpif-netdev.c
@@ -181,4 +181,8 @@ struct emc_cache {
 #define DPCLS_OPTIMIZATION_INTERVAL 1000
 
+/* Time in ms of the interval in which the rxq processing cycles used for
+ * rxq to pmd assignments are measured and stored. */
+#define PMD_RXQ_INTERVAL 1000
+
 struct dpcls {
     struct cmap_node node;      /* Within dp_netdev_pmd_thread.classifiers */
@@ -554,4 +558,6 @@ struct dp_netdev_pmd_thread {
     /* Periodically sort subtable vectors according to hit frequencies */
     long long int next_optimization;
+    /* Periodically store the processing cycles used for each rxq. */
+    long long int rxq_interval;
 
     /* Statistics. */
@@ -678,5 +684,13 @@ static void pmd_load_cached_ports(struct dp_netdev_pmd_thread *pmd)
     OVS_REQUIRES(pmd->port_mutex);
 static inline void
-dp_netdev_pmd_try_optimize(struct dp_netdev_pmd_thread *pmd);
+dp_netdev_pmd_try_optimize(struct dp_netdev_pmd_thread *pmd,
+                           struct polled_queue *poll_list, int poll_cnt);
+static void
+dp_netdev_rxq_set_cycles(struct dp_netdev_rxq *rx,
+                         enum rxq_cycles_counter_type type,
+                         unsigned long long cycles);
+static uint64_t
+dp_netdev_rxq_get_cycles(struct dp_netdev_rxq *rx,
+                         enum rxq_cycles_counter_type type);
 static void
 dpif_netdev_xps_revalidate_pmd(const struct dp_netdev_pmd_thread *pmd,
@@ -3099,4 +3113,21 @@ cycles_count_intermediate(struct dp_netdev_pmd_thread *pmd,
 }
 
+static void
+dp_netdev_rxq_set_cycles(struct dp_netdev_rxq *rx,
+                         enum rxq_cycles_counter_type type,
+                         unsigned long long cycles)
+{
+    atomic_store_relaxed(&rx->cycles[type], cycles);
+}
+
+static uint64_t
+dp_netdev_rxq_get_cycles(struct dp_netdev_rxq *rx,
+                         enum rxq_cycles_counter_type type)
+{
+    unsigned long long tmp;
+    atomic_read_relaxed(&rx->cycles[type], &tmp);
+    return tmp;
+}
+
 static int
 dp_netdev_process_rxq_port(struct dp_netdev_pmd_thread *pmd,
@@ -3143,5 +3174,5 @@ port_reconfigure(struct dp_netdev_port *port)
 {
     struct netdev *netdev = port->netdev;
-    int i, err;
+    int i, err, last_nrxq;
 
     port->need_reconfigure = false;
@@ -3152,4 +3183,5 @@ port_reconfigure(struct dp_netdev_port *port)
         port->rxqs[i].rx = NULL;
     }
+    last_nrxq = port->n_rxq;
     port->n_rxq = 0;
 
@@ -3172,4 +3204,9 @@ port_reconfigure(struct dp_netdev_port *port)
     for (i = 0; i < netdev_n_rxq(netdev); i++) {
         port->rxqs[i].port = port;
+        if (i >= last_nrxq) {
+            /* Only reset cycle stats for new queues. */
+            dp_netdev_rxq_set_cycles(&port->rxqs[i], RXQ_CYCLES_PROC_CURR, 0);
+            dp_netdev_rxq_set_cycles(&port->rxqs[i], RXQ_CYCLES_PROC_LAST, 0);
+        }
         err = netdev_rxq_open(netdev, &port->rxqs[i].rx, i);
         if (err) {
@@ -3765,5 +3802,5 @@ reload:
                 dp_netdev_process_rxq_port(pmd, poll_list[i].rxq->rx,
                                            poll_list[i].port_no);
-            cycles_count_intermediate(pmd, NULL,
+            cycles_count_intermediate(pmd, poll_list[i].rxq,
                                       process_packets ? PMD_CYCLES_PROCESSING
                                                       : PMD_CYCLES_IDLE);
@@ -3776,5 +3813,5 @@ reload:
 
             coverage_try_clear();
-            dp_netdev_pmd_try_optimize(pmd);
+            dp_netdev_pmd_try_optimize(pmd, poll_list, poll_cnt);
             if (!ovsrcu_try_quiesce()) {
                 emc_cache_slow_sweep(&pmd->flow_cache);
@@ -4221,4 +4258,5 @@ dp_netdev_configure_pmd(struct dp_netdev_pmd_thread *pmd, struct dp_netdev *dp,
     cmap_init(&pmd->classifiers);
     pmd->next_optimization = time_msec() + DPCLS_OPTIMIZATION_INTERVAL;
+    pmd->rxq_interval = time_msec() + PMD_RXQ_INTERVAL;
     hmap_init(&pmd->poll_list);
     hmap_init(&pmd->tx_ports);
@@ -5654,8 +5692,25 @@ dpcls_sort_subtable_vector(struct dpcls *cls)
 
 static inline void
-dp_netdev_pmd_try_optimize(struct dp_netdev_pmd_thread *pmd)
+dp_netdev_pmd_try_optimize(struct dp_netdev_pmd_thread *pmd,
+                           struct polled_queue *poll_list, int poll_cnt)
 {
     struct dpcls *cls;
     long long int now = time_msec();
+    int i;
+    uint64_t rxq_cyc_curr;
+
+    if (now > pmd->rxq_interval) {
+        /* Get the cycles that were used to process each queue and store. */
+        for (i = 0; i < poll_cnt; i++) {
+            rxq_cyc_curr = dp_netdev_rxq_get_cycles(poll_list[i].rxq,
+                                                    RXQ_CYCLES_PROC_CURR);
+            dp_netdev_rxq_set_cycles(poll_list[i].rxq, RXQ_CYCLES_PROC_LAST,
+                                     rxq_cyc_curr);
+            dp_netdev_rxq_set_cycles(poll_list[i].rxq, RXQ_CYCLES_PROC_CURR,
+                                     0);
+        }
+        /* Start a new measuring interval. */
+        pmd->rxq_interval = now + PMD_RXQ_INTERVAL;
+    }
 
     if (now > pmd->next_optimization) {
-- 
1.8.3.1