[ovs-dev] [PATCH v3 7/9] dpif-netdev: Add simple per pmd-thread cycles counters.

Daniele Di Proietto diproiettod at vmware.com
Fri Mar 27 13:26:07 UTC 2015


The counters use the x86 TSC if available (currently only with DPDK). They
will be exposed by subsequent commits.

Signed-off-by: Daniele Di Proietto <diproiettod at vmware.com>
---
 lib/dpif-netdev.c | 73 ++++++++++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 70 insertions(+), 3 deletions(-)
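
Note for reviewers (below the fold, not part of the commit message): a
minimal standalone sketch of the accounting pattern this patch uses.  Every
accounting point reads the TSC, subtracts the previous reading, and charges
the elapsed cycles to one bucket.  The compiler intrinsic __rdtsc() from
<x86intrin.h> stands in for DPDK's rte_get_tsc_cycles(), and all names
(thread_cycles, cycles_diff, ...) are illustrative only:

/* Standalone sketch (not part of the patch) of TSC-based cycle accounting. */
#include <stdint.h>
#include <stdio.h>
#include <x86intrin.h>

enum cycles_type { CYCLES_POLLING, CYCLES_PROCESSING, CYCLES_OTHER, N_CYCLES };

struct thread_cycles {
    uint64_t n[N_CYCLES];       /* Accumulated cycles, indexed by CYCLES_*. */
    uint64_t last;              /* TSC value at the last accounting point. */
};

/* Returns the cycles elapsed since the previous call for this thread. */
static uint64_t
cycles_diff(struct thread_cycles *c)
{
    uint64_t now = __rdtsc();
    uint64_t old = c->last;

    c->last = now;
    return now - old;
}

/* Charges the interval since the last accounting point to 'type'. */
static void
count_previous_cycles(struct thread_cycles *c, enum cycles_type type)
{
    c->n[type] += cycles_diff(c);
}

int
main(void)
{
    struct thread_cycles c = { .last = __rdtsc() };   /* Start the clock. */

    /* ...poll devices here... */
    count_previous_cycles(&c, CYCLES_POLLING);
    /* ...process packets here... */
    count_previous_cycles(&c, CYCLES_PROCESSING);

    printf("polling=%llu processing=%llu\n",
           (unsigned long long) c.n[CYCLES_POLLING],
           (unsigned long long) c.n[CYCLES_PROCESSING]);
    return 0;
}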

diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
index f7978ad..4543c30 100644
--- a/lib/dpif-netdev.c
+++ b/lib/dpif-netdev.c
@@ -228,6 +228,13 @@ enum dp_stat_type {
     DP_N_STATS
 };
 
+enum pmd_cycles_counter_type {
+    PMD_CYCLES_POLLING,         /* Cycles spent polling NICs. */
+    PMD_CYCLES_PROCESSING,      /* Cycles spent processing packets. */
+    PMD_CYCLES_OTHER,           /* Cycles spent doing other tasks. */
+    PMD_N_CYCLES
+};
+
 /* A port in a netdev-based datapath. */
 struct dp_netdev_port {
     struct cmap_node node;      /* Node in dp_netdev's 'ports'. */
@@ -346,6 +353,12 @@ struct dp_netdev_pmd_stats {
     unsigned long long int n[DP_N_STATS];
 };
 
+/* Contained by struct dp_netdev_pmd_thread's 'cycles' member. */
+struct dp_netdev_pmd_cycles {
+    /* Indexed by PMD_CYCLES_*. */
+    uint64_t n[PMD_N_CYCLES];
+};
+
 /* PMD: Poll modes drivers.  PMD accesses devices via polling to eliminate
  * the performance overhead of interrupt processing.  Therefore netdev can
  * not implement rx-wait for these devices.  dpif-netdev needs to poll
@@ -387,10 +400,15 @@ struct dp_netdev_pmd_thread {
 
     /* Statistics. */
     struct dp_netdev_pmd_stats stats;
-    /* Used to protect 'stats'. Only guarantees consistency of single stats
-     * members, not of the structure as a whole */
+    /* Cycles counters. */
+    struct dp_netdev_pmd_cycles cycles;
+    /* Used to protect 'stats' and 'cycles'.  Only guarantees consistency of
+     * single members, not of the structure as a whole. */
     struct u64_stats_sync stats_lock;
 
+    /* Used to count cycles.  See 'pmd_cycles_counter_diff()'. */
+    uint64_t last_cycles;
+
     struct latch exit_latch;        /* For terminating the pmd thread. */
     atomic_uint change_seq;         /* For reloading pmd ports. */
     pthread_t thread;
@@ -400,6 +418,10 @@ struct dp_netdev_pmd_thread {
     int numa_id;                    /* numa node id of this pmd thread. */
 };
 
+static inline uint64_t pmd_cycles_counter_diff(struct dp_netdev_pmd_thread *);
+static inline void pmd_count_previous_cycles(struct dp_netdev_pmd_thread *,
+                                             enum pmd_cycles_counter_type);
+
 #define PMD_INITIAL_SEQ 1
 
 /* Interface to netdev-based datapath. */
@@ -2089,6 +2111,7 @@ dpif_netdev_execute(struct dpif *dpif, struct dpif_execute *execute)
     if (pmd->core_id == NON_PMD_CORE_ID) {
         ovs_mutex_lock(&dp->non_pmd_mutex);
         ovs_mutex_lock(&dp->port_mutex);
+        pmd_cycles_counter_diff(pmd);
     }
 
     pp = execute->packet;
@@ -2098,6 +2121,7 @@ dpif_netdev_execute(struct dpif *dpif, struct dpif_execute *execute)
+        pmd_count_previous_cycles(pmd, PMD_CYCLES_PROCESSING);
         dp_netdev_pmd_unref(pmd);
         ovs_mutex_unlock(&dp->port_mutex);
         ovs_mutex_unlock(&dp->non_pmd_mutex);
     }
 
     return 0;
@@ -2240,6 +2264,36 @@ dp_netdev_actions_free(struct dp_netdev_actions *actions)
 }
 
 
+/* Returns the number of cycles elapsed since the last call to this
+ * function with the same 'pmd' argument. */
+static inline uint64_t
+pmd_cycles_counter_diff(struct dp_netdev_pmd_thread *pmd)
+{
+    uint64_t old_cycles = pmd->last_cycles;
+#ifdef DPDK_NETDEV
+    uint64_t new_cycles = rte_get_tsc_cycles();
+#else
+    uint64_t new_cycles = 0;
+#endif
+
+    pmd->last_cycles = new_cycles;
+
+    return new_cycles - old_cycles;
+}
+
+/* Updates the pmd cycles counters, charging the cycles elapsed since the
+ * last accounting point to the reason specified in 'type'. */
+static inline void
+pmd_count_previous_cycles(struct dp_netdev_pmd_thread *pmd,
+                          enum pmd_cycles_counter_type type)
+{
+    uint32_t c;
+
+    c = u64_stats_write_begin(&pmd->stats_lock);
+    pmd->cycles.n[type] += pmd_cycles_counter_diff(pmd);
+    u64_stats_write_end(&pmd->stats_lock, c);
+}
+
 static void
 dp_netdev_process_rxq_port(struct dp_netdev_pmd_thread *pmd,
                            struct dp_netdev_port *port,
@@ -2252,6 +2306,8 @@ dp_netdev_process_rxq_port(struct dp_netdev_pmd_thread *pmd,
     if (!error) {
         int i;
 
+        pmd_count_previous_cycles(pmd, PMD_CYCLES_POLLING);
+
         *recirc_depth_get() = 0;
 
         /* XXX: initialize md in netdev implementation. */
@@ -2259,6 +2315,7 @@ dp_netdev_process_rxq_port(struct dp_netdev_pmd_thread *pmd,
             packets[i]->md = PKT_METADATA_INITIALIZER(port->port_no);
         }
         dp_netdev_input(pmd, packets, cnt);
+        pmd_count_previous_cycles(pmd, PMD_CYCLES_PROCESSING);
     } else if (error != EAGAIN && error != EOPNOTSUPP) {
         static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
 
@@ -2278,6 +2335,7 @@ dpif_netdev_run(struct dpif *dpif)
     uint64_t new_tnl_seq;
 
     ovs_mutex_lock(&dp->non_pmd_mutex);
+    pmd_count_previous_cycles(non_pmd, PMD_CYCLES_OTHER);
     CMAP_FOR_EACH (port, node, &dp->ports) {
         if (!netdev_is_pmd(port->netdev)) {
             int i;
@@ -2388,6 +2446,10 @@ pmd_thread_main(void *f_)
     /* Stores the pmd thread's 'pmd' to 'per_pmd_key'. */
     ovsthread_setspecific(pmd->dp->per_pmd_key, pmd);
     pmd_thread_setaffinity_cpu(pmd->core_id);
+
+    /* Initialize the cycles counter. */
+    pmd_cycles_counter_diff(pmd);
+
 reload:
     emc_cache_init(&pmd->flow_cache);
     poll_cnt = pmd_load_queues(pmd, &poll_list, poll_cnt);
@@ -2396,6 +2458,7 @@ reload:
      * reloading the updated configuration. */
     dp_netdev_pmd_reload_done(pmd);
 
+    pmd_count_previous_cycles(pmd, PMD_CYCLES_OTHER);
     for (;;) {
         int i;
 
@@ -2406,6 +2469,8 @@ reload:
         if (lc++ > 1024) {
             unsigned int seq;
 
+            pmd_count_previous_cycles(pmd, PMD_CYCLES_POLLING);
+
             lc = 0;
 
             emc_cache_slow_sweep(&pmd->flow_cache);
@@ -2416,6 +2481,7 @@ reload:
                 port_seq = seq;
                 break;
             }
+
         }
     }
 
@@ -2556,10 +2622,11 @@ dp_netdev_configure_pmd(struct dp_netdev_pmd_thread *pmd, struct dp_netdev *dp,
     ovs_mutex_init(&pmd->flow_mutex);
     dpcls_init(&pmd->cls);
     cmap_init(&pmd->flow_table);
-    /* init the 'flow_cache' since there is no
+    /* init the 'flow_cache' and cycles counter since there is no
      * actual thread created for NON_PMD_CORE_ID. */
     if (core_id == NON_PMD_CORE_ID) {
         emc_cache_init(&pmd->flow_cache);
+        pmd_cycles_counter_diff(pmd);
     }
     cmap_insert(&dp->poll_threads, CONST_CAST(struct cmap_node *, &pmd->node),
                 hash_int(core_id, 0));
-- 
2.1.4
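
Since the counters are only exposed by later commits in the series, here is
a hedged sketch of how a consumer might turn the three per-type totals into
a percentage breakdown once they are available.  The helper and all names
below are hypothetical, not part of this patch or series:

/* Hypothetical consumer-side helper: percentage breakdown of the buckets. */
#include <stdint.h>
#include <stdio.h>

enum { POLLING, PROCESSING, OTHER, N_TYPES };

static void
print_cycles_breakdown(const uint64_t n[N_TYPES])
{
    static const char *names[N_TYPES] = { "polling", "processing", "other" };
    uint64_t total = n[POLLING] + n[PROCESSING] + n[OTHER];
    int i;

    for (i = 0; i < N_TYPES; i++) {
        printf("%s: %.1f%%\n", names[i],
               total ? 100.0 * n[i] / total : 0.0);
    }
}

int
main(void)
{
    uint64_t n[N_TYPES] = { 700, 250, 50 };   /* Example totals. */

    print_cycles_breakdown(n);                /* polling: 70.0% ... */
    return 0;
}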