[ovs-dev] [PATCH] dpif-netdev: Change definitions of 'idle' & 'processing' cycles
Ciara Loftus
ciara.loftus at intel.com
Thu Jan 12 11:44:17 UTC 2017
Instead of counting all polling cycles as processing cycles, count only
the cycles during which packets were actually received by the polling.
Signed-off-by: Georg Schmuecking <georg.schmuecking at ericsson.com>
Signed-off-by: Ciara Loftus <ciara.loftus at intel.com>
Co-authored-by: Ciara Loftus <ciara.loftus at intel.com>
---
lib/dpif-netdev.c | 55 +++++++++++++++++++++++++++++++++++++++++++------------
1 file changed, 43 insertions(+), 12 deletions(-)
diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
index 546a1e9..da6f8fe 100644
--- a/lib/dpif-netdev.c
+++ b/lib/dpif-netdev.c
@@ -272,7 +272,10 @@ enum dp_stat_type {
enum pmd_cycles_counter_type {
PMD_CYCLES_POLLING, /* Cycles spent polling NICs. */
- PMD_CYCLES_PROCESSING, /* Cycles spent processing packets */
+ PMD_CYCLES_IDLE, /* Cycles spent idle or unsuccessful polling */
+ PMD_CYCLES_PROCESSING, /* Cycles spent successfully polling and
+ * processing polled packets */
+
PMD_N_CYCLES
};
@@ -731,10 +734,10 @@ pmd_info_show_stats(struct ds *reply,
}
ds_put_format(reply,
- "\tpolling cycles:%"PRIu64" (%.02f%%)\n"
+ "\tidle cycles:%"PRIu64" (%.02f%%)\n"
"\tprocessing cycles:%"PRIu64" (%.02f%%)\n",
- cycles[PMD_CYCLES_POLLING],
- cycles[PMD_CYCLES_POLLING] / (double)total_cycles * 100,
+ cycles[PMD_CYCLES_IDLE],
+ cycles[PMD_CYCLES_IDLE] / (double)total_cycles * 100,
cycles[PMD_CYCLES_PROCESSING],
cycles[PMD_CYCLES_PROCESSING] / (double)total_cycles * 100);
@@ -2873,30 +2876,43 @@ cycles_count_end(struct dp_netdev_pmd_thread *pmd,
non_atomic_ullong_add(&pmd->cycles.n[type], interval);
}
-static void
+/* Calculate the intermediate cycle result and add to the counter 'type' */
+static inline void
+cycles_count_intermediate(struct dp_netdev_pmd_thread *pmd,
+ enum pmd_cycles_counter_type type)
+ OVS_NO_THREAD_SAFETY_ANALYSIS
+{
+ unsigned long long new_cycles = cycles_counter();
+ unsigned long long interval = new_cycles - pmd->last_cycles;
+ pmd->last_cycles = new_cycles;
+
+ non_atomic_ullong_add(&pmd->cycles.n[type], interval);
+}
+
+static int
dp_netdev_process_rxq_port(struct dp_netdev_pmd_thread *pmd,
struct dp_netdev_port *port,
struct netdev_rxq *rxq)
{
struct dp_packet_batch batch;
int error;
+ int batch_cnt = 0;
dp_packet_batch_init(&batch);
- cycles_count_start(pmd);
error = netdev_rxq_recv(rxq, &batch);
- cycles_count_end(pmd, PMD_CYCLES_POLLING);
if (!error) {
*recirc_depth_get() = 0;
- cycles_count_start(pmd);
+ batch_cnt = batch.count;
dp_netdev_input(pmd, &batch, port->port_no);
- cycles_count_end(pmd, PMD_CYCLES_PROCESSING);
} else if (error != EAGAIN && error != EOPNOTSUPP) {
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
VLOG_ERR_RL(&rl, "error receiving data from %s: %s",
netdev_get_name(port->netdev), ovs_strerror(error));
}
+
+ return batch_cnt;
}
static int
@@ -3006,21 +3022,26 @@ dpif_netdev_run(struct dpif *dpif)
struct dp_netdev *dp = get_dp_netdev(dpif);
struct dp_netdev_pmd_thread *non_pmd;
uint64_t new_tnl_seq;
+ int process_packets = 0;
ovs_mutex_lock(&dp->port_mutex);
non_pmd = dp_netdev_get_pmd(dp, NON_PMD_CORE_ID);
if (non_pmd) {
ovs_mutex_lock(&dp->non_pmd_mutex);
+ cycles_count_start(non_pmd);
HMAP_FOR_EACH (port, node, &dp->ports) {
if (!netdev_is_pmd(port->netdev)) {
int i;
for (i = 0; i < port->n_rxq; i++) {
- dp_netdev_process_rxq_port(non_pmd, port,
- port->rxqs[i].rxq);
+ process_packets +=
+ dp_netdev_process_rxq_port(non_pmd, port,
+ port->rxqs[i].rxq);
}
}
}
+ cycles_count_end(non_pmd, process_packets ? PMD_CYCLES_PROCESSING
+ : PMD_CYCLES_IDLE);
dpif_netdev_xps_revalidate_pmd(non_pmd, time_msec(), false);
ovs_mutex_unlock(&dp->non_pmd_mutex);
@@ -3131,6 +3152,7 @@ pmd_thread_main(void *f_)
bool exiting;
int poll_cnt;
int i;
+ int process_packets = 0;
poll_list = NULL;
@@ -3149,9 +3171,12 @@ reload:
netdev_rxq_get_queue_id(poll_list[i].rx));
}
+ cycles_count_start(pmd);
for (;;) {
for (i = 0; i < poll_cnt; i++) {
- dp_netdev_process_rxq_port(pmd, poll_list[i].port, poll_list[i].rx);
+ process_packets += dp_netdev_process_rxq_port(pmd,
+ poll_list[i].port,
+ poll_list[i].rx);
}
if (lc++ > 1024) {
@@ -3171,8 +3196,14 @@ reload:
break;
}
}
+ cycles_count_intermediate(pmd, process_packets ? PMD_CYCLES_PROCESSING
+ : PMD_CYCLES_IDLE);
+ process_packets = 0;
}
+ cycles_count_end(pmd, process_packets ? PMD_CYCLES_PROCESSING
+ : PMD_CYCLES_IDLE);
+
poll_cnt = pmd_load_queues_and_ports(pmd, &poll_list);
exiting = latch_is_set(&pmd->exit_latch);
/* Signal here to make sure the pmd finishes
--
2.4.3
More information about the dev
mailing list