[ovs-dev] [PATCH v5 2/6] dpif-netdev: Add rxq processing cycle counters.
Darrell Ball
dball at vmware.com
Thu Aug 24 08:34:47 UTC 2017
On 8/23/17, 6:34 AM, "Kevin Traynor" <ktraynor at redhat.com> wrote:
Add counters to dp_netdev_rxq which will later be used for storing the
processing cycles of an rxq. Processing cycles will be stored in reference
to a defined time interval. We will store the cycles of the current in progress
interval, a number of completed intervals and the sum of the completed
intervals.
cycles_count_intermediate was used to count cycles for a pmd. With some small
additions we can also use it to count the cycles used for processing an rxq.
Signed-off-by: Kevin Traynor <ktraynor at redhat.com>
---
lib/dpif-netdev.c | 28 +++++++++++++++++++++++++---
1 file changed, 25 insertions(+), 3 deletions(-)
diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
index f35c079..51d4050 100644
--- a/lib/dpif-netdev.c
+++ b/lib/dpif-netdev.c
@@ -182,4 +182,8 @@ struct emc_cache {
#define DPCLS_OPTIMIZATION_INTERVAL 1000
+/* Number of intervals for which cycles are stored
+ * and used during rxq to pmd assignment. */
+#define PMD_RXQ_INTERVAL_MAX 6
+
struct dpcls {
struct cmap_node node; /* Within dp_netdev_pmd_thread.classifiers */
@@ -340,4 +344,13 @@ enum pmd_cycles_counter_type {
};
+enum rxq_cycles_counter_type {
+ RXQ_CYCLES_PROC_CURR, /* Cycles spent successfully polling and
+ processing packets during the current
+ interval. */
+ RXQ_CYCLES_PROC_HIST, /* Total cycles of all intervals that are used
+ during rxq to pmd assignment. */
+ RXQ_N_CYCLES
+};
+
#define XPS_TIMEOUT_MS 500LL
@@ -351,4 +364,9 @@ struct dp_netdev_rxq {
particular core. */
struct dp_netdev_pmd_thread *pmd; /* pmd thread that polls this queue. */
+
+ /* Counters of cycles spent polling and processing packets. */
Do we need to specify ‘successfully’ polling to avoid confusion?
+ atomic_ullong cycles[RXQ_N_CYCLES];
Do you think below deserves a separate description – ‘we store the last PMD_RXQ_INTERVAL_MAX intervals and sum them up
to yield the cycles used for a given rxq…’ ?
+ atomic_ullong cycles_intrvl[PMD_RXQ_INTERVAL_MAX];
+ unsigned intrvl_idx; /* Write index for 'cycles_intrvl'. */
};
@@ -677,5 +695,4 @@ static void pmd_load_cached_ports(struct dp_netdev_pmd_thread *pmd)
static inline void
dp_netdev_pmd_try_optimize(struct dp_netdev_pmd_thread *pmd);
-
static void
dpif_netdev_xps_revalidate_pmd(const struct dp_netdev_pmd_thread *pmd,
@@ -3092,4 +3109,5 @@ cycles_count_end(struct dp_netdev_pmd_thread *pmd,
static inline void
cycles_count_intermediate(struct dp_netdev_pmd_thread *pmd,
+ struct dp_netdev_rxq *rxq,
enum pmd_cycles_counter_type type)
OVS_NO_THREAD_SAFETY_ANALYSIS
@@ -3100,4 +3118,8 @@ cycles_count_intermediate(struct dp_netdev_pmd_thread *pmd,
non_atomic_ullong_add(&pmd->cycles.n[type], interval);
+ if (rxq && (type == PMD_CYCLES_PROCESSING)) {
+ /* Add to the amount of current processing cycles. */
+ non_atomic_ullong_add(&rxq->cycles[RXQ_CYCLES_PROC_CURR], interval);
+ }
}
@@ -3668,5 +3690,5 @@ dpif_netdev_run(struct dpif *dpif)
port->rxqs[i].rx,
port->port_no);
- cycles_count_intermediate(non_pmd, process_packets ?
+ cycles_count_intermediate(non_pmd, NULL, process_packets ?
PMD_CYCLES_PROCESSING
: PMD_CYCLES_IDLE);
@@ -3855,5 +3877,5 @@ reload:
dp_netdev_process_rxq_port(pmd, poll_list[i].rxq->rx,
poll_list[i].port_no);
- cycles_count_intermediate(pmd,
+ cycles_count_intermediate(pmd, NULL,
process_packets ? PMD_CYCLES_PROCESSING
: PMD_CYCLES_IDLE);
--
1.8.3.1
More information about the dev
mailing list