[ovs-dev] [PATCH v4 3/3] dpif-netdev: Calculate rxq cycles prior to compare_rxq_cycles calls.
Kevin Traynor
ktraynor at redhat.com
Thu Nov 23 19:41:57 UTC 2017
compare_rxq_cycles sums the latest cycles from each queue for
comparison with each other. While each comparison correctly
gets the latest cycles, the cycles could change between calls
to compare_rxq_cycles. In order to use consistent values throughout
each call of compare_rxq_cycles, sum the cycles before qsort is
called.
Requested-by: Ilya Maximets <i.maximets at samsung.com>
Signed-off-by: Kevin Traynor <ktraynor at redhat.com>
---
V4: Rebased to apply after patches 1/3 and 2/3
V3:
- Drop V2 1/2 as merged
- Removed rxq_cycle_sort return changes for a later patch
- Some minor variable rename and comment changes
lib/dpif-netdev.c | 24 +++++++++++++-----------
1 file changed, 13 insertions(+), 11 deletions(-)
diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
index 657df71..c31c09e 100644
--- a/lib/dpif-netdev.c
+++ b/lib/dpif-netdev.c
@@ -3450,20 +3450,14 @@ compare_rxq_cycles(const void *a, const void *b)
struct dp_netdev_rxq *qa;
struct dp_netdev_rxq *qb;
- uint64_t total_qa, total_qb;
- unsigned i;
+ uint64_t cycles_qa, cycles_qb;
qa = *(struct dp_netdev_rxq **) a;
qb = *(struct dp_netdev_rxq **) b;
- total_qa = total_qb = 0;
- for (i = 0; i < PMD_RXQ_INTERVAL_MAX; i++) {
- total_qa += dp_netdev_rxq_get_intrvl_cycles(qa, i);
- total_qb += dp_netdev_rxq_get_intrvl_cycles(qb, i);
- }
- dp_netdev_rxq_set_cycles(qa, RXQ_CYCLES_PROC_HIST, total_qa);
- dp_netdev_rxq_set_cycles(qb, RXQ_CYCLES_PROC_HIST, total_qb);
+ cycles_qa = dp_netdev_rxq_get_cycles(qa, RXQ_CYCLES_PROC_HIST);
+ cycles_qb = dp_netdev_rxq_get_cycles(qb, RXQ_CYCLES_PROC_HIST);
- if (total_qa != total_qb) {
- return (total_qa < total_qb) ? 1 : -1;
+ if (cycles_qa != cycles_qb) {
+ return (cycles_qa < cycles_qb) ? 1 : -1;
} else {
/* Cycles are the same so tiebreak on port/queue id.
@@ -3521,4 +3515,6 @@ rxq_scheduling(struct dp_netdev *dp, bool pinned) OVS_REQUIRES(dp->port_mutex)
}
} else if (!pinned && q->core_id == OVS_CORE_UNSPEC) {
+ uint64_t cycle_hist = 0;
+
if (n_rxqs == 0) {
rxqs = xmalloc(sizeof *rxqs);
@@ -3526,4 +3522,10 @@ rxq_scheduling(struct dp_netdev *dp, bool pinned) OVS_REQUIRES(dp->port_mutex)
rxqs = xrealloc(rxqs, sizeof *rxqs * (n_rxqs + 1));
}
+ /* Sum the queue intervals and store the cycle history. */
+ for (unsigned i = 0; i < PMD_RXQ_INTERVAL_MAX; i++) {
+ cycle_hist += dp_netdev_rxq_get_intrvl_cycles(q, i);
+ }
+ dp_netdev_rxq_set_cycles(q, RXQ_CYCLES_PROC_HIST, cycle_hist);
+
/* Store the queue. */
rxqs[n_rxqs++] = q;
--
1.8.3.1
More information about the dev
mailing list