[ovs-dev] [PATCH v3 16/28] dpif-netdev: Quiesce offload thread periodically

Gaetan Rivet grive at u256.net
Sun Apr 25 11:55:30 UTC 2021


Similar to what was done for the PMD threads [1], reduce the performance
impact of quiescing too often in the offload thread.

After each processed offload, the offload thread currently quiesces and
will sync with RCU. This synchronization can be lengthy and makes the
thread unnecessarily slow.

Instead, attempt to quiesce at most once every 10 ms. While the queue is
empty, the offload thread remains quiescent.

[1]: 81ac8b3b194c ("dpif-netdev: Do RCU synchronization at fixed interval
     in PMD main loop.")

Signed-off-by: Gaetan Rivet <grive at u256.net>
Reviewed-by: Eli Britstein <elibr at nvidia.com>
---
 lib/dpif-netdev.c | 17 +++++++++++++++--
 1 file changed, 15 insertions(+), 2 deletions(-)
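
[Note, not part of the patch: the fixed-interval quiescing pattern applied
in the diff below can be sketched in isolation roughly as follows.
time_usec() and ovsrcu_quiesce() are the existing OVS helpers used by this
patch; QUIESCE_INTERVAL_US, offload_loop_sketch() and process_one_offload()
are hypothetical placeholders for illustration only.]

#include "ovs-rcu.h"   /* ovsrcu_quiesce() */
#include "timeval.h"   /* time_usec() */

#define QUIESCE_INTERVAL_US (10 * 1000)   /* 10 ms */

static void
offload_loop_sketch(void)
{
    long long int next_rcu;
    long long int now;

    next_rcu = time_usec() + QUIESCE_INTERVAL_US;
    for (;;) {
        /* Hypothetical stand-in for the per-item work done in
         * dp_netdev_flow_offload_main(). */
        process_one_offload();

        /* Quiesce at most once per interval, then rearm the deadline,
         * instead of quiescing after every processed item. */
        now = time_usec();
        if (now > next_rcu) {
            ovsrcu_quiesce();
            next_rcu = time_usec() + QUIESCE_INTERVAL_US;
        }
    }
}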

diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
index d458bcb12..44e5735b2 100644
--- a/lib/dpif-netdev.c
+++ b/lib/dpif-netdev.c
@@ -2740,15 +2740,20 @@ err_free:
     return -1;
 }
 
+#define DP_NETDEV_OFFLOAD_QUIESCE_INTERVAL_US (10 * 1000) /* 10 ms */
+
 static void *
 dp_netdev_flow_offload_main(void *data OVS_UNUSED)
 {
     struct dp_offload_thread_item *offload;
     struct ovs_list *list;
     long long int latency_us;
+    long long int next_rcu;
+    long long int now;
     const char *op;
     int ret;
 
+    next_rcu = time_usec() + DP_NETDEV_OFFLOAD_QUIESCE_INTERVAL_US;
     for (;;) {
         ovs_mutex_lock(&dp_offload_thread.mutex);
         if (ovs_list_is_empty(&dp_offload_thread.list)) {
@@ -2756,6 +2761,7 @@ dp_netdev_flow_offload_main(void *data OVS_UNUSED)
             ovs_mutex_cond_wait(&dp_offload_thread.cond,
                                 &dp_offload_thread.mutex);
             ovsrcu_quiesce_end();
+            next_rcu = time_usec() + DP_NETDEV_OFFLOAD_QUIESCE_INTERVAL_US;
         }
         list = ovs_list_pop_front(&dp_offload_thread.list);
         dp_offload_thread.enqueued_item--;
@@ -2779,7 +2785,9 @@ dp_netdev_flow_offload_main(void *data OVS_UNUSED)
             OVS_NOT_REACHED();
         }
 
-        latency_us = time_usec() - offload->timestamp;
+        now = time_usec();
+
+        latency_us = now - offload->timestamp;
         mov_avg_cma_update(&dp_offload_thread.cma, latency_us);
         mov_avg_ema_update(&dp_offload_thread.ema, latency_us);
 
@@ -2787,7 +2795,12 @@ dp_netdev_flow_offload_main(void *data OVS_UNUSED)
                  ret == 0 ? "succeed" : "failed", op,
                  UUID_ARGS((struct uuid *) &offload->flow->mega_ufid));
         dp_netdev_free_flow_offload(offload);
-        ovsrcu_quiesce();
+
+        /* Do RCU synchronization at fixed interval. */
+        if (now > next_rcu) {
+            ovsrcu_quiesce();
+            next_rcu = time_usec() + DP_NETDEV_OFFLOAD_QUIESCE_INTERVAL_US;
+        }
     }
 
     return NULL;
-- 
2.31.1
