[ovs-dev] [PATCH] ofproto-dpif-upcall: reduce number of wakeup

YAMAMOTO Takashi yamt at mwd.biglobe.ne.jp
Wed Sep 4 23:04:36 UTC 2013


If the queue length is non-zero, the consumer thread should
already be busy working on the queue.  There is no need to wake it
up repeatedly.

Signed-off-by: YAMAMOTO Takashi <yamt at mwd.biglobe.ne.jp>
---
 ofproto/ofproto-dpif-upcall.c | 32 +++++++++++++++++++++-----------
 1 file changed, 21 insertions(+), 11 deletions(-)

diff --git a/ofproto/ofproto-dpif-upcall.c b/ofproto/ofproto-dpif-upcall.c
index 54f441b..1829b89 100644
--- a/ofproto/ofproto-dpif-upcall.c
+++ b/ofproto/ofproto-dpif-upcall.c
@@ -55,7 +55,7 @@ struct handler {
     struct list upcalls OVS_GUARDED;
     size_t n_upcalls OVS_GUARDED;
 
-    size_t n_new_upcalls;              /* Only changed by the dispatcher. */
+    bool need_signal;                  /* Only changed by the dispatcher. */
 
     pthread_cond_t wake_cond;          /* Wakes 'thread' while holding
                                           'mutex'. */
@@ -220,6 +220,7 @@ udpif_recv_set(struct udpif *udpif, size_t n_handlers, bool enable)
 
             handler->udpif = udpif;
             list_init(&handler->upcalls);
+            handler->need_signal = false;
             xpthread_cond_init(&handler->wake_cond, NULL);
             ovs_mutex_init(&handler->mutex);
             xpthread_create(&handler->thread, NULL, udpif_miss_handler, handler);
@@ -548,7 +549,10 @@ static void
 recv_upcalls(struct udpif *udpif)
 {
     static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(60, 60);
-    size_t n_udpif_new_upcalls = 0;
+    size_t n_upcalls = 0;
+    bool udpif_need_signal = false;  /* the consumer might have gone to sleep
+                                        without processing the requests we put
+                                        on the queue */
     struct handler *handler;
     int n;
 
@@ -597,9 +601,13 @@ recv_upcalls(struct udpif *udpif)
             ovs_mutex_lock(&handler->mutex);
             if (handler->n_upcalls < MAX_QUEUE_LENGTH) {
                 list_push_back(&handler->upcalls, &upcall->list_node);
-                handler->n_new_upcalls = ++handler->n_upcalls;
-
-                if (handler->n_new_upcalls >= FLOW_MISS_MAX_BATCH) {
+                if (handler->n_upcalls == 0) {
+                    handler->need_signal = true;
+                }
+                handler->n_upcalls++;
+                if (handler->need_signal &&
+                    handler->n_upcalls >= FLOW_MISS_MAX_BATCH) {
+                    handler->need_signal = false;
                     xpthread_cond_signal(&handler->wake_cond);
                 }
                 ovs_mutex_unlock(&handler->mutex);
@@ -620,11 +628,13 @@ recv_upcalls(struct udpif *udpif)
         } else {
             ovs_mutex_lock(&udpif->upcall_mutex);
             if (udpif->n_upcalls < MAX_QUEUE_LENGTH) {
-                n_udpif_new_upcalls = ++udpif->n_upcalls;
+                if (udpif->n_upcalls == 0) {
+                    udpif_need_signal = true;
+                }
+                n_upcalls = ++udpif->n_upcalls;
                 list_push_back(&udpif->upcalls, &upcall->list_node);
                 ovs_mutex_unlock(&udpif->upcall_mutex);
-
-                if (n_udpif_new_upcalls >= FLOW_MISS_MAX_BATCH) {
+                if (udpif_need_signal && n_upcalls >= FLOW_MISS_MAX_BATCH) {
                     seq_change(udpif->wait_seq);
                 }
             } else {
@@ -636,14 +646,14 @@ recv_upcalls(struct udpif *udpif)
     }
     for (n = 0; n < udpif->n_handlers; ++n) {
         handler = &udpif->handlers[n];
-        if (handler->n_new_upcalls) {
-            handler->n_new_upcalls = 0;
+        if (handler->need_signal) {
+            handler->need_signal = false;
             ovs_mutex_lock(&handler->mutex);
             xpthread_cond_signal(&handler->wake_cond);
             ovs_mutex_unlock(&handler->mutex);
         }
     }
-    if (n_udpif_new_upcalls) {
+    if (udpif_need_signal) {
         seq_change(udpif->wait_seq);
     }
 }
-- 
1.8.3.1




More information about the dev mailing list