[ovs-dev] [PATCH] ofproto-dpif-upcall: Batch upcalls.

Jarno Rajahalme jrajahalme at nicira.com
Tue Aug 27 22:04:54 UTC 2013


Batching reduces overhead and yields up to 4 times the upcall processing
performance in a specialized test case.

Signed-off-by: Jarno Rajahalme <jrajahalme at nicira.com>
---
 ofproto/ofproto-dpif-upcall.c |   30 +++++++++++++++++++++++++++---
 1 file changed, 27 insertions(+), 3 deletions(-)
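
For reference, below is a minimal, self-contained sketch of the wakeup
batching pattern, using made-up names (struct queue, BATCH_SIZE, N_ITEMS)
rather than OVS code.  The producer signals the consumer's condition
variable only once per BATCH_SIZE queued items and flushes any partial
batch with one final signal, which is what the patch does per handler
with FLOW_MISS_MAX_BATCH:

#include <pthread.h>
#include <stdio.h>

#define BATCH_SIZE 8            /* Stands in for FLOW_MISS_MAX_BATCH. */
#define N_ITEMS    50

struct queue {
    pthread_mutex_t mutex;
    pthread_cond_t wake_cond;   /* Signaled once per batch, not per item. */
    int n_queued;               /* Items waiting; protected by 'mutex'. */
    int n_done;                 /* Total items consumed. */
};

static struct queue q = {
    .mutex = PTHREAD_MUTEX_INITIALIZER,
    .wake_cond = PTHREAD_COND_INITIALIZER,
};

static void *
consumer(void *q_)
{
    struct queue *queue = q_;

    pthread_mutex_lock(&queue->mutex);
    while (queue->n_done < N_ITEMS) {
        while (!queue->n_queued) {
            pthread_cond_wait(&queue->wake_cond, &queue->mutex);
        }
        /* One wakeup drains the whole batch. */
        printf("consumer: draining %d items\n", queue->n_queued);
        queue->n_done += queue->n_queued;
        queue->n_queued = 0;
    }
    pthread_mutex_unlock(&queue->mutex);
    return NULL;
}

int
main(void)
{
    pthread_t thread;
    int n_new = 0;              /* Items queued since the last signal. */
    int i;

    pthread_create(&thread, NULL, consumer, &q);

    for (i = 0; i < N_ITEMS; i++) {
        pthread_mutex_lock(&q.mutex);
        q.n_queued++;           /* Enqueue one item. */
        pthread_mutex_unlock(&q.mutex);

        /* Signal only after a full batch has accumulated, as in the
         * handler->n_new_upcalls check below. */
        if (++n_new >= BATCH_SIZE) {
            n_new = 0;
            pthread_cond_signal(&q.wake_cond);
        }
    }

    /* Flush: wake the consumer for any leftover partial batch. */
    if (n_new) {
        pthread_cond_signal(&q.wake_cond);
    }

    pthread_join(thread, NULL);
    return 0;
}

Compile with -pthread.  The same structure shows why the flush at the end
of recv_upcalls() is needed: without it, a final partial batch would sit
queued until the next full batch arrives.
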

diff --git a/ofproto/ofproto-dpif-upcall.c b/ofproto/ofproto-dpif-upcall.c
index db78af3..112c78d 100644
--- a/ofproto/ofproto-dpif-upcall.c
+++ b/ofproto/ofproto-dpif-upcall.c
@@ -55,6 +55,8 @@ struct handler {
     struct list upcalls OVS_GUARDED;
     size_t n_upcalls OVS_GUARDED;
 
+    size_t n_new_upcalls;              /* Only changed by the dispatcher. */
+
     pthread_cond_t wake_cond;          /* Wakes 'thread' while holding
                                           'mutex'. */
 };
@@ -515,6 +517,10 @@ static void
 recv_upcalls(struct udpif *udpif)
 {
     static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(60, 60);
+    size_t n_udpif_new_upcalls = 0;
+    struct handler *handler;
+    int n;
+
     for (;;) {
         struct upcall *upcall;
         int error;
@@ -535,7 +541,6 @@ recv_upcalls(struct udpif *udpif)
         } else if (upcall->type == MISS_UPCALL) {
             struct dpif_upcall *dupcall = &upcall->dpif_upcall;
             uint32_t hash = udpif->secret;
-            struct handler *handler;
             struct nlattr *nla;
             size_t n_bytes, left;
 
@@ -562,8 +567,12 @@ recv_upcalls(struct udpif *udpif)
             if (handler->n_upcalls < MAX_QUEUE_LENGTH) {
                 list_push_back(&handler->upcalls, &upcall->list_node);
                 handler->n_upcalls++;
-                xpthread_cond_signal(&handler->wake_cond);
                 ovs_mutex_unlock(&handler->mutex);
+
+                if (++handler->n_new_upcalls >= FLOW_MISS_MAX_BATCH) {
+                    handler->n_new_upcalls = 0;
+                    xpthread_cond_signal(&handler->wake_cond);
+                }
                 if (!VLOG_DROP_DBG(&rl)) {
                     struct ds ds = DS_EMPTY_INITIALIZER;
 
@@ -584,7 +593,11 @@ recv_upcalls(struct udpif *udpif)
                 udpif->n_upcalls++;
                 list_push_back(&udpif->upcalls, &upcall->list_node);
                 ovs_mutex_unlock(&udpif->upcall_mutex);
-                seq_change(udpif->wait_seq);
+
+                if (++n_udpif_new_upcalls >= FLOW_MISS_MAX_BATCH) {
+                    n_udpif_new_upcalls = 0;
+                    seq_change(udpif->wait_seq);
+                }
             } else {
                 ovs_mutex_unlock(&udpif->upcall_mutex);
                 COVERAGE_INC(upcall_queue_overflow);
@@ -592,6 +605,17 @@ recv_upcalls(struct udpif *udpif)
             }
         }
     }
+    for (n = 0; n < udpif->n_handlers; ++n) {
+        handler = &udpif->handlers[n];
+        if (handler->n_new_upcalls) {
+            handler->n_new_upcalls = 0;
+            xpthread_cond_signal(&handler->wake_cond);
+        }
+    }
+    if (n_udpif_new_upcalls) {
+        n_udpif_new_upcalls = 0;
+        seq_change(udpif->wait_seq);
+    }
 }
 
 static struct flow_miss *
-- 
1.7.10.4



