[ovs-dev] [threaded-learning v2 12/25] guarded-list: New data structure for thread-safe queue.

Ben Pfaff <blp at nicira.com>
Fri Sep 13 00:42:03 UTC 2013


On Thu, Sep 12, 2013 at 04:43:11PM -0700, Ben Pfaff wrote:
> On Thu, Sep 12, 2013 at 04:39:01PM -0700, Ethan Jackson wrote:
> > Well, for the first suggestion, I think it simply comes down to
> > checking whether reval_seq changed during handle_miss_upcalls(), as we
> > did before.  If it changed, we skip the guarded_list_push_back().
> > Of course there's a race window, but it's small and doesn't matter.
> 
> I didn't realize you were OK with a small race.  It's trivial then.
> I'll change that.
> 
> > For the second suggestion, I think we just need to change the infinite
> > for loop to try at most 50 times and give up if there's nothing there.
> 
> No problem, will do.

I folded this in; let me know if you want changes.
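
For reference, since the code below calls into it without showing the
implementation: guarded-list is just a list protected by a single mutex,
with a length-bounded push.  Here is a minimal sketch of the pattern,
using plain pthreads and OVS's list.h; the struct layout and the init
function are my reconstruction rather than the patch's exact code, but
the zero-on-overflow return convention is the one the diff relies on:

#include <pthread.h>
#include "list.h"               /* OVS doubly linked list. */

struct guarded_list {
    pthread_mutex_t mutex;      /* Guards 'list' and 'n'. */
    struct list list;
    size_t n;                   /* Number of nodes currently queued. */
};

void
guarded_list_init(struct guarded_list *g)
{
    pthread_mutex_init(&g->mutex, NULL);
    list_init(&g->list);
    g->n = 0;
}

/* Appends 'node' unless the list already holds 'max' nodes.  Returns the
 * new length on success, 0 if the list was full. */
size_t
guarded_list_push_back(struct guarded_list *g, struct list *node, size_t max)
{
    size_t retval = 0;

    pthread_mutex_lock(&g->mutex);
    if (g->n < max) {
        list_push_back(&g->list, node);
        retval = ++g->n;
    }
    pthread_mutex_unlock(&g->mutex);

    return retval;
}

/* Removes and returns the node at the head of the list, or NULL if the
 * list is empty. */
struct list *
guarded_list_pop_front(struct guarded_list *g)
{
    struct list *node = NULL;

    pthread_mutex_lock(&g->mutex);
    if (g->n) {
        node = list_pop_front(&g->list);
        g->n--;
    }
    pthread_mutex_unlock(&g->mutex);

    return node;
}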

diff --git a/ofproto/ofproto-dpif-upcall.c b/ofproto/ofproto-dpif-upcall.c
index 850633d..bc1e884 100644
--- a/ofproto/ofproto-dpif-upcall.c
+++ b/ofproto/ofproto-dpif-upcall.c
@@ -42,6 +42,7 @@ COVERAGE_DEFINE(upcall_queue_overflow);
 COVERAGE_DEFINE(drop_queue_overflow);
 COVERAGE_DEFINE(miss_queue_overflow);
 COVERAGE_DEFINE(fmb_queue_overflow);
+COVERAGE_DEFINE(fmb_queue_revalidated);
 
 /* A thread that processes each upcall handed to it by the dispatcher thread,
  * forwards the upcall's packet, and then queues it to the main ofproto_dpif
@@ -279,14 +280,16 @@ upcall_destroy(struct upcall *upcall)
 struct flow_miss_batch *
 flow_miss_batch_next(struct udpif *udpif)
 {
-    for (;;) {
+    int i;
+
+    for (i = 0; i < 50; i++) {
         struct flow_miss_batch *next;
         unsigned int reval_seq;
         struct list *next_node;
 
         next_node = guarded_list_pop_front(&udpif->fmbs);
         if (!next_node) {
-            return NULL;
+            break;
         }
 
         next = CONTAINER_OF(next_node, struct flow_miss_batch, list_node);
@@ -297,6 +300,8 @@ flow_miss_batch_next(struct udpif *udpif)
 
         flow_miss_batch_destroy(next);
     }
+
+    return NULL;
 }
 
 /* Destroys and deallocates 'fmb'. */
@@ -696,6 +701,7 @@ handle_miss_upcalls(struct udpif *udpif, struct list *upcalls)
     struct flow_miss_batch *fmb;
     size_t n_upcalls, n_ops, i;
     struct flow_miss *miss;
+    unsigned int reval_seq;
 
     /* Construct the to-do list.
      *
@@ -792,11 +798,15 @@ handle_miss_upcalls(struct udpif *udpif, struct list *upcalls)
     }
     dpif_operate(udpif->dpif, opsp, n_ops);
 
-    if (guarded_list_push_back(&udpif->fmbs, &fmb->list_node,
-                               MAX_QUEUE_LENGTH)) {
-        seq_change(udpif->wait_seq);
-    } else {
+    atomic_read(&udpif->reval_seq, &reval_seq);
+    if (reval_seq != fmb->reval_seq) {
+        COVERAGE_INC(fmb_queue_revalidated);
+        flow_miss_batch_destroy(fmb);
+    } else if (!guarded_list_push_back(&udpif->fmbs, &fmb->list_node,
+                                       MAX_QUEUE_LENGTH)) {
         COVERAGE_INC(fmb_queue_overflow);
         flow_miss_batch_destroy(fmb);
+    } else {
+        seq_change(udpif->wait_seq);
     }
 }
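
For the record, the "small race" Ethan mentions above is the window
between the atomic_read() and the guarded_list_push_back().  As a C
comment, the interleaving looks roughly like this (my annotation, not
part of the patch):

/* handler thread                         another thread
 * --------------                         --------------
 * atomic_read(&udpif->reval_seq, ...)
 *                                        reval_seq is bumped
 * guarded_list_push_back(...)            <-- stale batch queued anyway
 *
 * Harmless: flow_miss_batch_next() compares each popped batch's reval_seq
 * against the current value and destroys mismatches, so a batch that
 * slips through the window is simply destroyed a little later. */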


