[ovs-dev] [OVN Patch v4 4/4] northd: Restore parallel build with dp_groups

anton.ivanov at cambridgegreys.com
Fri Aug 27 17:48:00 UTC 2021


From: Anton Ivanov <anton.ivanov at cambridgegreys.com>

Restore parallel build with dp groups, using an rwlock instead
of per-row locking as the underlying mechanism.

This provides a ~10% end-to-end improvement on ovn-heater under
virtualization, despite awakening some qemu gremlin which makes
qemu climb to silly CPU usage. The gain on bare metal is likely
to be higher.

Signed-off-by: Anton Ivanov <anton.ivanov at cambridgegreys.com>
---
 northd/ovn-northd.c | 156 ++++++++++++++++++++++++++++++++------------
 1 file changed, 114 insertions(+), 42 deletions(-)
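
Note for reviewers (not part of the commit message): the fast path
takes the rwlock as a reader and only upgrades to a writer when the
flow table actually has to change.  POSIX rwlocks cannot be upgraded
atomically, so the upgrade is a drop-and-reacquire followed by a
re-check, as do_ovn_lflow_add() does below.  A minimal standalone
sketch of the pattern, using plain pthreads (ovs_rwlock is essentially
a thin wrapper around pthread_rwlock) and hypothetical
table_find()/table_insert() helpers standing in for the
single_od/multiple_od hmap lookups:

#include <pthread.h>
#include <stdbool.h>

struct table;                               /* hypothetical */
bool table_find(struct table *, int key);   /* hypothetical */
void table_insert(struct table *, int key); /* hypothetical */

static pthread_rwlock_t table_lock = PTHREAD_RWLOCK_INITIALIZER;

void
table_add(struct table *t, int key)
{
    /* Fast path: most calls find an existing entry, so take only the
     * read lock and let concurrent lookups proceed in parallel. */
    pthread_rwlock_rdlock(&table_lock);
    bool found = table_find(t, key);
    pthread_rwlock_unlock(&table_lock);
    if (found) {
        return;
    }

    /* Slow path: the lock cannot be upgraded in place, so drop the
     * read lock, take the write lock, and re-check before inserting;
     * another thread may have raced us in the gap. */
    pthread_rwlock_wrlock(&table_lock);
    if (!table_find(t, key)) {
        table_insert(t, key);
    }
    pthread_rwlock_unlock(&table_lock);
}

The gain over the per-row hash locks comes from readers not
serializing against each other, assuming most calls hit an existing
flow once the table is warm.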

diff --git a/northd/ovn-northd.c b/northd/ovn-northd.c
index 6493780d8..abf2ad9d8 100644
--- a/northd/ovn-northd.c
+++ b/northd/ovn-northd.c
@@ -59,6 +59,7 @@
 #include "unixctl.h"
 #include "util.h"
 #include "uuid.h"
+#include "ovs-thread.h"
 #include "openvswitch/vlog.h"
 
 VLOG_DEFINE_THIS_MODULE(ovn_northd);
@@ -4368,7 +4369,21 @@ ovn_lflow_init(struct ovn_lflow *lflow, struct ovn_datapath *od,
 static bool use_logical_dp_groups = false;
 static bool use_parallel_build = true;
 
-static struct hashrow_locks lflow_locks;
+static struct ovs_rwlock flowtable_lock;
+
+static void ovn_make_multi_lflow(struct ovn_lflow *old_lflow,
+                              struct ovn_datapath *od,
+                              struct lflow_state *lflow_map,
+                              uint32_t hash)
+{
+    hmapx_add(&old_lflow->od_group, od);
+    hmap_remove(&lflow_map->single_od, &old_lflow->hmap_node);
+    if (use_parallel_build) {
+        hmap_insert_fast(&lflow_map->multiple_od, &old_lflow->hmap_node, hash);
+    } else {
+        hmap_insert(&lflow_map->multiple_od, &old_lflow->hmap_node, hash);
+    }
+}
 
 /* Adds a row with the specified contents to the Logical_Flow table.
  * Version to use when locking is required.
@@ -4383,52 +4398,120 @@ do_ovn_lflow_add(struct lflow_state *lflow_map, struct ovn_datapath *od,
 
     struct ovn_lflow *old_lflow;
     struct ovn_lflow *lflow;
+    bool can_write = false;
+
+    /* Fast path.
+     * See if we can get away without writing: grab a rdlock, look the
+     * flow up, and do as little work as possible.
+     */
 
     if (use_logical_dp_groups) {
+        if (use_parallel_build) {
+            ovs_rwlock_rdlock(&flowtable_lock);
+        }
         old_lflow = do_ovn_lflow_find(&lflow_map->single_od, NULL, stage, priority, match,
                                    actions, ctrl_meter, hash);
         if (old_lflow) {
-            hmapx_add(&old_lflow->od_group, od);
-            /* Found, different, od count went up. Move to multiple od. */
-            if (hmapx_count(&old_lflow->od_group) > 1) {
-                hmap_remove(&lflow_map->single_od, &old_lflow->hmap_node);
+            if (!hmapx_contains(&old_lflow->od_group, od)) {
+                /* 'od' not in od_group; add it and move to multiple. */
                 if (use_parallel_build) {
-                    hmap_insert_fast(&lflow_map->multiple_od, &old_lflow->hmap_node, hash);
-                } else {
-                    hmap_insert(&lflow_map->multiple_od, &old_lflow->hmap_node, hash);
+                    /* Upgrade to a write lock; we will likely modify data. */
+                    if (!can_write) {
+                        ovs_rwlock_unlock(&flowtable_lock);
+                        ovs_rwlock_wrlock(&flowtable_lock);
+                    }
+                    can_write = true;
+                    /* Search again; someone may have gotten here
+                     * ahead of us and moved it to multiple. */
+                    if (!hmap_contains(&lflow_map->single_od, &old_lflow->hmap_node)) {
+                        old_lflow = NULL;
+                        /* Someone did get ahead of us. */
+                        goto amend_multiple;
+                    }
                 }
+                ovn_make_multi_lflow(old_lflow, od, lflow_map, hash);
             }
-        } else {
-            /* Not found, lookup in multiple od. */
+        }
+amend_multiple:
+        if (!old_lflow) {
+            /* Not found in single; look it up in multiple od. */
             old_lflow = do_ovn_lflow_find(&lflow_map->multiple_od, NULL, stage, priority, match,
                                        actions, ctrl_meter, hash);
             if (old_lflow) {
-                hmapx_add(&old_lflow->od_group, od);
+                if (!hmapx_contains(&old_lflow->od_group, od)) {
+                    if (use_parallel_build && (!can_write)) {
+                        /* Upgrade the lock to write. */
+                        ovs_rwlock_unlock(&flowtable_lock);
+                        ovs_rwlock_wrlock(&flowtable_lock);
+                    }
+                    hmapx_add(&old_lflow->od_group, od);
+                }
             }
         }
+        if (use_parallel_build) {
+            ovs_rwlock_unlock(&flowtable_lock);
+        }
         if (old_lflow) {
             return;
         }
     }
 
-    lflow = xmalloc(sizeof *lflow);
-    /* While adding new logical flows we're not setting single datapath, but
-     * collecting a group.  'od' will be updated later for all flows with only
-     * one datapath in a group, so it could be hashed correctly. */
-    ovn_lflow_init(lflow, NULL, stage, priority,
-                   xstrdup(match), xstrdup(actions),
-                   io_port ? xstrdup(io_port) : NULL,
-                   nullable_xstrdup(ctrl_meter),
-                   ovn_lflow_hint(stage_hint), where);
-    hmapx_add(&lflow->od_group, od);
-
-    /* Insert "fresh" lflows into single_od. */
+    /* Slow path.
+     * We could not get away with mostly read-only work; take the
+     * write lock and try to insert, possibly repeating some of the
+     * lookups done on the read side. */
 
+    if (use_logical_dp_groups && use_parallel_build) {
+        ovs_rwlock_wrlock(&flowtable_lock);
+    }
     if (!use_parallel_build) {
+        lflow = xmalloc(sizeof *lflow);
+        /* While adding new logical flows we're not setting single datapath, but
+         * collecting a group.  'od' will be updated later for all flows with only
+         * one datapath in a group, so it could be hashed correctly. */
+        ovn_lflow_init(lflow, NULL, stage, priority,
+                       xstrdup(match), xstrdup(actions),
+                       io_port ? xstrdup(io_port) : NULL,
+                       nullable_xstrdup(ctrl_meter),
+                       ovn_lflow_hint(stage_hint), where);
+        hmapx_add(&lflow->od_group, od);
         hmap_insert(&lflow_map->single_od, &lflow->hmap_node, hash);
     } else {
+        if (use_logical_dp_groups) {
+            /* Search again in case someone else got here first. */
+            old_lflow = do_ovn_lflow_find(&lflow_map->single_od, NULL, stage, priority, match,
+                                       actions, ctrl_meter, hash);
+            if (old_lflow) {
+                if (!hmapx_contains(&old_lflow->od_group, od)) {
+                    ovn_make_multi_lflow(old_lflow, od, lflow_map, hash);
+                }
+                goto done_unlock;
+            }
+            /* Unlikely, but possible: more than one thread got ahead
+             * of us while we were waiting to acquire the write lock. */
+            old_lflow = do_ovn_lflow_find(&lflow_map->multiple_od, NULL, stage, priority, match,
+                                       actions, ctrl_meter, hash);
+            if (old_lflow) {
+                hmapx_add(&old_lflow->od_group, od);
+                goto done_unlock;
+            }
+        }
+        lflow = xmalloc(sizeof *lflow);
+        /* While adding new logical flows we're not setting single datapath, but
+         * collecting a group.  'od' will be updated later for all flows with only
+         * one datapath in a group, so it could be hashed correctly. */
+        ovn_lflow_init(lflow, NULL, stage, priority,
+                       xstrdup(match), xstrdup(actions),
+                       io_port ? xstrdup(io_port) : NULL,
+                       nullable_xstrdup(ctrl_meter),
+                       ovn_lflow_hint(stage_hint), where);
+        hmapx_add(&lflow->od_group, od);
         hmap_insert_fast(&lflow_map->single_od, &lflow->hmap_node, hash);
     }
+done_unlock:
+    if (use_logical_dp_groups && use_parallel_build) {
+        ovs_rwlock_unlock(&flowtable_lock);
+    }
 }
 
 /* Adds a row with the specified contents to the Logical_Flow table. */
@@ -4448,15 +4531,8 @@ ovn_lflow_add_at(struct lflow_state *lflow_map, struct ovn_datapath *od,
                                  priority, match,
                                  actions);
 
-    if (use_logical_dp_groups && use_parallel_build) {
-        lock_hash_row(&lflow_locks, hash);
-        do_ovn_lflow_add(lflow_map, od, hash, stage, priority, match,
-                         actions, io_port, stage_hint, where, ctrl_meter);
-        unlock_hash_row(&lflow_locks, hash);
-    } else {
-        do_ovn_lflow_add(lflow_map, od, hash, stage, priority, match,
-                         actions, io_port, stage_hint, where, ctrl_meter);
-    }
+    do_ovn_lflow_add(lflow_map, od, hash, stage, priority, match,
+                     actions, io_port, stage_hint, where, ctrl_meter);
 }
 
 /* Adds a row with the specified contents to the Logical_Flow table. */
@@ -4548,15 +4624,9 @@ hmap_safe_remove(struct hmap *hmap, struct hmap_node *node)
 static void
 remove_lflow_from_lflows(struct lflow_state *lflows, struct ovn_lflow *lflow)
 {
-    if (use_logical_dp_groups && use_parallel_build) {
-        lock_hash_row(&lflow_locks, lflow->hmap_node.hash);
-    }
     if (!hmap_safe_remove(&lflows->multiple_od, &lflow->hmap_node)) {
         hmap_remove(&lflows->single_od, &lflow->hmap_node);
     }
-    if (use_logical_dp_groups && use_parallel_build) {
-        unlock_hash_row(&lflow_locks, lflow->hmap_node.hash);
-    }
 }
 
 static void
@@ -12948,7 +13018,7 @@ build_lswitch_and_lrouter_flows(struct hmap *datapaths, struct hmap *ports,
         }
     }
 
-    if (use_parallel_build && (!use_logical_dp_groups)) {
+    if (use_parallel_build) {
         struct lflow_state *lflow_segs;
         struct lswitch_flow_build_info *lsiv;
         int index;
@@ -13176,6 +13246,8 @@ reconcile_lflow(struct ovn_lflow *lflow, struct northd_context *ctx,
     ovn_lflow_destroy(lflows, lflow);
 }
 
+static bool needs_init = true;
+
 /* Updates the Logical_Flow and Multicast_Group tables in the OVN_SB database,
  * constructing their contents based on the OVN_NB database. */
 static void
@@ -13190,8 +13262,9 @@ build_lflows(struct northd_context *ctx, struct hmap *datapaths,
 
     fast_hmap_size_for(&lflows.single_od, max_seen_lflow_size);
     fast_hmap_size_for(&lflows.multiple_od, max_seen_lflow_size);
-    if (use_parallel_build && use_logical_dp_groups) {
-        update_hashrow_locks(&lflows.single_od, &lflow_locks);
+    if (use_parallel_build && use_logical_dp_groups && needs_init) {
+        ovs_rwlock_init(&flowtable_lock);
+        needs_init = false;
     }
 
 
@@ -15118,7 +15191,6 @@ main(int argc, char *argv[])
 
     daemonize_complete();
 
-    init_hash_row_locks(&lflow_locks);
     use_parallel_build = can_parallelize_hashes(false);
 
     /* We want to detect (almost) all changes to the ovn-nb db. */
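
Note for reviewers: the needs_init flag relies on build_lflows() first
running before any worker threads touch the lock.  A sketch of two
flag-free alternatives, assuming OVS_RWLOCK_INITIALIZER and
ovsthread_once from ovs-thread.h are acceptable here:

/* Static initialization: the lock is valid before any thread runs. */
static struct ovs_rwlock flowtable_lock = OVS_RWLOCK_INITIALIZER;

/* Or a one-time runtime guard: */
static void
flowtable_lock_init(void)
{
    static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;

    if (ovsthread_once_start(&once)) {
        ovs_rwlock_init(&flowtable_lock);
        ovsthread_once_done(&once);
    }
}
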
-- 
2.20.1


