[ovs-dev] [PATCH RFC ovn 4/4] ovn-controller: Skip non-local lflows in ovn-controller before parsing.

Han Zhou hzhou at ovn.org
Thu Jul 1 05:45:22 UTC 2021


With the help of the logical_flow's in_out_port tag, ovn-controller can
skip parsing a large portion of the logical flows in the SB DB, which
significantly improves its performance whenever a full recompute is
required.
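
The check added to consider_logical_flow__() (see the lflow.c hunk below)
boils down to the simplified sketch that follows. The helper name
lflow_can_be_skipped() is only for illustration, and the sketch assumes
that entries in local_lport_ids use the "<datapath-key>_<port-key>"
string format produced by get_unique_lport_key():

    /* Illustrative sketch only, not the exact patch code.  A tagged lflow
     * can be skipped when its in_out_port is unknown or not local. */
    static bool
    lflow_can_be_skipped(const struct sbrec_logical_flow *lflow,
                         const struct sbrec_datapath_binding *dp,
                         struct ovsdb_idl_index *sbrec_port_binding_by_name,
                         const struct sset *local_lport_ids)
    {
        /* Only lflows tagged by ovn-northd with an in_out_port can be
         * skipped; untagged lflows must still be parsed. */
        const char *io_port = smap_get(&lflow->tags, "in_out_port");
        if (!io_port) {
            return false;
        }

        /* If the matched inport/outport is not bound, or not local to this
         * chassis, the lflow cannot produce any local flows. */
        const struct sbrec_port_binding *pb
            = lport_lookup_by_name(sbrec_port_binding_by_name, io_port);
        if (!pb) {
            return true;
        }

        char buf[16];
        get_unique_lport_key(dp->tunnel_key, pb->tunnel_key, buf, sizeof buf);
        return !sset_contains(local_lport_ids, buf);
    }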

With a scale-test topology of 1000 chassis, 20 LSPs per chassis, and 20k
lports in total spread across 200 logical switches connected by a
logical router, the test results before and after this change are:

Before:
- lflow-cache disabled:
    - ovn-controller recompute: 2.7 sec
- lflow-cache enabled:
    - ovn-controller recompute: 2.1 sec
    - lflow cache memory: 622103 KB

After:
- lflow-cache disabled:
    - ovn-controller recompute: 0.83 sec
- lflow-cache enabled:
    - ovn-controller recompute: 0.71 sec
    - lflow cache memory: 123641 KB

(note: DP groups enabled in both cases)

So for this test scenario, with the lflow cache disabled, recompute
latency is reduced by ~70%; with the lflow cache enabled, latency is
reduced by ~65% and lflow cache memory by ~80%.

Signed-off-by: Han Zhou <hzhou at ovn.org>
---
 controller/lflow.c          | 21 +++++++++++++++++++++
 controller/lflow.h          |  1 +
 controller/ovn-controller.c |  1 +
 3 files changed, 23 insertions(+)

diff --git a/controller/lflow.c b/controller/lflow.c
index b7699a309..ee05c559c 100644
--- a/controller/lflow.c
+++ b/controller/lflow.c
@@ -740,6 +740,27 @@ consider_logical_flow__(const struct sbrec_logical_flow *lflow,
         return true;
     }
 
+    const char *io_port = smap_get(&lflow->tags, "in_out_port");
+    if (io_port) {
+        lflow_resource_add(l_ctx_out->lfrr, REF_TYPE_PORTBINDING, io_port,
+                           &lflow->header_.uuid);
+        const struct sbrec_port_binding *pb
+            = lport_lookup_by_name(l_ctx_in->sbrec_port_binding_by_name,
+                                   io_port);
+        if (!pb) {
+            VLOG_DBG("lflow "UUID_FMT" matches inport/outport %s that's not "
+                     "found, skip", UUID_ARGS(&lflow->header_.uuid), io_port);
+            return true;
+        }
+        char buf[16];
+        get_unique_lport_key(dp->tunnel_key, pb->tunnel_key, buf, sizeof buf);
+        if (!sset_contains(l_ctx_in->local_lport_ids, buf)) {
+            VLOG_DBG("lflow "UUID_FMT" matches inport/outport %s that's not "
+                     "local, skip", UUID_ARGS(&lflow->header_.uuid), io_port);
+            return true;
+        }
+    }
+
     /* Determine translation of logical table IDs to physical table IDs. */
     bool ingress = !strcmp(lflow->pipeline, "ingress");
 
diff --git a/controller/lflow.h b/controller/lflow.h
index 9d8882ae5..797d2d026 100644
--- a/controller/lflow.h
+++ b/controller/lflow.h
@@ -146,6 +146,7 @@ struct lflow_ctx_in {
     const struct shash *port_groups;
     const struct sset *active_tunnels;
     const struct sset *local_lport_ids;
+    const struct sset *local_lports;
 };
 
 struct lflow_ctx_out {
diff --git a/controller/ovn-controller.c b/controller/ovn-controller.c
index b15ecbb5d..24da79628 100644
--- a/controller/ovn-controller.c
+++ b/controller/ovn-controller.c
@@ -2048,6 +2048,7 @@ init_lflow_ctx(struct engine_node *node,
     l_ctx_in->port_groups = port_groups;
     l_ctx_in->active_tunnels = &rt_data->active_tunnels;
     l_ctx_in->local_lport_ids = &rt_data->local_lport_ids;
+    l_ctx_in->local_lports = &rt_data->local_lports;
 
     l_ctx_out->flow_table = &fo->flow_table;
     l_ctx_out->group_table = &fo->group_table;
-- 
2.30.2


