[ovs-dev] [RFC] ovn-controller: Only process lflows for local datapaths.

Russell Bryant russell at ovn.org
Thu Jan 21 20:21:00 UTC 2016


Previously, ovn-controller translated logical flows into OpenFlow flows
for *every* logical datapath.  This patch makes it so we skip doing so
for the egress pipeline if the datapath is a logical switch with no
logical ports bound locally.  In that case, the flows have no effect.

This was the code path taking the most time in a large scale OVN
environment and was an easy optimization to make based on the existing
local_datapaths info.

In this environment, while idling, ovn-controller was taking up about
20% CPU with this patch, while other nodes were in the 40-70% range.

Reported-at: https://bugs.launchpad.net/networking-ovn/+bug/1536003
Signed-off-by: Russell Bryant <russell at ovn.org>
Tested-by: Matt Mulsow <mamulsow at us.ibm.com>
---


As discussed in the OVN IRC meeting today, this is one patch I came up
with while trying to analyze the performance in a large scale OVN
test setup.  It made a big impact for not much code.  I know Ben had some
suggestions for how to clean this up, so I'm submitting it as an RFC for now.


 ovn/controller/lflow.c          | 34 ++++++++++++++++++++++++++++++++--
 ovn/controller/lflow.h          |  3 ++-
 ovn/controller/ovn-controller.c |  2 +-
 3 files changed, 35 insertions(+), 4 deletions(-)

diff --git a/ovn/controller/lflow.c b/ovn/controller/lflow.c
index 32491cf..f1d8ada 100644
--- a/ovn/controller/lflow.c
+++ b/ovn/controller/lflow.c
@@ -157,6 +157,11 @@ symtab_init(void)
 
 /* Logical datapaths and logical port numbers. */
 
+enum ldp_type {
+    LDP_TYPE_ROUTER,
+    LDP_TYPE_SWITCH,
+};
+
 /* A logical datapath.
  *
  * 'ports' maps 'logical_port' names to 'tunnel_key' values in the OVN_SB
@@ -166,6 +171,7 @@ struct logical_datapath {
     struct uuid uuid;           /* UUID from Datapath_Binding row. */
     uint32_t tunnel_key;        /* 'tunnel_key' from Datapath_Binding row. */
     struct simap ports;         /* Logical port name to port number. */
+    enum ldp_type type;         /* Type of logical datapath */
 };
 
 /* Contains "struct logical_datapath"s. */
@@ -197,6 +203,8 @@ ldp_create(const struct sbrec_datapath_binding *binding)
                 uuid_hash(&binding->header_.uuid));
     ldp->uuid = binding->header_.uuid;
     ldp->tunnel_key = binding->tunnel_key;
+    const char *ls = smap_get(&binding->external_ids, "logical-switch");
+    ldp->type = ls ? LDP_TYPE_SWITCH : LDP_TYPE_ROUTER;
     simap_init(&ldp->ports);
     return ldp;
 }
@@ -267,7 +275,8 @@ lflow_init(void)
  * into OpenFlow flows.  See ovn-architecture(7) for more information. */
 void
 lflow_run(struct controller_ctx *ctx, struct hmap *flow_table,
-          const struct simap *ct_zones)
+          const struct simap *ct_zones,
+          struct hmap *local_datapaths)
 {
     struct hmap flows = HMAP_INITIALIZER(&flows);
     uint32_t conj_id_ofs = 1;
@@ -286,8 +295,29 @@ lflow_run(struct controller_ctx *ctx, struct hmap *flow_table,
             continue;
         }
 
-        /* Determine translation of logical table IDs to physical table IDs. */
         bool ingress = !strcmp(lflow->pipeline, "ingress");
+
+        if (ldp->type == LDP_TYPE_SWITCH && !ingress) {
+            /* For a logical switch datapath, local_datapaths tells us if there
+             * are any local ports for this datapath.  If not, processing
+             * logical flows for the egress pipeline of this datapath is
+             * unnecessary.
+             *
+             * We still need the ingress pipeline because even if there are no
+             * local ports, we still may need to execute the ingress pipeline
+             * after a packet leaves a logical router.  Further optimization
+             * is possible, but not based on what we know with local_datapaths
+             * right now.
+             */
+
+            struct hmap_node *ld;
+            ld = hmap_first_with_hash(local_datapaths, ldp->tunnel_key);
+            if (!ld) {
+                continue;
+            }
+        }
+
+        /* Determine translation of logical table IDs to physical table IDs. */
         uint8_t first_ptable = (ingress
                                 ? OFTABLE_LOG_INGRESS_PIPELINE
                                 : OFTABLE_LOG_EGRESS_PIPELINE);
diff --git a/ovn/controller/lflow.h b/ovn/controller/lflow.h
index 4a4fa83..ccbad30 100644
--- a/ovn/controller/lflow.h
+++ b/ovn/controller/lflow.h
@@ -57,7 +57,8 @@ struct uuid;
 
 void lflow_init(void);
 void lflow_run(struct controller_ctx *, struct hmap *flow_table,
-               const struct simap *ct_zones);
+               const struct simap *ct_zones,
+               struct hmap *local_datapaths);
 void lflow_destroy(void);
 
 #endif /* ovn/lflow.h */
diff --git a/ovn/controller/ovn-controller.c b/ovn/controller/ovn-controller.c
index b4d3fd3..7d285e3 100644
--- a/ovn/controller/ovn-controller.c
+++ b/ovn/controller/ovn-controller.c
@@ -300,7 +300,7 @@ main(int argc, char *argv[])
             pinctrl_run(&ctx, br_int);
 
             struct hmap flow_table = HMAP_INITIALIZER(&flow_table);
-            lflow_run(&ctx, &flow_table, &ct_zones);
+            lflow_run(&ctx, &flow_table, &ct_zones, &local_datapaths);
             if (chassis_id) {
                 physical_run(&ctx, mff_ovn_geneve,
                              br_int, chassis_id, &ct_zones, &flow_table);
-- 
2.5.0




More information about the dev mailing list