[ovs-dev] [PATCH ovn v2 1/2] lflow: Generate unique integer for each Load Balancer

Mark Gray mark.d.gray at redhat.com
Wed Aug 11 20:00:15 UTC 2021


Conjunctive flows require a unique integer as an identifier.
In a future patch, we will use conjunctive flows for LB
hairpin flows. This patch generates a unique integer id for each
LB.

Signed-off-by: Mark Gray <mark.d.gray at redhat.com>
---
 controller/lflow.c          | 54 ++++++++++++++++++++++++++++++++-----
 controller/lflow.h          |  2 ++
 controller/ovn-controller.c | 14 ++++++++++
 3 files changed, 64 insertions(+), 6 deletions(-)

diff --git a/controller/lflow.c b/controller/lflow.c
index 2e08261c6aed..4aa882d00c54 100644
--- a/controller/lflow.c
+++ b/controller/lflow.c
@@ -17,6 +17,7 @@
 #include "lflow.h"
 #include "coverage.h"
 #include "ha-chassis.h"
+#include "lib/id-pool.h"
 #include "lflow-cache.h"
 #include "local_data.h"
 #include "lport.h"
@@ -1528,8 +1529,13 @@ add_lb_ct_snat_vip_flows(struct ovn_controller_lb *lb,
 static void
 consider_lb_hairpin_flows(const struct sbrec_load_balancer *sbrec_lb,
                           const struct hmap *local_datapaths,
-                          struct ovn_desired_flow_table *flow_table)
+                          struct ovn_desired_flow_table *flow_table,
+                          struct simap *ids)
 {
+    int id = simap_get(ids, sbrec_lb->name);
+    VLOG_DBG("Load Balancer %s has conjunctive flow id %u",
+             sbrec_lb->name, id);
+
     /* Check if we need to add flows or not.  If there is one datapath
      * in the local_datapaths, it means all the datapaths of the lb
      * will be in the local_datapaths. */
@@ -1576,11 +1582,26 @@ consider_lb_hairpin_flows(const struct sbrec_load_balancer *sbrec_lb,
 static void
 add_lb_hairpin_flows(const struct sbrec_load_balancer_table *lb_table,
                      const struct hmap *local_datapaths,
-                     struct ovn_desired_flow_table *flow_table)
+                     struct ovn_desired_flow_table *flow_table,
+                     struct simap *ids,
+                     struct id_pool *pool)
 {
+    uint32_t id;
     const struct sbrec_load_balancer *lb;
     SBREC_LOAD_BALANCER_TABLE_FOR_EACH (lb, lb_table) {
-        consider_lb_hairpin_flows(lb, local_datapaths, flow_table);
+        /* Allocate a unique 32-bit integer to this load-balancer. This will
+           be used as a conjunctive flow id in the OFTABLE_CT_SNAT_HAIRPIN
+           table.
+
+           If we are unable to allocate a unique ID then we have run out of
+           ids. As this is unrecoverable, we abort. However, this is
+           unlikely to happen as it would mean that we have created
+           "UINT32_MAX" load-balancers.
+         */
+
+        ovs_assert(id_pool_alloc_id(pool, &id));
+        simap_put(ids, lb->name, id);
+        consider_lb_hairpin_flows(lb, local_datapaths, flow_table, ids);
     }
 }
 
@@ -1686,7 +1707,9 @@ lflow_run(struct lflow_ctx_in *l_ctx_in, struct lflow_ctx_out *l_ctx_out)
                        l_ctx_in->mac_binding_table, l_ctx_in->local_datapaths,
                        l_ctx_out->flow_table);
     add_lb_hairpin_flows(l_ctx_in->lb_table, l_ctx_in->local_datapaths,
-                         l_ctx_out->flow_table);
+                         l_ctx_out->flow_table,
+                         l_ctx_out->hairpin_lb_ids,
+                         l_ctx_out->hairpin_id_pool);
     add_fdb_flows(l_ctx_in->fdb_table, l_ctx_in->local_datapaths,
                   l_ctx_out->flow_table);
 }
@@ -1804,7 +1827,8 @@ lflow_processing_end:
      * associated. */
     for (size_t i = 0; i < n_dp_lbs; i++) {
         consider_lb_hairpin_flows(dp_lbs[i], l_ctx_in->local_datapaths,
-                                  l_ctx_out->flow_table);
+                                  l_ctx_out->flow_table,
+                                  l_ctx_out->hairpin_lb_ids);
     }
 
     return handled;
@@ -1876,12 +1900,16 @@ lflow_handle_changed_lbs(struct lflow_ctx_in *l_ctx_in,
                          struct lflow_ctx_out *l_ctx_out)
 {
     const struct sbrec_load_balancer *lb;
+    struct id_pool *pool = l_ctx_out->hairpin_id_pool;
+    struct simap *ids = l_ctx_out->hairpin_lb_ids;
 
     SBREC_LOAD_BALANCER_TABLE_FOR_EACH_TRACKED (lb, l_ctx_in->lb_table) {
         if (sbrec_load_balancer_is_deleted(lb)) {
             VLOG_DBG("Remove hairpin flows for deleted load balancer "UUID_FMT,
                      UUID_ARGS(&lb->header_.uuid));
             ofctrl_remove_flows(l_ctx_out->flow_table, &lb->header_.uuid);
+            id_pool_free_id(pool, simap_get(ids, lb->name));
+            simap_find_and_delete(ids, lb->name);
         }
     }
 
@@ -1894,12 +1922,26 @@ lflow_handle_changed_lbs(struct lflow_ctx_in *l_ctx_in,
             VLOG_DBG("Remove hairpin flows for updated load balancer "UUID_FMT,
                      UUID_ARGS(&lb->header_.uuid));
             ofctrl_remove_flows(l_ctx_out->flow_table, &lb->header_.uuid);
+        } else {
+            /* Allocate a unique 32-bit integer to this load-balancer. This
+               will be used as a conjunctive flow id in the
+               OFTABLE_CT_SNAT_HAIRPIN table.
+
+               If we are unable to allocate a unique ID then we have run out of
+               ids. As this is unrecoverable, we abort. However, this is
+               unlikely to happen as it would mean that we have created
+               "UINT32_MAX" load-balancers.
+            */
+            uint32_t id;
+            ovs_assert(id_pool_alloc_id(pool, &id));
+            simap_put(ids, lb->name, id);
         }
 
         VLOG_DBG("Add load balancer hairpin flows for "UUID_FMT,
                  UUID_ARGS(&lb->header_.uuid));
         consider_lb_hairpin_flows(lb, l_ctx_in->local_datapaths,
-                                  l_ctx_out->flow_table);
+                                  l_ctx_out->flow_table,
+                                  l_ctx_out->hairpin_lb_ids);
     }
 
     return true;
diff --git a/controller/lflow.h b/controller/lflow.h
index 6fcda5355cdf..c4a20ce309ba 100644
--- a/controller/lflow.h
+++ b/controller/lflow.h
@@ -158,6 +158,8 @@ struct lflow_ctx_out {
     struct lflow_cache *lflow_cache;
     uint32_t *conj_id_ofs;
     bool conj_id_overflow;
+    struct simap *hairpin_lb_ids;
+    struct id_pool *hairpin_id_pool;
 };
 
 void lflow_init(void);
diff --git a/controller/ovn-controller.c b/controller/ovn-controller.c
index 75f16b15e051..2fbea0d10587 100644
--- a/controller/ovn-controller.c
+++ b/controller/ovn-controller.c
@@ -33,6 +33,7 @@
 #include "openvswitch/dynamic-string.h"
 #include "encaps.h"
 #include "fatal-signal.h"
+#include "lib/id-pool.h"
 #include "if-status.h"
 #include "ip-mcast.h"
 #include "openvswitch/hmap.h"
@@ -2166,6 +2167,11 @@ struct lflow_output_persistent_data {
     struct lflow_cache *lflow_cache;
 };
 
+struct lflow_output_hairpin_data {
+    struct id_pool *pool;
+    struct simap   *ids;
+};
+
 struct ed_type_lflow_output {
     /* Logical flow table */
     struct ovn_desired_flow_table flow_table;
@@ -2179,6 +2185,9 @@ struct ed_type_lflow_output {
     /* Data which is persistent and not cleared during
      * full recompute. */
     struct lflow_output_persistent_data pd;
+
+    /* Data for managing hairpin flow conjunctive flow ids. */
+    struct lflow_output_hairpin_data hd;
 };
 
 static void
@@ -2300,6 +2309,8 @@ init_lflow_ctx(struct engine_node *node,
     l_ctx_out->conj_id_ofs = &fo->pd.conj_id_ofs;
     l_ctx_out->lflow_cache = fo->pd.lflow_cache;
     l_ctx_out->conj_id_overflow = false;
+    l_ctx_out->hairpin_id_pool = fo->hd.pool;
+    l_ctx_out->hairpin_lb_ids = fo->hd.ids;
 }
 
 static void *
@@ -2312,6 +2323,9 @@ en_lflow_output_init(struct engine_node *node OVS_UNUSED,
     ovn_extend_table_init(&data->meter_table);
     data->pd.conj_id_ofs = 1;
     lflow_resource_init(&data->lflow_resource_ref);
+    data->hd.ids = xzalloc(sizeof(*(data->hd.ids)));
+    simap_init(data->hd.ids);
+    data->hd.pool = id_pool_create(1, UINT32_MAX - 1);
     return data;
 }
 
-- 
2.27.0



More information about the dev mailing list