[ovs-dev] [PATCH ovn v3 09/10] lflow-cache: Make maximum number of cache entries configurable.

Dumitru Ceara dceara at redhat.com
Tue Feb 9 20:12:48 UTC 2021


Add a new OVS external-id, "ovn-limit-lflow-cache", through which users
can specify the maximum size of the ovn-controller logical flow cache.

To maintain backwards compatibility, the default behavior is to not
enforce any limit on the size of the cache.
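
For example, an operator could cap the cache at 10000 entries with
something like (illustrative value, not part of this patch):

    $ ovs-vsctl set Open_vSwitch . external_ids:ovn-limit-lflow-cache=10000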

When the cache becomes full, the rule is to prefer more "important"
cache entries over less "important" ones.  That is, evict entries of
type LCACHE_T_CONJ_ID if there's no room to add an entry of type
LCACHE_T_EXPR.  Similarly, evict entries of type LCACHE_T_CONJ_ID or
LCACHE_T_EXPR if there's no room to add an entry of type
LCACHE_T_MATCHES.
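
This rule relies on the cache entry types being declared in increasing
order of importance, so that making room for a new entry only needs to
consider the types that precede it.  A sketch of the assumed ordering (the
actual declaration lives in controller/lflow-cache.h and is not part of
this diff):

    enum lflow_cache_type {
        LCACHE_T_CONJ_ID,   /* Least important; evicted first. */
        LCACHE_T_EXPR,
        LCACHE_T_MATCHES,   /* Most important; never evicted to make room
                             * for another type. */
        LCACHE_T_MAX,
    };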

Acked-by: Mark Michelson <mmichels at redhat.com>
Acked-by: Numan Siddique <numans at ovn.org>
Signed-off-by: Dumitru Ceara <dceara at redhat.com>
---
v3:
- add comment to lflow_cache_make_room__().
- update UT outputs.
---
 NEWS                            |    3 +
 controller/chassis.c            |   23 ++++++++-
 controller/lflow-cache.c        |   55 ++++++++++++++++++++-
 controller/lflow-cache.h        |    2 -
 controller/ovn-controller.8.xml |   16 ++++++
 controller/ovn-controller.c     |    8 +++
 controller/test-lflow-cache.c   |   12 +++--
 tests/ovn-lflow-cache.at        |  104 +++++++++++++++++++++++++++++++++++++++
 8 files changed, 213 insertions(+), 10 deletions(-)

diff --git a/NEWS b/NEWS
index 2044671..6e10557 100644
--- a/NEWS
+++ b/NEWS
@@ -16,6 +16,9 @@ Post-v20.12.0
   - Add a new option to Load_Balancer.options, "hairpin_snat_ip", to allow
     users to explicitly select which source IP should be used for load
     balancer hairpin traffic.
+  - ovn-controller: Add a configuration knob, through OVS external-id
+    "ovn-limit-lflow-cache", to allow enforcing a limit for the size of the
+    logical flow cache.
 
 OVN v20.12.0 - 18 Dec 2020
 --------------------------
diff --git a/controller/chassis.c b/controller/chassis.c
index 0937e33..1b63d8e 100644
--- a/controller/chassis.c
+++ b/controller/chassis.c
@@ -50,6 +50,7 @@ struct ovs_chassis_cfg {
     const char *monitor_all;
     const char *chassis_macs;
     const char *enable_lflow_cache;
+    const char *limit_lflow_cache;
 
     /* Set of encap types parsed from the 'ovn-encap-type' external-id. */
     struct sset encap_type_set;
@@ -136,6 +137,12 @@ get_enable_lflow_cache(const struct smap *ext_ids)
 }
 
 static const char *
+get_limit_lflow_cache(const struct smap *ext_ids)
+{
+    return smap_get_def(ext_ids, "ovn-limit-lflow-cache", "");
+}
+
+static const char *
 get_encap_csum(const struct smap *ext_ids)
 {
     return smap_get_def(ext_ids, "ovn-encap-csum", "true");
@@ -257,6 +264,7 @@ chassis_parse_ovs_config(const struct ovsrec_open_vswitch_table *ovs_table,
     ovs_cfg->monitor_all = get_monitor_all(&cfg->external_ids);
     ovs_cfg->chassis_macs = get_chassis_mac_mappings(&cfg->external_ids);
     ovs_cfg->enable_lflow_cache = get_enable_lflow_cache(&cfg->external_ids);
+    ovs_cfg->limit_lflow_cache = get_limit_lflow_cache(&cfg->external_ids);
 
     if (!chassis_parse_ovs_encap_type(encap_type, &ovs_cfg->encap_type_set)) {
         return false;
@@ -284,13 +292,16 @@ chassis_build_other_config(struct smap *config, const char *bridge_mappings,
                            const char *datapath_type, const char *cms_options,
                            const char *monitor_all, const char *chassis_macs,
                            const char *iface_types,
-                           const char *enable_lflow_cache, bool is_interconn)
+                           const char *enable_lflow_cache,
+                           const char *limit_lflow_cache,
+                           bool is_interconn)
 {
     smap_replace(config, "ovn-bridge-mappings", bridge_mappings);
     smap_replace(config, "datapath-type", datapath_type);
     smap_replace(config, "ovn-cms-options", cms_options);
     smap_replace(config, "ovn-monitor-all", monitor_all);
     smap_replace(config, "ovn-enable-lflow-cache", enable_lflow_cache);
+    smap_replace(config, "ovn-limit-lflow-cache", limit_lflow_cache);
     smap_replace(config, "iface-types", iface_types);
     smap_replace(config, "ovn-chassis-mac-mappings", chassis_macs);
     smap_replace(config, "is-interconn", is_interconn ? "true" : "false");
@@ -307,6 +318,7 @@ chassis_other_config_changed(const char *bridge_mappings,
                              const char *monitor_all,
                              const char *chassis_macs,
                              const char *enable_lflow_cache,
+                             const char *limit_lflow_cache,
                              const struct ds *iface_types,
                              bool is_interconn,
                              const struct sbrec_chassis *chassis_rec)
@@ -346,6 +358,13 @@ chassis_other_config_changed(const char *bridge_mappings,
         return true;
     }
 
+    const char *chassis_limit_lflow_cache =
+        get_limit_lflow_cache(&chassis_rec->other_config);
+
+    if (strcmp(limit_lflow_cache, chassis_limit_lflow_cache)) {
+        return true;
+    }
+
     const char *chassis_mac_mappings =
         get_chassis_mac_mappings(&chassis_rec->other_config);
     if (strcmp(chassis_macs, chassis_mac_mappings)) {
@@ -530,6 +549,7 @@ chassis_update(const struct sbrec_chassis *chassis_rec,
                                      ovs_cfg->monitor_all,
                                      ovs_cfg->chassis_macs,
                                      ovs_cfg->enable_lflow_cache,
+                                     ovs_cfg->limit_lflow_cache,
                                      &ovs_cfg->iface_types,
                                      ovs_cfg->is_interconn,
                                      chassis_rec)) {
@@ -543,6 +563,7 @@ chassis_update(const struct sbrec_chassis *chassis_rec,
                                    ovs_cfg->chassis_macs,
                                    ds_cstr_ro(&ovs_cfg->iface_types),
                                    ovs_cfg->enable_lflow_cache,
+                                   ovs_cfg->limit_lflow_cache,
                                    ovs_cfg->is_interconn);
         sbrec_chassis_verify_other_config(chassis_rec);
         sbrec_chassis_set_other_config(chassis_rec, &other_config);
diff --git a/controller/lflow-cache.c b/controller/lflow-cache.c
index 403aed4..5596c2e 100644
--- a/controller/lflow-cache.c
+++ b/controller/lflow-cache.c
@@ -37,6 +37,8 @@ COVERAGE_DEFINE(lflow_cache_add);
 COVERAGE_DEFINE(lflow_cache_hit);
 COVERAGE_DEFINE(lflow_cache_miss);
 COVERAGE_DEFINE(lflow_cache_delete);
+COVERAGE_DEFINE(lflow_cache_full);
+COVERAGE_DEFINE(lflow_cache_made_room);
 
 static const char *lflow_cache_type_names[LCACHE_T_MAX] = {
     [LCACHE_T_CONJ_ID] = "cache-conj-id",
@@ -46,6 +48,7 @@ static const char *lflow_cache_type_names[LCACHE_T_MAX] = {
 
 struct lflow_cache {
     struct hmap entries[LCACHE_T_MAX];
+    uint32_t capacity;
     bool enabled;
 };
 
@@ -56,6 +59,9 @@ struct lflow_cache_entry {
     struct lflow_cache_value value;
 };
 
+static size_t lflow_cache_n_entries__(const struct lflow_cache *lc);
+static bool lflow_cache_make_room__(struct lflow_cache *lc,
+                                    enum lflow_cache_type type);
 static struct lflow_cache_value *lflow_cache_add__(
     struct lflow_cache *lc, const struct uuid *lflow_uuid,
     enum lflow_cache_type type);
@@ -113,16 +119,18 @@ lflow_cache_destroy(struct lflow_cache *lc)
 }
 
 void
-lflow_cache_enable(struct lflow_cache *lc, bool enabled)
+lflow_cache_enable(struct lflow_cache *lc, bool enabled, uint32_t capacity)
 {
     if (!lc) {
         return;
     }
 
-    if (lc->enabled && !enabled) {
+    if ((lc->enabled && !enabled) || capacity < lflow_cache_n_entries__(lc)) {
         lflow_cache_flush(lc);
     }
+
     lc->enabled = enabled;
+    lc->capacity = capacity;
 }
 
 bool
@@ -236,6 +244,40 @@ lflow_cache_delete(struct lflow_cache *lc, const struct uuid *lflow_uuid)
     }
 }
 
+static size_t
+lflow_cache_n_entries__(const struct lflow_cache *lc)
+{
+    size_t n_entries = 0;
+
+    for (size_t i = 0; i < LCACHE_T_MAX; i++) {
+        n_entries += hmap_count(&lc->entries[i]);
+    }
+    return n_entries;
+}
+
+static bool
+lflow_cache_make_room__(struct lflow_cache *lc, enum lflow_cache_type type)
+{
+    /* When the cache becomes full, the rule is to prefer more "important"
+     * cache entries over less "important" ones.  That is, evict entries of
+     * type LCACHE_T_CONJ_ID if there's no room to add an entry of type
+     * LCACHE_T_EXPR.  Similarly, evict entries of type LCACHE_T_CONJ_ID or
+     * LCACHE_T_EXPR if there's no room to add an entry of type
+     * LCACHE_T_MATCHES.
+     */
+    for (size_t i = 0; i < type; i++) {
+        if (hmap_count(&lc->entries[i]) > 0) {
+            struct lflow_cache_entry *lce =
+                CONTAINER_OF(hmap_first(&lc->entries[i]),
+                             struct lflow_cache_entry, node);
+
+            lflow_cache_delete__(lc, lce);
+            return true;
+        }
+    }
+    return false;
+}
+
 static struct lflow_cache_value *
 lflow_cache_add__(struct lflow_cache *lc, const struct uuid *lflow_uuid,
                   enum lflow_cache_type type)
@@ -244,6 +286,15 @@ lflow_cache_add__(struct lflow_cache *lc, const struct uuid *lflow_uuid,
         return NULL;
     }
 
+    if (lflow_cache_n_entries__(lc) == lc->capacity) {
+        if (!lflow_cache_make_room__(lc, type)) {
+            COVERAGE_INC(lflow_cache_full);
+            return NULL;
+        } else {
+            COVERAGE_INC(lflow_cache_made_room);
+        }
+    }
+
     struct lflow_cache_entry *lce = xzalloc(sizeof *lce);
 
     COVERAGE_INC(lflow_cache_add);
diff --git a/controller/lflow-cache.h b/controller/lflow-cache.h
index 03a64f6..58315de 100644
--- a/controller/lflow-cache.h
+++ b/controller/lflow-cache.h
@@ -55,7 +55,7 @@ struct lflow_cache_value {
 struct lflow_cache *lflow_cache_create(void);
 void lflow_cache_flush(struct lflow_cache *);
 void lflow_cache_destroy(struct lflow_cache *);
-void lflow_cache_enable(struct lflow_cache *, bool enabled);
+void lflow_cache_enable(struct lflow_cache *, bool enabled, uint32_t capacity);
 bool lflow_cache_is_enabled(const struct lflow_cache *);
 void lflow_cache_get_stats(const struct lflow_cache *, struct ds *output);
 
diff --git a/controller/ovn-controller.8.xml b/controller/ovn-controller.8.xml
index 1fa7af4..81e16e3 100644
--- a/controller/ovn-controller.8.xml
+++ b/controller/ovn-controller.8.xml
@@ -249,6 +249,22 @@
         processing the southbound and local Open vSwitch database changes.
         The default value is considered false if this option is not defined.
       </dd>
+
+      <dt><code>external_ids:ovn-enable-lflow-cache</code></dt>
+      <dd>
+        A boolean value that indicates whether <code>ovn-controller</code>
+        should use its in-memory logical flow cache when processing
+        Southbound database logical flow changes.  Caching is enabled by
+        default.
+      </dd>
+
+      <dt><code>external_ids:ovn-limit-lflow-cache</code></dt>
+      <dd>
+        When set, this configuration value determines the maximum number of
+        logical flow cache entries <code>ovn-controller</code> may create
+        when the logical flow cache is enabled.  By default the size of the
+        cache is unlimited.
+      </dd>
     </dl>
 
     <p>
diff --git a/controller/ovn-controller.c b/controller/ovn-controller.c
index c77cfcd..ecb767c 100644
--- a/controller/ovn-controller.c
+++ b/controller/ovn-controller.c
@@ -96,6 +96,9 @@ static unixctl_cb_func debug_delay_nb_cfg_report;
 static char *parse_options(int argc, char *argv[]);
 OVS_NO_RETURN static void usage(void);
 
+/* By default don't set an upper bound for the lflow cache. */
+#define DEFAULT_LFLOW_CACHE_MAX_ENTRIES UINT32_MAX
+
 struct controller_engine_ctx {
     struct lflow_cache *lflow_cache;
 };
@@ -582,7 +585,10 @@ update_sb_db(struct ovsdb_idl *ovs_idl, struct ovsdb_idl *ovnsb_idl,
         lflow_cache_enable(ctx->lflow_cache,
                            smap_get_bool(&cfg->external_ids,
                                          "ovn-enable-lflow-cache",
-                                         true));
+                                         true),
+                           smap_get_uint(&cfg->external_ids,
+                                         "ovn-limit-lflow-cache",
+                                         DEFAULT_LFLOW_CACHE_MAX_ENTRIES));
     }
 }
 
diff --git a/controller/test-lflow-cache.c b/controller/test-lflow-cache.c
index 79fe8c6..96cd6ff 100644
--- a/controller/test-lflow-cache.c
+++ b/controller/test-lflow-cache.c
@@ -102,7 +102,7 @@ test_lflow_cache_operations(struct ovs_cmdl_context *ctx)
     unsigned int shift = 2;
     unsigned int n_ops;
 
-    lflow_cache_enable(lc, enabled);
+    lflow_cache_enable(lc, enabled, UINT32_MAX);
     test_lflow_cache_stats__(lc);
 
     if (!test_read_uint_value(ctx, shift++, "n_ops", &n_ops)) {
@@ -150,11 +150,15 @@ test_lflow_cache_operations(struct ovs_cmdl_context *ctx)
             test_lflow_cache_delete__(lc, &lflow_uuid);
             test_lflow_cache_lookup__(lc, &lflow_uuid);
         } else if (!strcmp(op, "enable")) {
+            unsigned int limit;
+            if (!test_read_uint_value(ctx, shift++, "limit", &limit)) {
+                goto done;
+            }
             printf("ENABLE\n");
-            lflow_cache_enable(lc, true);
+            lflow_cache_enable(lc, true, limit);
         } else if (!strcmp(op, "disable")) {
             printf("DISABLE\n");
-            lflow_cache_enable(lc, false);
+            lflow_cache_enable(lc, false, UINT32_MAX);
         } else if (!strcmp(op, "flush")) {
             printf("FLUSH\n");
             lflow_cache_flush(lc);
@@ -173,7 +177,7 @@ test_lflow_cache_negative(struct ovs_cmdl_context *ctx OVS_UNUSED)
 {
     lflow_cache_flush(NULL);
     lflow_cache_destroy(NULL);
-    lflow_cache_enable(NULL, true);
+    lflow_cache_enable(NULL, true, UINT32_MAX);
     ovs_assert(!lflow_cache_is_enabled(NULL));
 
     struct ds ds = DS_EMPTY_INITIALIZER;
diff --git a/tests/ovn-lflow-cache.at b/tests/ovn-lflow-cache.at
index 5000165..aab7eda 100644
--- a/tests/ovn-lflow-cache.at
+++ b/tests/ovn-lflow-cache.at
@@ -146,7 +146,7 @@ AT_CHECK(
         add conj-id 4 \
         add expr 5 \
         add matches 6 \
-        enable \
+        enable 1000 \
         add conj-id 7 \
         add expr 8 \
         add matches 9 \
@@ -252,6 +252,108 @@ cache-matches   : 0
 ])
 AT_CLEANUP
 
+AT_SETUP([ovn -- unit test -- lflow-cache set limit])
+AT_CHECK(
+    [ovstest test-lflow-cache lflow_cache_operations \
+        true 8 \
+        add conj-id 1 \
+        add expr 2 \
+        add matches 3 \
+        enable 1 \
+        add conj-id 4 \
+        add expr 5 \
+        add matches 6 \
+        add conj-id 7],
+    [0], [dnl
+Enabled: true
+cache-conj-id   : 0
+cache-expr      : 0
+cache-matches   : 0
+ADD conj-id:
+  conj-id-ofs: 1
+LOOKUP:
+  conj_id_ofs: 1
+  type: conj-id
+Enabled: true
+cache-conj-id   : 1
+cache-expr      : 0
+cache-matches   : 0
+ADD expr:
+  conj-id-ofs: 2
+LOOKUP:
+  conj_id_ofs: 2
+  type: expr
+Enabled: true
+cache-conj-id   : 1
+cache-expr      : 1
+cache-matches   : 0
+ADD matches:
+  conj-id-ofs: 3
+LOOKUP:
+  conj_id_ofs: 0
+  type: matches
+Enabled: true
+cache-conj-id   : 1
+cache-expr      : 1
+cache-matches   : 1
+ENABLE
+dnl
+dnl Max capacity smaller than current usage, cache should be flushed.
+dnl
+Enabled: true
+cache-conj-id   : 0
+cache-expr      : 0
+cache-matches   : 0
+ADD conj-id:
+  conj-id-ofs: 4
+LOOKUP:
+  conj_id_ofs: 4
+  type: conj-id
+Enabled: true
+cache-conj-id   : 1
+cache-expr      : 0
+cache-matches   : 0
+ADD expr:
+  conj-id-ofs: 5
+LOOKUP:
+  conj_id_ofs: 5
+  type: expr
+dnl
+dnl Cache is full but we can evict the conj-id entry because we're adding
+dnl an expr one.
+dnl
+Enabled: true
+cache-conj-id   : 0
+cache-expr      : 1
+cache-matches   : 0
+ADD matches:
+  conj-id-ofs: 6
+LOOKUP:
+  conj_id_ofs: 0
+  type: matches
+dnl
+dnl Cache is full but we can evict the expr entry because we're adding
+dnl a matches one.
+dnl
+Enabled: true
+cache-conj-id   : 0
+cache-expr      : 0
+cache-matches   : 1
+ADD conj-id:
+  conj-id-ofs: 7
+LOOKUP:
+  not found
+dnl
+dnl Cache is full and we're adding a conj-id entry so we shouldn't evict
+dnl anything else.
+dnl
+Enabled: true
+cache-conj-id   : 0
+cache-expr      : 0
+cache-matches   : 1
+])
+AT_CLEANUP
+
 AT_SETUP([ovn -- unit test -- lflow-cache negative tests])
 AT_CHECK([ovstest test-lflow-cache lflow_cache_negative], [0], [])
 AT_CLEANUP


