[ovs-dev] [PATCH ovn v2 10/10] lflow-cache: Make max cache memory usage configurable.

Numan Siddique numans at ovn.org
Tue Feb 9 06:06:05 UTC 2021


On Thu, Feb 4, 2021 at 6:57 PM Dumitru Ceara <dceara at redhat.com> wrote:
>
> Add a new OVS external-id, "ovn-memlimit-lflow-cache-kb", through which
> users can specify the maximum amount of memory (in KB) ovn-controller
> can use for caching logical flows.
>
> To maintain backwards compatibility the default behavior is to not
> enforce any memory limit on the size of the cache.
>
> Add lflow cache reports to "ovn-appctl -t ovn-controller memory/show".
>
> The memory usage for cache entries of type LCACHE_T_EXPR is computed by
> doing another pass through the expression tree.  While this adds a few
> extra cycles, entries of type LCACHE_T_EXPR shouldn't be very common
> because they are created only for flows with matches that include
> "is_chassis_resident()" but do not reference port groups and address sets.
>
> Signed-off-by: Dumitru Ceara <dceara at redhat.com>

Hi Dumitru,

Thanks for working on this series.

I have one comment. I think it would be useful to report the memory
used by the cache
in the reply to the "lflow-cache/show-stats" command.

Thanks
Numan

> ---
>  NEWS                            |    8 +++--
>  controller/chassis.c            |   21 +++++++++++++
>  controller/lflow-cache.c        |   63 ++++++++++++++++++++++++++++++---------
>  controller/lflow-cache.h        |   13 ++++++--
>  controller/lflow.c              |    8 +++--
>  controller/ovn-controller.8.xml |    7 ++++
>  controller/ovn-controller.c     |    8 ++++-
>  controller/test-lflow-cache.c   |   31 +++++++++++++------
>  include/ovn/expr.h              |    3 +-
>  lib/expr.c                      |   39 ++++++++++++++++++++++++
>  tests/ovn-lflow-cache.at        |   54 +++++++++++++++++++++++++++++++--
>  11 files changed, 213 insertions(+), 42 deletions(-)
>
> diff --git a/NEWS b/NEWS
> index 6e10557..ddb5977 100644
> --- a/NEWS
> +++ b/NEWS
> @@ -16,9 +16,11 @@ Post-v20.12.0
>    - Add a new option to Load_Balancer.options, "hairpin_snat_ip", to allow
>      users to explicitly select which source IP should be used for load
>      balancer hairpin traffic.
> -  - ovn-controller: Add a configuration knob, through OVS external-id
> -    "ovn-limit-lflow-cache", to allow enforcing a limit for the size of the
> -    logical flow cache.
> +  - ovn-controller: Add configuration knobs, through OVS external-id
> +    "ovn-limit-lflow-cache" and "ovn-memlimit-lflow-cache-kb", to allow
> +    enforcing a limit for the size of the logical flow cache based on
> +    maximum number of entries and/or memory usage.
> +  - ovn-controller: Add lflow cache related memory reports.
>
>  OVN v20.12.0 - 18 Dec 2020
>  --------------------------
> diff --git a/controller/chassis.c b/controller/chassis.c
> index c66837a..a4a264c 100644
> --- a/controller/chassis.c
> +++ b/controller/chassis.c
> @@ -50,6 +50,7 @@ struct ovs_chassis_cfg {
>      const char *chassis_macs;
>      const char *enable_lflow_cache;
>      const char *limit_lflow_cache;
> +    const char *memlimit_lflow_cache;
>
>      /* Set of encap types parsed from the 'ovn-encap-type' external-id. */
>      struct sset encap_type_set;
> @@ -142,6 +143,12 @@ get_limit_lflow_cache(const struct smap *ext_ids)
>  }
>
>  static const char *
> +get_memlimit_lflow_cache(const struct smap *ext_ids)
> +{
> +    return smap_get_def(ext_ids, "ovn-memlimit-lflow-cache-kb", "");
> +}
> +
> +static const char *
>  get_encap_csum(const struct smap *ext_ids)
>  {
>      return smap_get_def(ext_ids, "ovn-encap-csum", "true");
> @@ -264,6 +271,8 @@ chassis_parse_ovs_config(const struct ovsrec_open_vswitch_table *ovs_table,
>      ovs_cfg->chassis_macs = get_chassis_mac_mappings(&cfg->external_ids);
>      ovs_cfg->enable_lflow_cache = get_enable_lflow_cache(&cfg->external_ids);
>      ovs_cfg->limit_lflow_cache = get_limit_lflow_cache(&cfg->external_ids);
> +    ovs_cfg->memlimit_lflow_cache =
> +        get_memlimit_lflow_cache(&cfg->external_ids);
>
>      if (!chassis_parse_ovs_encap_type(encap_type, &ovs_cfg->encap_type_set)) {
>          return false;
> @@ -293,6 +302,7 @@ chassis_build_other_config(struct smap *config, const char *bridge_mappings,
>                             const char *iface_types,
>                             const char *enable_lflow_cache,
>                             const char *limit_lflow_cache,
> +                           const char *memlimit_lflow_cache,
>                             bool is_interconn)
>  {
>      smap_replace(config, "ovn-bridge-mappings", bridge_mappings);
> @@ -301,6 +311,7 @@ chassis_build_other_config(struct smap *config, const char *bridge_mappings,
>      smap_replace(config, "ovn-monitor-all", monitor_all);
>      smap_replace(config, "ovn-enable-lflow-cache", enable_lflow_cache);
>      smap_replace(config, "ovn-limit-lflow-cache", limit_lflow_cache);
> +    smap_replace(config, "ovn-memlimit-lflow-cache-kb", memlimit_lflow_cache);
>      smap_replace(config, "iface-types", iface_types);
>      smap_replace(config, "ovn-chassis-mac-mappings", chassis_macs);
>      smap_replace(config, "is-interconn", is_interconn ? "true" : "false");
> @@ -317,6 +328,7 @@ chassis_other_config_changed(const char *bridge_mappings,
>                               const char *chassis_macs,
>                               const char *enable_lflow_cache,
>                               const char *limit_lflow_cache,
> +                             const char *memlimit_lflow_cache,
>                               const struct ds *iface_types,
>                               bool is_interconn,
>                               const struct sbrec_chassis *chassis_rec)
> @@ -363,6 +375,13 @@ chassis_other_config_changed(const char *bridge_mappings,
>          return true;
>      }
>
> +    const char *chassis_memlimit_lflow_cache =
> +        get_memlimit_lflow_cache(&chassis_rec->other_config);
> +
> +    if (strcmp(memlimit_lflow_cache, chassis_memlimit_lflow_cache)) {
> +        return true;
> +    }
> +
>      const char *chassis_mac_mappings =
>          get_chassis_mac_mappings(&chassis_rec->other_config);
>      if (strcmp(chassis_macs, chassis_mac_mappings)) {
> @@ -543,6 +562,7 @@ chassis_update(const struct sbrec_chassis *chassis_rec,
>                                       ovs_cfg->chassis_macs,
>                                       ovs_cfg->enable_lflow_cache,
>                                       ovs_cfg->limit_lflow_cache,
> +                                     ovs_cfg->memlimit_lflow_cache,
>                                       &ovs_cfg->iface_types,
>                                       ovs_cfg->is_interconn,
>                                       chassis_rec)) {
> @@ -557,6 +577,7 @@ chassis_update(const struct sbrec_chassis *chassis_rec,
>                                     ds_cstr_ro(&ovs_cfg->iface_types),
>                                     ovs_cfg->enable_lflow_cache,
>                                     ovs_cfg->limit_lflow_cache,
> +                                   ovs_cfg->memlimit_lflow_cache,
>                                     ovs_cfg->is_interconn);
>          sbrec_chassis_verify_other_config(chassis_rec);
>          sbrec_chassis_set_other_config(chassis_rec, &other_config);
> diff --git a/controller/lflow-cache.c b/controller/lflow-cache.c
> index 342b20a..60a8389 100644
> --- a/controller/lflow-cache.c
> +++ b/controller/lflow-cache.c
> @@ -38,6 +38,7 @@ COVERAGE_DEFINE(lflow_cache_hit);
>  COVERAGE_DEFINE(lflow_cache_miss);
>  COVERAGE_DEFINE(lflow_cache_delete);
>  COVERAGE_DEFINE(lflow_cache_full);
> +COVERAGE_DEFINE(lflow_cache_mem_full);
>  COVERAGE_DEFINE(lflow_cache_made_room);
>
>  const char *lflow_cache_type_names[LCACHE_T_MAX] = {
> @@ -49,12 +50,15 @@ const char *lflow_cache_type_names[LCACHE_T_MAX] = {
>  struct lflow_cache {
>      struct hmap entries[LCACHE_T_MAX];
>      uint32_t capacity;
> +    uint64_t mem_usage;
> +    uint64_t max_mem_usage;
>      bool enabled;
>  };
>
>  struct lflow_cache_entry {
>      struct hmap_node node;
>      struct uuid lflow_uuid; /* key */
> +    size_t size;
>
>      struct lflow_cache_value value;
>  };
> @@ -65,7 +69,8 @@ static bool lflow_cache_make_room__(struct lflow_cache *lc,
>  static struct lflow_cache_value *lflow_cache_add__(
>      struct lflow_cache *lc,
>      const struct sbrec_logical_flow *lflow,
> -    enum lflow_cache_type type);
> +    enum lflow_cache_type type,
> +    uint64_t value_size);
>  static void lflow_cache_delete__(struct lflow_cache *lc,
>                                   struct lflow_cache_entry *lce);
>
> @@ -79,6 +84,7 @@ lflow_cache_create(void)
>      }
>
>      lc->enabled = true;
> +    lc->mem_usage = 0;
>      return lc;
>  }
>
> @@ -120,18 +126,24 @@ lflow_cache_destroy(struct lflow_cache *lc)
>  }
>
>  void
> -lflow_cache_enable(struct lflow_cache *lc, bool enabled, uint32_t capacity)
> +lflow_cache_enable(struct lflow_cache *lc, bool enabled, uint32_t capacity,
> +                   uint64_t max_mem_usage_kb)
>  {
>      if (!lc) {
>          return;
>      }
>
> -    if ((lc->enabled && !enabled) || capacity < lflow_cache_n_entries__(lc)) {
> +    uint64_t max_mem_usage = max_mem_usage_kb * 1024;
> +
> +    if ((lc->enabled && !enabled)
> +            || capacity < lflow_cache_n_entries__(lc)
> +            || max_mem_usage < lc->mem_usage) {
>          lflow_cache_flush(lc);
>      }
>
>      lc->enabled = enabled;
>      lc->capacity = capacity;
> +    lc->max_mem_usage = max_mem_usage;
>  }
>
>  bool
> @@ -161,8 +173,7 @@ lflow_cache_add_conj_id(struct lflow_cache *lc,
>                          uint32_t conj_id_ofs)
>  {
>      struct lflow_cache_value *lcv =
> -        lflow_cache_add__(lc, lflow, LCACHE_T_CONJ_ID);
> -
> +        lflow_cache_add__(lc, lflow, LCACHE_T_CONJ_ID, 0);
>      if (!lcv) {
>          return;
>      }
> @@ -173,12 +184,11 @@ lflow_cache_add_conj_id(struct lflow_cache *lc,
>  void
>  lflow_cache_add_expr(struct lflow_cache *lc,
>                       const struct sbrec_logical_flow *lflow,
> -                     uint32_t conj_id_ofs,
> -                     struct expr *expr)
> +                     uint32_t conj_id_ofs, struct expr *expr,
> +                     size_t expr_sz)
>  {
>      struct lflow_cache_value *lcv =
> -        lflow_cache_add__(lc, lflow, LCACHE_T_EXPR);
> -
> +        lflow_cache_add__(lc, lflow, LCACHE_T_EXPR, expr_sz);
>      if (!lcv) {
>          expr_destroy(expr);
>          return;
> @@ -191,11 +201,10 @@ lflow_cache_add_expr(struct lflow_cache *lc,
>  void
>  lflow_cache_add_matches(struct lflow_cache *lc,
>                          const struct sbrec_logical_flow *lflow,
> -                        struct hmap *matches)
> +                        struct hmap *matches, size_t matches_sz)
>  {
>      struct lflow_cache_value *lcv =
> -        lflow_cache_add__(lc, lflow, LCACHE_T_MATCHES);
> -
> +        lflow_cache_add__(lc, lflow, LCACHE_T_MATCHES, matches_sz);
>      if (!lcv) {
>          expr_matches_destroy(matches);
>          free(matches);
> @@ -271,15 +280,36 @@ lflow_cache_make_room__(struct lflow_cache *lc, enum lflow_cache_type type)
>      return false;
>  }
>
> +void
> +lflow_cache_get_memory_usage(const struct lflow_cache *lc, struct simap *usage)
> +{
> +    for (size_t i = 0; i < LCACHE_T_MAX; i++) {
> +        char *counter_name = xasprintf("lflow-cache-entries-%s",
> +                                       lflow_cache_type_names[i]);
> +        simap_increase(usage, counter_name, hmap_count(&lc->entries[i]));
> +        free(counter_name);
> +    }
> +    simap_increase(usage, "lflow-cache-size-KB",
> +                   ROUND_UP(lc->mem_usage, 1024) / 1024);
> +}
> +
>  static struct lflow_cache_value *
>  lflow_cache_add__(struct lflow_cache *lc,
>                    const struct sbrec_logical_flow *lflow,
> -                  enum lflow_cache_type type)
> +                  enum lflow_cache_type type,
> +                  uint64_t value_size)
>  {
>      if (!lflow_cache_is_enabled(lc) || !lflow) {
>          return NULL;
>      }
>
> +    struct lflow_cache_entry *lce;
> +    size_t size = sizeof *lce + value_size;
> +    if (size + lc->mem_usage > lc->max_mem_usage) {
> +        COVERAGE_INC(lflow_cache_mem_full);
> +        return NULL;
> +    }
> +
>      if (lflow_cache_n_entries__(lc) == lc->capacity) {
>          if (!lflow_cache_make_room__(lc, type)) {
>              COVERAGE_INC(lflow_cache_full);
> @@ -289,10 +319,12 @@ lflow_cache_add__(struct lflow_cache *lc,
>          }
>      }
>
> -    struct lflow_cache_entry *lce = xzalloc(sizeof *lce);
> +    lc->mem_usage += size;
>
>      COVERAGE_INC(lflow_cache_add);
> +    lce = xzalloc(sizeof *lce);
>      lce->lflow_uuid = lflow->header_.uuid;
> +    lce->size = size;
>      lce->value.type = type;
>      hmap_insert(&lc->entries[type], &lce->node,
>                  uuid_hash(&lflow->header_.uuid));
> @@ -324,5 +356,8 @@ lflow_cache_delete__(struct lflow_cache *lc, struct lflow_cache_entry *lce)
>          free(lce->value.expr_matches);
>          break;
>      }
> +
> +    ovs_assert(lc->mem_usage >= lce->size);
> +    lc->mem_usage -= lce->size;
>      free(lce);
>  }
> diff --git a/controller/lflow-cache.h b/controller/lflow-cache.h
> index 23c64a0..411eb35 100644
> --- a/controller/lflow-cache.h
> +++ b/controller/lflow-cache.h
> @@ -20,6 +20,7 @@
>
>  #include "openvswitch/hmap.h"
>  #include "openvswitch/uuid.h"
> +#include "simap.h"
>
>  struct sbrec_logical_flow;
>  struct lflow_cache;
> @@ -61,7 +62,8 @@ struct lflow_cache_stats {
>  struct lflow_cache *lflow_cache_create(void);
>  void lflow_cache_flush(struct lflow_cache *);
>  void lflow_cache_destroy(struct lflow_cache *);
> -void lflow_cache_enable(struct lflow_cache *, bool enabled, uint32_t capacity);
> +void lflow_cache_enable(struct lflow_cache *, bool enabled, uint32_t capacity,
> +                        uint64_t max_mem_usage_kb);
>  bool lflow_cache_is_enabled(struct lflow_cache *);
>  struct lflow_cache_stats *lflow_cache_get_stats(const struct lflow_cache *);
>
> @@ -70,15 +72,18 @@ void lflow_cache_add_conj_id(struct lflow_cache *,
>                               uint32_t conj_id_ofs);
>  void lflow_cache_add_expr(struct lflow_cache *,
>                            const struct sbrec_logical_flow *,
> -                          uint32_t conj_id_ofs,
> -                          struct expr *expr);
> +                          uint32_t conj_id_ofs, struct expr *expr,
> +                          size_t expr_sz);
>  void lflow_cache_add_matches(struct lflow_cache *,
>                               const struct sbrec_logical_flow *,
> -                             struct hmap *matches);
> +                             struct hmap *matches, size_t matches_sz);
>
>  struct lflow_cache_value *lflow_cache_get(struct lflow_cache *,
>                                            const struct sbrec_logical_flow *);
>  void lflow_cache_delete(struct lflow_cache *,
>                          const struct sbrec_logical_flow *);
>
> +void lflow_cache_get_memory_usage(const struct lflow_cache *,
> +                                  struct simap *usage);
> +
>  #endif /* controller/lflow-cache.h */
> diff --git a/controller/lflow.c b/controller/lflow.c
> index c493652..a8fa052 100644
> --- a/controller/lflow.c
> +++ b/controller/lflow.c
> @@ -797,6 +797,7 @@ consider_logical_flow__(const struct sbrec_logical_flow *lflow,
>
>      struct expr *cached_expr = NULL, *expr = NULL;
>      struct hmap *matches = NULL;
> +    size_t matches_size = 0;
>
>      bool is_cr_cond_present = false;
>      bool pg_addr_set_ref = false;
> @@ -851,7 +852,7 @@ consider_logical_flow__(const struct sbrec_logical_flow *lflow,
>      case LCACHE_T_EXPR:
>          matches = xmalloc(sizeof *matches);
>          n_conjs = expr_to_matches(expr, lookup_port_cb, &aux, matches);
> -        expr_matches_prepare(matches, conj_id_ofs);
> +        matches_size = expr_matches_prepare(matches, conj_id_ofs);
>          if (hmap_is_empty(matches)) {
>              VLOG_DBG("lflow "UUID_FMT" matches are empty, skip",
>                      UUID_ARGS(&lflow->header_.uuid));
> @@ -878,11 +879,12 @@ consider_logical_flow__(const struct sbrec_logical_flow *lflow,
>          if (lflow_cache_is_enabled(l_ctx_out->lflow_cache)) {
>              if (cached_expr && !is_cr_cond_present) {
>                  lflow_cache_add_matches(l_ctx_out->lflow_cache, lflow,
> -                                        matches);
> +                                        matches, matches_size);
>                  matches = NULL;
>              } else if (cached_expr) {
>                  lflow_cache_add_expr(l_ctx_out->lflow_cache, lflow,
> -                                     conj_id_ofs, cached_expr);
> +                                     conj_id_ofs, cached_expr,
> +                                     expr_size(cached_expr));
>                  cached_expr = NULL;
>              } else if (n_conjs) {
>                  lflow_cache_add_conj_id(l_ctx_out->lflow_cache, lflow,
> diff --git a/controller/ovn-controller.8.xml b/controller/ovn-controller.8.xml
> index e92db6d..75c6596 100644
> --- a/controller/ovn-controller.8.xml
> +++ b/controller/ovn-controller.8.xml
> @@ -265,6 +265,13 @@
>          when the logical flow cache is enabled.  By default the size of the
>          cache is unlimited.
>        </dd>
> +      <dt><code>external_ids:ovn-memlimit-lflow-cache-kb</code></dt>
> +      <dd>
> +        When used, this configuration value determines the maximum size of
> +        the logical flow cache (in KB) <code>ovn-controller</code> may create
> +        when the logical flow cache is enabled.  By default the size of the
> +        cache is unlimited.
> +      </dd>
>      </dl>
>
>      <p>
> diff --git a/controller/ovn-controller.c b/controller/ovn-controller.c
> index dd67d7f..138d95a 100644
> --- a/controller/ovn-controller.c
> +++ b/controller/ovn-controller.c
> @@ -98,6 +98,7 @@ OVS_NO_RETURN static void usage(void);
>
>  /* By default don't set an upper bound for the lflow cache. */
>  #define DEFAULT_LFLOW_CACHE_MAX_ENTRIES UINT32_MAX
> +#define DEFAULT_LFLOW_CACHE_MAX_MEM_KB (UINT64_MAX / 1024)
>
>  struct controller_engine_ctx {
>      struct lflow_cache *lflow_cache;
> @@ -588,7 +589,10 @@ update_sb_db(struct ovsdb_idl *ovs_idl, struct ovsdb_idl *ovnsb_idl,
>                                           true),
>                             smap_get_uint(&cfg->external_ids,
>                                           "ovn-limit-lflow-cache",
> -                                         DEFAULT_LFLOW_CACHE_MAX_ENTRIES));
> +                                         DEFAULT_LFLOW_CACHE_MAX_ENTRIES),
> +                           smap_get_ullong(&cfg->external_ids,
> +                                           "ovn-memlimit-lflow-cache-kb",
> +                                           DEFAULT_LFLOW_CACHE_MAX_MEM_KB));
>      }
>  }
>
> @@ -2710,7 +2714,7 @@ main(int argc, char *argv[])
>          if (memory_should_report()) {
>              struct simap usage = SIMAP_INITIALIZER(&usage);
>
> -            /* Nothing special to report yet. */
> +            lflow_cache_get_memory_usage(ctrl_engine_ctx.lflow_cache, &usage);
>              memory_report(&usage);
>              simap_destroy(&usage);
>          }
> diff --git a/controller/test-lflow-cache.c b/controller/test-lflow-cache.c
> index 45262b4..ef5427d 100644
> --- a/controller/test-lflow-cache.c
> +++ b/controller/test-lflow-cache.c
> @@ -23,6 +23,9 @@
>
>  #include "lflow-cache.h"
>
> +/* Simulate 1KB large cache values. */
> +#define TEST_LFLOW_CACHE_VALUE_SIZE 1024
> +
>  static void
>  test_lflow_cache_add__(struct lflow_cache *lc, const char *op_type,
>                         const struct sbrec_logical_flow *lflow,
> @@ -35,12 +38,14 @@ test_lflow_cache_add__(struct lflow_cache *lc, const char *op_type,
>      if (!strcmp(op_type, "conj-id")) {
>          lflow_cache_add_conj_id(lc, lflow, conj_id_ofs);
>      } else if (!strcmp(op_type, "expr")) {
> -        lflow_cache_add_expr(lc, lflow, conj_id_ofs, expr_clone(e));
> +        lflow_cache_add_expr(lc, lflow, conj_id_ofs, expr_clone(e),
> +                             TEST_LFLOW_CACHE_VALUE_SIZE);
>      } else if (!strcmp(op_type, "matches")) {
>          struct hmap *matches = xmalloc(sizeof *matches);
>          ovs_assert(expr_to_matches(e, NULL, NULL, matches) == 0);
>          ovs_assert(hmap_count(matches) == 1);
> -        lflow_cache_add_matches(lc, lflow, matches);
> +        lflow_cache_add_matches(lc, lflow, matches,
> +                                TEST_LFLOW_CACHE_VALUE_SIZE);
>      } else {
>          OVS_NOT_REACHED();
>      }
> @@ -108,7 +113,7 @@ test_lflow_cache_operations(struct ovs_cmdl_context *ctx)
>      unsigned int shift = 2;
>      unsigned int n_ops;
>
> -    lflow_cache_enable(lc, enabled, UINT32_MAX);
> +    lflow_cache_enable(lc, enabled, UINT32_MAX, UINT32_MAX);
>      test_lflow_cache_stats__(lc);
>
>      if (!test_read_uint_value(ctx, shift++, "n_ops", &n_ops)) {
> @@ -157,14 +162,19 @@ test_lflow_cache_operations(struct ovs_cmdl_context *ctx)
>              test_lflow_cache_lookup__(lc, &lflow);
>          } else if (!strcmp(op, "enable")) {
>              unsigned int limit;
> +            unsigned int mem_limit_kb;
>              if (!test_read_uint_value(ctx, shift++, "limit", &limit)) {
>                  goto done;
>              }
> +            if (!test_read_uint_value(ctx, shift++, "mem-limit",
> +                                      &mem_limit_kb)) {
> +                goto done;
> +            }
>              printf("ENABLE\n");
> -            lflow_cache_enable(lc, true, limit);
> +            lflow_cache_enable(lc, true, limit, mem_limit_kb);
>          } else if (!strcmp(op, "disable")) {
>              printf("DISABLE\n");
> -            lflow_cache_enable(lc, false, UINT32_MAX);
> +            lflow_cache_enable(lc, false, UINT32_MAX, UINT32_MAX);
>          } else if (!strcmp(op, "flush")) {
>              printf("FLUSH\n");
>              lflow_cache_flush(lc);
> @@ -183,7 +193,7 @@ test_lflow_cache_negative(struct ovs_cmdl_context *ctx OVS_UNUSED)
>  {
>      lflow_cache_flush(NULL);
>      lflow_cache_destroy(NULL);
> -    lflow_cache_enable(NULL, true, UINT32_MAX);
> +    lflow_cache_enable(NULL, true, UINT32_MAX, UINT32_MAX);
>      ovs_assert(!lflow_cache_is_enabled(NULL));
>      ovs_assert(!lflow_cache_get_stats(NULL));
>
> @@ -200,10 +210,11 @@ test_lflow_cache_negative(struct ovs_cmdl_context *ctx OVS_UNUSED)
>          ovs_assert(hmap_count(matches) == 1);
>
>          lflow_cache_add_conj_id(lcs[i], NULL, 0);
> -        lflow_cache_add_expr(lcs[i], NULL, 0, NULL);
> -        lflow_cache_add_expr(lcs[i], NULL, 0, e);
> -        lflow_cache_add_matches(lcs[i], NULL, NULL);
> -        lflow_cache_add_matches(lcs[i], NULL, matches);
> +        lflow_cache_add_expr(lcs[i], NULL, 0, NULL, 0);
> +        lflow_cache_add_expr(lcs[i], NULL, 0, e, expr_size(e));
> +        lflow_cache_add_matches(lcs[i], NULL, NULL, 0);
> +        lflow_cache_add_matches(lcs[i], NULL, matches,
> +                                TEST_LFLOW_CACHE_VALUE_SIZE);
>          lflow_cache_destroy(lcs[i]);
>      }
>  }
> diff --git a/include/ovn/expr.h b/include/ovn/expr.h
> index c2c8218..0323700 100644
> --- a/include/ovn/expr.h
> +++ b/include/ovn/expr.h
> @@ -413,6 +413,7 @@ expr_from_node(const struct ovs_list *node)
>
>  void expr_format(const struct expr *, struct ds *);
>  void expr_print(const struct expr *);
> +size_t expr_size(const struct expr *);
>  struct expr *expr_parse(struct lexer *, const struct shash *symtab,
>                          const struct shash *addr_sets,
>                          const struct shash *port_groups,
> @@ -477,7 +478,7 @@ uint32_t expr_to_matches(const struct expr *,
>                           const void *aux,
>                           struct hmap *matches);
>  void expr_matches_destroy(struct hmap *matches);
> -void expr_matches_prepare(struct hmap *matches, uint32_t conj_id_ofs);
> +size_t expr_matches_prepare(struct hmap *matches, uint32_t conj_id_ofs);
>  void expr_matches_print(const struct hmap *matches, FILE *);
>
>  /* Action parsing helper. */
> diff --git a/lib/expr.c b/lib/expr.c
> index 356e6fe..f061a8f 100644
> --- a/lib/expr.c
> +++ b/lib/expr.c
> @@ -471,6 +471,36 @@ expr_print(const struct expr *e)
>      ds_destroy(&output);
>  }
>
> +/* Expr Size. */
> +size_t
> +expr_size(const struct expr *expr) {
> +    size_t total_sz = sizeof *expr;
> +    const struct expr *subexpr;
> +
> +    switch (expr->type) {
> +    case EXPR_T_CMP:
> +        return total_sz + (expr->cmp.symbol->width
> +               ? 0
> +               : strlen(expr->cmp.string));
> +
> +    case EXPR_T_AND:
> +    case EXPR_T_OR:
> +        LIST_FOR_EACH (subexpr, node, &expr->andor) {
> +            total_sz += expr_size(subexpr);
> +        }
> +        return total_sz;
> +
> +    case EXPR_T_BOOLEAN:
> +        return total_sz;
> +
> +    case EXPR_T_CONDITION:
> +        return total_sz + strlen(expr->cond.string);
> +
> +    default:
> +        OVS_NOT_REACHED();
> +    }
> +}
> +
>  /* Parsing. */
>
>  #define MAX_PAREN_DEPTH 100
> @@ -3127,11 +3157,16 @@ expr_to_matches(const struct expr *expr,
>
>  /* Prepares the expr matches in the hmap 'matches' by updating the
>   * conj id offsets specified in 'conj_id_ofs'.
> + *
> + * Returns the total size (in bytes) of the matches data structure, including
> + * individual match entries.
>   */
> -void
> +size_t
>  expr_matches_prepare(struct hmap *matches, uint32_t conj_id_ofs)
>  {
> +    size_t total_size = sizeof *matches;
>      struct expr_match *m;
> +
>      HMAP_FOR_EACH (m, hmap_node, matches) {
>          if (m->match.wc.masks.conj_id) {
>              m->match.flow.conj_id += conj_id_ofs;
> @@ -3141,7 +3176,9 @@ expr_matches_prepare(struct hmap *matches, uint32_t conj_id_ofs)
>              struct cls_conjunction *src = &m->conjunctions[i];
>              src->id += conj_id_ofs;
>          }
> +        total_size += sizeof *m + m->allocated * sizeof *m->conjunctions;
>      }
> +    return total_size;
>  }
>
>  /* Destroys all of the 'struct expr_match'es in 'matches', as well as the
> diff --git a/tests/ovn-lflow-cache.at b/tests/ovn-lflow-cache.at
> index 2cbc1f6..e3b67e5 100644
> --- a/tests/ovn-lflow-cache.at
> +++ b/tests/ovn-lflow-cache.at
> @@ -146,7 +146,7 @@ AT_CHECK(
>          add conj-id 4 \
>          add expr 5 \
>          add matches 6 \
> -        enable 1000 \
> +        enable 1000 1024 \
>          add conj-id 7 \
>          add expr 8 \
>          add matches 9 \
> @@ -255,15 +255,19 @@ AT_CLEANUP
>  AT_SETUP([ovn -- unit test -- lflow-cache set limit])
>  AT_CHECK(
>      [ovstest test-lflow-cache lflow_cache_operations \
> -        true 8 \
> +        true 12 \
>          add conj-id 1 \
>          add expr 2 \
>          add matches 3 \
> -        enable 1 \
> +        enable 1 1024 \
>          add conj-id 4 \
>          add expr 5 \
>          add matches 6 \
> -        add conj-id 7],
> +        add conj-id 7 \
> +        enable 1 1 \
> +        add conj-id 8 \
> +        add expr 9 \
> +        add matches 10],
>      [0], [dnl
>  Enabled: true
>    cache-conj-id: 0
> @@ -351,6 +355,48 @@ Enabled: true
>    cache-conj-id: 0
>    cache-expr: 0
>    cache-matches: 1
> +ENABLE
> +dnl
> +dnl Max memory usage smaller than current memory usage, cache should be
> +dnl flushed.
> +dnl
> +Enabled: true
> +  cache-conj-id: 0
> +  cache-expr: 0
> +  cache-matches: 0
> +ADD conj-id:
> +  conj-id-ofs: 8
> +LOOKUP:
> +  conj_id_ofs: 8
> +  type: conj-id
> +Enabled: true
> +  cache-conj-id: 1
> +  cache-expr: 0
> +  cache-matches: 0
> +ADD expr:
> +  conj-id-ofs: 9
> +LOOKUP:
> +  not found
> +dnl
> +dnl Cache is full and we're adding a cache entry that would go over the max
> +dnl memory limit so adding should fail.
> +dnl
> +Enabled: true
> +  cache-conj-id: 1
> +  cache-expr: 0
> +  cache-matches: 0
> +ADD matches:
> +  conj-id-ofs: 10
> +LOOKUP:
> +  not found
> +dnl
> +dnl Cache is full and we're adding a cache entry that would go over the max
> +dnl memory limit so adding should fail.
> +dnl
> +Enabled: true
> +  cache-conj-id: 1
> +  cache-expr: 0
> +  cache-matches: 0
>  ])
>  AT_CLEANUP
>
>
> _______________________________________________
> dev mailing list
> dev at openvswitch.org
> https://mail.openvswitch.org/mailman/listinfo/ovs-dev
>


More information about the dev mailing list