[ovs-dev] [PATCH ovn 2/6] lflow-cache: Move the lflow cache to its own module.
Dumitru Ceara
dceara at redhat.com
Thu Jan 28 16:25:39 UTC 2021
This abstracts the implementation details of the ovn-controller logical
flow cache and also refactors how the cache is being used, allowing
further commits to expand its functionality.
Signed-off-by: Dumitru Ceara <dceara at redhat.com>
---
controller/automake.mk | 2
controller/lflow-cache.c | 195 +++++++++++++++++++++++++
controller/lflow-cache.h | 75 ++++++++++
controller/lflow.c | 337 ++++++++++++++-----------------------------
controller/lflow.h | 6 -
controller/ovn-controller.c | 53 +++----
lib/expr.c | 4 +
7 files changed, 408 insertions(+), 264 deletions(-)
create mode 100644 controller/lflow-cache.c
create mode 100644 controller/lflow-cache.h
diff --git a/controller/automake.mk b/controller/automake.mk
index 480578e..75119a6 100644
--- a/controller/automake.mk
+++ b/controller/automake.mk
@@ -14,6 +14,8 @@ controller_ovn_controller_SOURCES = \
controller/ip-mcast.h \
controller/lflow.c \
controller/lflow.h \
+ controller/lflow-cache.c \
+ controller/lflow-cache.h \
controller/lport.c \
controller/lport.h \
controller/ofctrl.c \
diff --git a/controller/lflow-cache.c b/controller/lflow-cache.c
new file mode 100644
index 0000000..9ebd226
--- /dev/null
+++ b/controller/lflow-cache.c
@@ -0,0 +1,195 @@
+/*
+ * Copyright (c) 2015, 2016 Nicira, Inc.
+ * Copyright (c) 2021, Red Hat, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <config.h>
+
+#include "lib/ovn-sb-idl.h"
+#include "lflow-cache.h"
+#include "ovn/expr.h"
+
+struct lflow_cache {
+ struct hmap entries;
+ bool enabled;
+};
+
+struct lflow_cache_entry {
+ struct hmap_node node;
+ struct uuid lflow_uuid; /* key */
+
+ struct lflow_cache_value value;
+};
+
+static struct lflow_cache_value *lflow_cache_add__(
+ struct lflow_cache *lc,
+ const struct sbrec_logical_flow *lflow,
+ enum lflow_cache_type type);
+static void lflow_cache_delete__(struct lflow_cache *lc,
+ struct lflow_cache_entry *lce);
+
+struct lflow_cache *
+lflow_cache_create(void)
+{
+ struct lflow_cache *lc = xmalloc(sizeof *lc);
+
+ hmap_init(&lc->entries);
+ lc->enabled = true;
+ return lc;
+}
+
+void
+lflow_cache_flush(struct lflow_cache *lc)
+{
+ struct lflow_cache_entry *lce;
+ struct lflow_cache_entry *lce_next;
+
+ HMAP_FOR_EACH_SAFE (lce, lce_next, node, &lc->entries) {
+ lflow_cache_delete__(lc, lce);
+ }
+}
+
+void
+lflow_cache_destroy(struct lflow_cache *lc)
+{
+ if (!lc) {
+ return;
+ }
+
+ lflow_cache_flush(lc);
+ hmap_destroy(&lc->entries);
+ free(lc);
+}
+
+void
+lflow_cache_enable(struct lflow_cache *lc, bool enabled)
+{
+ if (lc->enabled && !enabled) {
+ lflow_cache_flush(lc);
+ }
+ lc->enabled = enabled;
+}
+
+bool
+lflow_cache_is_enabled(struct lflow_cache *lc)
+{
+ return lc->enabled;
+}
+
+void
+lflow_cache_add_conj_id(struct lflow_cache *lc,
+ const struct sbrec_logical_flow *lflow,
+ uint32_t conj_id_ofs)
+{
+ struct lflow_cache_value *lcv =
+ lflow_cache_add__(lc, lflow, LCACHE_T_CONJ_ID);
+
+ lcv->conj_id_ofs = conj_id_ofs;
+}
+
+void
+lflow_cache_add_expr(struct lflow_cache *lc,
+ const struct sbrec_logical_flow *lflow,
+ uint32_t conj_id_ofs,
+ struct expr *expr)
+{
+ struct lflow_cache_value *lcv =
+ lflow_cache_add__(lc, lflow, LCACHE_T_EXPR);
+
+ lcv->conj_id_ofs = conj_id_ofs;
+ lcv->expr = expr;
+}
+
+void
+lflow_cache_add_matches(struct lflow_cache *lc,
+ const struct sbrec_logical_flow *lflow,
+ struct hmap *matches)
+{
+ struct lflow_cache_value *lcv =
+ lflow_cache_add__(lc, lflow, LCACHE_T_MATCHES);
+
+ lcv->expr_matches = matches;
+}
+
+struct lflow_cache_value *
+lflow_cache_get(struct lflow_cache *lc, const struct sbrec_logical_flow *lflow)
+{
+ if (!lflow_cache_is_enabled(lc)) {
+ return NULL;
+ }
+
+ size_t hash = uuid_hash(&lflow->header_.uuid);
+ struct lflow_cache_entry *lce;
+
+ HMAP_FOR_EACH_WITH_HASH (lce, node, hash, &lc->entries) {
+ if (uuid_equals(&lce->lflow_uuid, &lflow->header_.uuid)) {
+ return &lce->value;
+ }
+ }
+ return NULL;
+}
+
+void
+lflow_cache_delete(struct lflow_cache *lc,
+ const struct sbrec_logical_flow *lflow)
+{
+ if (!lc) {
+ return;
+ }
+
+ struct lflow_cache_value *lcv = lflow_cache_get(lc, lflow);
+ if (lcv) {
+ lflow_cache_delete__(lc, CONTAINER_OF(lcv, struct lflow_cache_entry,
+ value));
+ }
+}
+
+static struct lflow_cache_value *
+lflow_cache_add__(struct lflow_cache *lc,
+ const struct sbrec_logical_flow *lflow,
+ enum lflow_cache_type type)
+{
+ struct lflow_cache_entry *lce = xzalloc(sizeof *lce);
+
+ lce->lflow_uuid = lflow->header_.uuid;
+ lce->value.type = type;
+ hmap_insert(&lc->entries, &lce->node, uuid_hash(&lflow->header_.uuid));
+ return &lce->value;
+}
+
+static void
+lflow_cache_delete__(struct lflow_cache *lc, struct lflow_cache_entry *lce)
+{
+ if (!lce) {
+ return;
+ }
+
+ hmap_remove(&lc->entries, &lce->node);
+ switch (lce->value.type) {
+ case LCACHE_T_NONE:
+ OVS_NOT_REACHED();
+ break;
+ case LCACHE_T_CONJ_ID:
+ break;
+ case LCACHE_T_EXPR:
+ expr_destroy(lce->value.expr);
+ break;
+ case LCACHE_T_MATCHES:
+ expr_matches_destroy(lce->value.expr_matches);
+ free(lce->value.expr_matches);
+ break;
+ }
+ free(lce);
+}
diff --git a/controller/lflow-cache.h b/controller/lflow-cache.h
new file mode 100644
index 0000000..5c622d5
--- /dev/null
+++ b/controller/lflow-cache.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2015, 2016 Nicira, Inc.
+ * Copyright (c) 2021, Red Hat, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LFLOW_CACHE_H
+#define LFLOW_CACHE_H 1
+
+#include "openvswitch/hmap.h"
+#include "openvswitch/uuid.h"
+
+struct sbrec_logical_flow;
+struct lflow_cache;
+
+/* Various lflow cache types which
+ * - store the conjunction id offset if the lflow matches
+ * results in conjunctive OpenvSwitch flows.
+ *
+ * - Caches
+ * (1) Nothing if the logical flow has port group/address set references.
+ * (2) expr tree if the logical flow has is_chassis_resident() match.
+ * (3) expr matches if (1) and (2) are false.
+ */
+enum lflow_cache_type {
+ LCACHE_T_NONE, /* Not found in cache. */
+ LCACHE_T_CONJ_ID, /* Only conjunction id offset is cached. */
+ LCACHE_T_EXPR, /* Expr tree of the logical flow is cached. */
+ LCACHE_T_MATCHES, /* Expression matches are cached. */
+};
+
+struct lflow_cache_value {
+ enum lflow_cache_type type;
+ uint32_t conj_id_ofs;
+
+ union {
+ struct hmap *expr_matches;
+ struct expr *expr;
+ };
+};
+
+struct lflow_cache *lflow_cache_create(void);
+void lflow_cache_flush(struct lflow_cache *);
+void lflow_cache_destroy(struct lflow_cache *);
+void lflow_cache_enable(struct lflow_cache *, bool enabled);
+bool lflow_cache_is_enabled(struct lflow_cache *);
+
+void lflow_cache_add_conj_id(struct lflow_cache *,
+ const struct sbrec_logical_flow *,
+ uint32_t conj_id_ofs);
+void lflow_cache_add_expr(struct lflow_cache *,
+ const struct sbrec_logical_flow *,
+ uint32_t conj_id_ofs,
+ struct expr *expr);
+void lflow_cache_add_matches(struct lflow_cache *,
+ const struct sbrec_logical_flow *,
+ struct hmap *matches);
+
+struct lflow_cache_value *lflow_cache_get(struct lflow_cache *,
+ const struct sbrec_logical_flow *);
+void lflow_cache_delete(struct lflow_cache *,
+ const struct sbrec_logical_flow *);
+
+#endif /* controller/lflow-cache.h */
diff --git a/controller/lflow.c b/controller/lflow.c
index cc47b23..3f920d1 100644
--- a/controller/lflow.c
+++ b/controller/lflow.c
@@ -17,6 +17,7 @@
#include "lflow.h"
#include "coverage.h"
#include "ha-chassis.h"
+#include "lflow-cache.h"
#include "lport.h"
#include "ofctrl.h"
#include "openvswitch/dynamic-string.h"
@@ -306,103 +307,6 @@ lflow_resource_destroy_lflow(struct lflow_resource_ref *lfrr,
free(lfrn);
}
-enum lflow_cache_type {
- LCACHE_T_NO_CACHE,
- LCACHE_T_MATCHES,
- LCACHE_T_EXPR,
-};
-
-/* Represents an lflow cache which
- * - stores the conjunction id offset if the lflow matches
- * results in conjunctive OpenvSwitch flows.
- *
- * - Caches
- * (1) Nothing if the logical flow has port group/address set references.
- * (2) expr tree if the logical flow has is_chassis_resident() match.
- * (3) expr matches if (1) and (2) are false.
- */
-struct lflow_cache {
- struct hmap_node node;
- struct uuid lflow_uuid; /* key */
- uint32_t conj_id_ofs;
-
- enum lflow_cache_type type;
- union {
- struct {
- struct hmap *expr_matches;
- size_t n_conjs;
- };
- struct expr *expr;
- };
-};
-
-static struct lflow_cache *
-lflow_cache_add(struct hmap *lflow_cache_map,
- const struct sbrec_logical_flow *lflow)
-{
- struct lflow_cache *lc = xmalloc(sizeof *lc);
- lc->lflow_uuid = lflow->header_.uuid;
- lc->conj_id_ofs = 0;
- lc->type = LCACHE_T_NO_CACHE;
- hmap_insert(lflow_cache_map, &lc->node, uuid_hash(&lc->lflow_uuid));
- return lc;
-}
-
-static struct lflow_cache *
-lflow_cache_get(struct hmap *lflow_cache_map,
- const struct sbrec_logical_flow *lflow)
-{
- struct lflow_cache *lc;
- size_t hash = uuid_hash(&lflow->header_.uuid);
- HMAP_FOR_EACH_WITH_HASH (lc, node, hash, lflow_cache_map) {
- if (uuid_equals(&lc->lflow_uuid, &lflow->header_.uuid)) {
- return lc;
- }
- }
-
- return NULL;
-}
-
-static void
-lflow_cache_delete(struct hmap *lflow_cache_map,
- const struct sbrec_logical_flow *lflow)
-{
- struct lflow_cache *lc = lflow_cache_get(lflow_cache_map, lflow);
- if (lc) {
- hmap_remove(lflow_cache_map, &lc->node);
- if (lc->type == LCACHE_T_MATCHES) {
- expr_matches_destroy(lc->expr_matches);
- free(lc->expr_matches);
- } else if (lc->type == LCACHE_T_EXPR) {
- expr_destroy(lc->expr);
- }
- free(lc);
- }
-}
-
-void
-lflow_cache_init(struct hmap *lflow_cache_map)
-{
- hmap_init(lflow_cache_map);
-}
-
-void
-lflow_cache_destroy(struct hmap *lflow_cache_map)
-{
- struct lflow_cache *lc;
- HMAP_FOR_EACH_POP (lc, node, lflow_cache_map) {
- if (lc->type == LCACHE_T_MATCHES) {
- expr_matches_destroy(lc->expr_matches);
- free(lc->expr_matches);
- } else if (lc->type == LCACHE_T_EXPR) {
- expr_destroy(lc->expr);
- }
- free(lc);
- }
-
- hmap_destroy(lflow_cache_map);
-}
-
/* Adds the logical flows from the Logical_Flow table to flow tables. */
static void
add_logical_flows(struct lflow_ctx_in *l_ctx_in,
@@ -496,8 +400,8 @@ lflow_handle_changed_flows(struct lflow_ctx_in *l_ctx_in,
ofrn->sb_uuid = lflow->header_.uuid;
hmap_insert(&flood_remove_nodes, &ofrn->hmap_node,
uuid_hash(&ofrn->sb_uuid));
- if (l_ctx_out->lflow_cache_map) {
- lflow_cache_delete(l_ctx_out->lflow_cache_map, lflow);
+ if (lflow_cache_is_enabled(l_ctx_out->lflow_cache)) {
+ lflow_cache_delete(l_ctx_out->lflow_cache, lflow);
}
}
}
@@ -659,10 +563,10 @@ update_conj_id_ofs(uint32_t *conj_id_ofs, uint32_t n_conjs)
{
if (*conj_id_ofs + n_conjs < *conj_id_ofs) {
/* overflow */
- return false;
+ return true;
}
*conj_id_ofs += n_conjs;
- return true;
+ return false;
}
static void
@@ -884,152 +788,127 @@ consider_logical_flow__(const struct sbrec_logical_flow *lflow,
.lfrr = l_ctx_out->lfrr,
};
- struct expr *expr = NULL;
- if (!l_ctx_out->lflow_cache_map) {
- /* Caching is disabled. */
- expr = convert_match_to_expr(lflow, dp, &prereqs, l_ctx_in->addr_sets,
- l_ctx_in->port_groups, l_ctx_out->lfrr,
- NULL);
- if (!expr) {
- expr_destroy(prereqs);
- ovnacts_free(ovnacts.data, ovnacts.size);
- ofpbuf_uninit(&ovnacts);
- return true;
- }
+ struct lflow_cache_value *lcv =
+ lflow_cache_get(l_ctx_out->lflow_cache, lflow);
+ uint32_t conj_id_ofs =
+ lcv ? lcv->conj_id_ofs : *l_ctx_out->conj_id_ofs;
+ enum lflow_cache_type lcv_type =
+ lcv ? lcv->type : LCACHE_T_NONE;
- expr = expr_evaluate_condition(expr, is_chassis_resident_cb, &cond_aux,
- NULL);
- expr = expr_normalize(expr);
- struct hmap matches = HMAP_INITIALIZER(&matches);
- uint32_t n_conjs = expr_to_matches(expr, lookup_port_cb, &aux,
- &matches);
- expr_destroy(expr);
- if (hmap_is_empty(&matches)) {
- VLOG_DBG("lflow "UUID_FMT" matches are empty, skip",
- UUID_ARGS(&lflow->header_.uuid));
- ovnacts_free(ovnacts.data, ovnacts.size);
- ofpbuf_uninit(&ovnacts);
- expr_matches_destroy(&matches);
- return true;
- }
-
- expr_matches_prepare(&matches, *l_ctx_out->conj_id_ofs);
- add_matches_to_flow_table(lflow, dp, &matches, ptable, output_ptable,
- &ovnacts, ingress, l_ctx_in, l_ctx_out);
-
- ovnacts_free(ovnacts.data, ovnacts.size);
- ofpbuf_uninit(&ovnacts);
- expr_matches_destroy(&matches);
- return update_conj_id_ofs(l_ctx_out->conj_id_ofs, n_conjs);
- }
-
- /* Caching is enabled. */
- struct lflow_cache *lc =
- lflow_cache_get(l_ctx_out->lflow_cache_map, lflow);
-
- if (lc && lc->type == LCACHE_T_MATCHES) {
- /* 'matches' is cached. No need to do expr parsing and no need
- * to call expr_matches_prepare() to update the conj ids.
- * Add matches to flow table and return. */
- add_matches_to_flow_table(lflow, dp, lc->expr_matches, ptable,
- output_ptable, &ovnacts, ingress,
- l_ctx_in, l_ctx_out);
- ovnacts_free(ovnacts.data, ovnacts.size);
- ofpbuf_uninit(&ovnacts);
- expr_destroy(prereqs);
- return true;
- }
+ struct expr *cached_expr = NULL, *expr = NULL;
+ struct hmap *matches = NULL;
- if (!lc) {
- /* Create the lflow_cache for the lflow. */
- lc = lflow_cache_add(l_ctx_out->lflow_cache_map, lflow);
- }
+ bool is_cr_cond_present = false;
+ bool pg_addr_set_ref = false;
+ uint32_t n_conjs = 0;
- if (lc && lc->type == LCACHE_T_EXPR) {
- expr = lc->expr;
- }
+ bool conj_id_overflow = false;
- bool pg_addr_set_ref = false;
- if (!expr) {
+ /* Get match expr, either from cache or from lflow match. */
+ switch (lcv_type) {
+ case LCACHE_T_NONE:
+ case LCACHE_T_CONJ_ID:
expr = convert_match_to_expr(lflow, dp, &prereqs, l_ctx_in->addr_sets,
l_ctx_in->port_groups, l_ctx_out->lfrr,
&pg_addr_set_ref);
if (!expr) {
- expr_destroy(prereqs);
- ovnacts_free(ovnacts.data, ovnacts.size);
- ofpbuf_uninit(&ovnacts);
- return true;
+ goto done;
}
- } else {
- expr_destroy(prereqs);
- }
-
- ovs_assert(lc && expr);
-
- /* Cache the 'expr' only if the lflow doesn't reference a port group and
- * address set. */
- if (!pg_addr_set_ref) {
- /* Note: If the expr doesn't have 'is_chassis_resident, then the
- * type will be set to LCACHE_T_MATCHES and 'matches' will be
- * cached instead. See below. */
- lc->type = LCACHE_T_EXPR;
- lc->expr = expr;
- }
-
- if (lc->type == LCACHE_T_EXPR) {
- expr = expr_clone(lc->expr);
+ break;
+ case LCACHE_T_EXPR:
+ expr = expr_clone(lcv->expr);
+ break;
+ case LCACHE_T_MATCHES:
+ break;
}
- bool is_cr_cond_present = false;
- expr = expr_evaluate_condition(expr, is_chassis_resident_cb, &cond_aux,
- &is_cr_cond_present);
- expr = expr_normalize(expr);
- struct hmap *matches = xmalloc(sizeof *matches);
- uint32_t n_conjs = expr_to_matches(expr, lookup_port_cb, &aux,
- matches);
- expr_destroy(expr);
- if (hmap_is_empty(matches)) {
- VLOG_DBG("lflow "UUID_FMT" matches are empty, skip",
- UUID_ARGS(&lflow->header_.uuid));
- ovnacts_free(ovnacts.data, ovnacts.size);
- ofpbuf_uninit(&ovnacts);
- expr_matches_destroy(matches);
- free(matches);
- return true;
+ /* If caching is enabled and this is a not cached expr that doesn't refer
+ * to address sets or port groups, save it to potentially cache it later.
+ */
+ if (lcv_type == LCACHE_T_NONE
+ && lflow_cache_is_enabled(l_ctx_out->lflow_cache)
+ && !pg_addr_set_ref) {
+ cached_expr = expr_clone(expr);
}
- if (n_conjs && !lc->conj_id_ofs) {
- lc->conj_id_ofs = *l_ctx_out->conj_id_ofs;
- if (!update_conj_id_ofs(l_ctx_out->conj_id_ofs, n_conjs)) {
- lc->conj_id_ofs = 0;
- expr_matches_destroy(matches);
- free(matches);
- return false;
+ /* Normalize expression if needed. */
+ switch (lcv_type) {
+ case LCACHE_T_NONE:
+ case LCACHE_T_CONJ_ID:
+ case LCACHE_T_EXPR:
+ expr = expr_evaluate_condition(expr, is_chassis_resident_cb, &cond_aux,
+ &is_cr_cond_present);
+ expr = expr_normalize(expr);
+ break;
+ case LCACHE_T_MATCHES:
+ break;
+ }
+
+ /* Get matches, either from cache or from expr computed above. */
+ switch (lcv_type) {
+ case LCACHE_T_NONE:
+ case LCACHE_T_CONJ_ID:
+ case LCACHE_T_EXPR:
+ matches = xmalloc(sizeof *matches);
+ hmap_init(matches);
+ n_conjs = expr_to_matches(expr, lookup_port_cb, &aux, matches);
+ expr_matches_prepare(matches, conj_id_ofs);
+ if (hmap_is_empty(matches)) {
+ VLOG_DBG("lflow "UUID_FMT" matches are empty, skip",
+ UUID_ARGS(&lflow->header_.uuid));
+ goto done;
}
+ break;
+ case LCACHE_T_MATCHES:
+ matches = lcv->expr_matches;
+ break;
}
- expr_matches_prepare(matches, lc->conj_id_ofs);
-
- /* Encode OVN logical actions into OpenFlow. */
add_matches_to_flow_table(lflow, dp, matches, ptable, output_ptable,
&ovnacts, ingress, l_ctx_in, l_ctx_out);
+
+ /* Update cache if needed. */
+ switch (lcv_type) {
+ case LCACHE_T_NONE:
+ /* Entry not already in cache, update conjunction id offset and
+ * add the entry to the cache.
+ */
+ conj_id_overflow = update_conj_id_ofs(l_ctx_out->conj_id_ofs, n_conjs);
+
+ /* Cache new entry if caching is enabled. */
+ if (lflow_cache_is_enabled(l_ctx_out->lflow_cache)) {
+ if (cached_expr && !is_cr_cond_present) {
+ lflow_cache_add_matches(l_ctx_out->lflow_cache, lflow,
+ matches);
+ matches = NULL;
+ } else if (cached_expr) {
+ lflow_cache_add_expr(l_ctx_out->lflow_cache, lflow,
+ conj_id_ofs, cached_expr);
+ cached_expr = NULL;
+ } else {
+ lflow_cache_add_conj_id(l_ctx_out->lflow_cache, lflow,
+ conj_id_ofs);
+ }
+ }
+ break;
+ case LCACHE_T_CONJ_ID:
+ case LCACHE_T_EXPR:
+ break;
+ case LCACHE_T_MATCHES:
+ /* Cached matches were used, don't destroy them. */
+ matches = NULL;
+ break;
+ }
+
+done:
+ expr_destroy(prereqs);
ovnacts_free(ovnacts.data, ovnacts.size);
ofpbuf_uninit(&ovnacts);
-
- if (!is_cr_cond_present && lc->type == LCACHE_T_EXPR) {
- /* If 'is_chassis_resident' match is not present, then cache
- * 'matches'. */
- expr_destroy(lc->expr);
- lc->type = LCACHE_T_MATCHES;
- lc->expr_matches = matches;
- }
-
- if (lc->type != LCACHE_T_MATCHES) {
- expr_matches_destroy(matches);
- free(matches);
- }
-
- return true;
+ expr_destroy(expr);
+ expr_destroy(cached_expr);
+ expr_matches_destroy(matches);
+ free(matches);
+ return !conj_id_overflow;
}
static bool
@@ -1415,12 +1294,12 @@ lflow_run(struct lflow_ctx_in *l_ctx_in, struct lflow_ctx_out *l_ctx_out)
/* when lflow_run is called, it's possible that some of the logical flows
* are deleted. We need to delete the lflow cache for these
* lflows (if present), otherwise, they will not be deleted at all. */
- if (l_ctx_out->lflow_cache_map) {
+ if (lflow_cache_is_enabled(l_ctx_out->lflow_cache)) {
const struct sbrec_logical_flow *lflow;
SBREC_LOGICAL_FLOW_TABLE_FOR_EACH_TRACKED (lflow,
l_ctx_in->logical_flow_table) {
if (sbrec_logical_flow_is_deleted(lflow)) {
- lflow_cache_delete(l_ctx_out->lflow_cache_map, lflow);
+ lflow_cache_delete(l_ctx_out->lflow_cache, lflow);
}
}
}
diff --git a/controller/lflow.h b/controller/lflow.h
index ba79cc3..e973704 100644
--- a/controller/lflow.h
+++ b/controller/lflow.h
@@ -34,6 +34,7 @@
*/
#include <stdint.h>
+#include "lflow-cache.h"
#include "openvswitch/hmap.h"
#include "openvswitch/uuid.h"
#include "openvswitch/list.h"
@@ -150,7 +151,7 @@ struct lflow_ctx_out {
struct ovn_extend_table *group_table;
struct ovn_extend_table *meter_table;
struct lflow_resource_ref *lfrr;
- struct hmap *lflow_cache_map;
+ struct lflow_cache *lflow_cache;
uint32_t *conj_id_ofs;
bool conj_id_overflow;
};
@@ -169,9 +170,6 @@ void lflow_handle_changed_neighbors(
bool lflow_handle_changed_lbs(struct lflow_ctx_in *, struct lflow_ctx_out *);
void lflow_destroy(void);
-void lflow_cache_init(struct hmap *);
-void lflow_cache_destroy(struct hmap *);
-
bool lflow_add_flows_for_datapath(const struct sbrec_datapath_binding *,
struct lflow_ctx_in *,
struct lflow_ctx_out *);
diff --git a/controller/ovn-controller.c b/controller/ovn-controller.c
index ef3e0e9..c0737a8 100644
--- a/controller/ovn-controller.c
+++ b/controller/ovn-controller.c
@@ -36,6 +36,7 @@
#include "ip-mcast.h"
#include "openvswitch/hmap.h"
#include "lflow.h"
+#include "lflow-cache.h"
#include "lib/vswitch-idl.h"
#include "lport.h"
#include "memory.h"
@@ -94,6 +95,10 @@ static unixctl_cb_func debug_delay_nb_cfg_report;
static char *parse_options(int argc, char *argv[]);
OVS_NO_RETURN static void usage(void);
+struct controller_engine_ctx {
+ struct lflow_cache *lflow_cache;
+};
+
/* Pending packet to be injected into connected OVS. */
struct pending_pkt {
/* Setting 'conn' indicates that a request is pending. */
@@ -530,7 +535,8 @@ get_ofctrl_probe_interval(struct ovsdb_idl *ovs_idl)
static void
update_sb_db(struct ovsdb_idl *ovs_idl, struct ovsdb_idl *ovnsb_idl,
bool *monitor_all_p, bool *reset_ovnsb_idl_min_index,
- bool *enable_lflow_cache, unsigned int *sb_cond_seqno)
+ struct controller_engine_ctx *ctx,
+ unsigned int *sb_cond_seqno)
{
const struct ovsrec_open_vswitch *cfg = ovsrec_open_vswitch_first(ovs_idl);
if (!cfg) {
@@ -571,9 +577,11 @@ update_sb_db(struct ovsdb_idl *ovs_idl, struct ovsdb_idl *ovnsb_idl,
*reset_ovnsb_idl_min_index = false;
}
- if (enable_lflow_cache != NULL) {
- *enable_lflow_cache =
- smap_get_bool(&cfg->external_ids, "ovn-enable-lflow-cache", true);
+ if (ctx) {
+ lflow_cache_enable(ctx->lflow_cache,
+ smap_get_bool(&cfg->external_ids,
+ "ovn-enable-lflow-cache",
+ true));
}
}
@@ -952,10 +960,6 @@ enum ovs_engine_node {
OVS_NODES
#undef OVS_NODE
-struct controller_engine_ctx {
- bool enable_lflow_cache;
-};
-
struct ed_type_ofctrl_is_connected {
bool connected;
};
@@ -1714,8 +1718,7 @@ physical_flow_changes_ovs_iface_handler(struct engine_node *node, void *data)
struct flow_output_persistent_data {
uint32_t conj_id_ofs;
- struct hmap lflow_cache_map;
- bool lflow_cache_enabled;
+ struct lflow_cache *lflow_cache;
};
struct ed_type_flow_output {
@@ -1904,11 +1907,7 @@ static void init_lflow_ctx(struct engine_node *node,
l_ctx_out->meter_table = &fo->meter_table;
l_ctx_out->lfrr = &fo->lflow_resource_ref;
l_ctx_out->conj_id_ofs = &fo->pd.conj_id_ofs;
- if (fo->pd.lflow_cache_enabled) {
- l_ctx_out->lflow_cache_map = &fo->pd.lflow_cache_map;
- } else {
- l_ctx_out->lflow_cache_map = NULL;
- }
+ l_ctx_out->lflow_cache = fo->pd.lflow_cache;
l_ctx_out->conj_id_overflow = false;
}
@@ -1923,8 +1922,6 @@ en_flow_output_init(struct engine_node *node OVS_UNUSED,
ovn_extend_table_init(&data->meter_table);
data->pd.conj_id_ofs = 1;
lflow_resource_init(&data->lflow_resource_ref);
- lflow_cache_init(&data->pd.lflow_cache_map);
- data->pd.lflow_cache_enabled = true;
return data;
}
@@ -1936,7 +1933,7 @@ en_flow_output_cleanup(void *data)
ovn_extend_table_destroy(&flow_output_data->group_table);
ovn_extend_table_destroy(&flow_output_data->meter_table);
lflow_resource_destroy(&flow_output_data->lflow_resource_ref);
- lflow_cache_destroy(&flow_output_data->pd.lflow_cache_map);
+ lflow_cache_destroy(flow_output_data->pd.lflow_cache);
}
static void
@@ -1983,13 +1980,10 @@ en_flow_output_run(struct engine_node *node, void *data)
}
struct controller_engine_ctx *ctrl_ctx = engine_get_context()->client_ctx;
- if (fo->pd.lflow_cache_enabled && !ctrl_ctx->enable_lflow_cache) {
- lflow_cache_destroy(&fo->pd.lflow_cache_map);
- lflow_cache_init(&fo->pd.lflow_cache_map);
- }
- fo->pd.lflow_cache_enabled = ctrl_ctx->enable_lflow_cache;
- if (!fo->pd.lflow_cache_enabled) {
+ fo->pd.lflow_cache = ctrl_ctx->lflow_cache;
+
+ if (!lflow_cache_is_enabled(fo->pd.lflow_cache)) {
fo->pd.conj_id_ofs = 1;
}
@@ -2006,8 +2000,7 @@ en_flow_output_run(struct engine_node *node, void *data)
ovn_extend_table_clear(meter_table, false /* desired */);
lflow_resource_clear(lfrr);
fo->pd.conj_id_ofs = 1;
- lflow_cache_destroy(&fo->pd.lflow_cache_map);
- lflow_cache_init(&fo->pd.lflow_cache_map);
+ lflow_cache_flush(fo->pd.lflow_cache);
l_ctx_out.conj_id_overflow = false;
lflow_run(&l_ctx_in, &l_ctx_out);
if (l_ctx_out.conj_id_overflow) {
@@ -2691,7 +2684,7 @@ main(int argc, char *argv[])
unsigned int ovnsb_expected_cond_seqno = UINT_MAX;
struct controller_engine_ctx ctrl_engine_ctx = {
- .enable_lflow_cache = true
+ .lflow_cache = lflow_cache_create(),
};
char *ovn_version = ovn_get_internal_version();
@@ -2735,8 +2728,7 @@ main(int argc, char *argv[])
update_sb_db(ovs_idl_loop.idl, ovnsb_idl_loop.idl, &sb_monitor_all,
&reset_ovnsb_idl_min_index,
- &ctrl_engine_ctx.enable_lflow_cache,
- &ovnsb_expected_cond_seqno);
+ &ctrl_engine_ctx, &ovnsb_expected_cond_seqno);
update_ssl_config(ovsrec_ssl_table_get(ovs_idl_loop.idl));
ofctrl_set_probe_interval(get_ofctrl_probe_interval(ovs_idl_loop.idl));
@@ -3258,8 +3250,7 @@ flush_lflow_cache(struct unixctl_conn *conn OVS_UNUSED, int argc OVS_UNUSED,
{
VLOG_INFO("User triggered lflow cache flush.");
struct flow_output_persistent_data *fo_pd = arg_;
- lflow_cache_destroy(&fo_pd->lflow_cache_map);
- lflow_cache_init(&fo_pd->lflow_cache_map);
+ lflow_cache_flush(fo_pd->lflow_cache);
fo_pd->conj_id_ofs = 1;
engine_set_force_recompute(true);
poll_immediate_wake();
diff --git a/lib/expr.c b/lib/expr.c
index 796e88a..356e6fe 100644
--- a/lib/expr.c
+++ b/lib/expr.c
@@ -3151,6 +3151,10 @@ expr_matches_destroy(struct hmap *matches)
{
struct expr_match *m;
+ if (!matches) {
+ return;
+ }
+
HMAP_FOR_EACH_POP (m, hmap_node, matches) {
free(m->conjunctions);
free(m);
More information about the dev
mailing list