[ovs-dev] [PATCH v2 9/9] Classifier: On-demand prefix tree lookup.
Jarno Rajahalme
jrajahalme at nicira.com
Wed Nov 13 21:32:49 UTC 2013
There is a bit more bookkeeping, and preliminary measurements did
not show significant benefit. Results may differ with larger
flow tables with more address space, though.
Signed-off-by: Jarno Rajahalme <jrajahalme at nicira.com>
---
lib/classifier.c | 158 +++++++++++++++++++++++++++++-------------------------
1 file changed, 85 insertions(+), 73 deletions(-)
diff --git a/lib/classifier.c b/lib/classifier.c
index f7fd1f2..4a37b86 100644
--- a/lib/classifier.c
+++ b/lib/classifier.c
@@ -43,8 +43,7 @@ static void update_subtables_after_removal(struct classifier *,
unsigned int del_priority);
static struct cls_rule *find_match(const struct cls_subtable *,
- const struct flow *,
- const struct trie_ctx *,
+ const struct flow *, struct trie_ctx *,
unsigned int n_tries,
struct flow_wildcards *);
static struct cls_rule *find_equal(struct cls_subtable *,
@@ -68,8 +67,9 @@ static uint8_t minimask_get_prefix_len(const struct minimask *,
unsigned int *);
static void trie_init(struct classifier *, int trie_idx,
const struct mf_field *);
-static bool trie_lookup(const struct cls_trie *, const struct flow *,
- struct trie_ctx *);
+static uint8_t trie_lookup(const struct cls_trie *, const struct flow *,
+ uint8_t *checkbits);
+
static void trie_destroy(struct trie_node *);
static void trie_insert(struct cls_trie *, const struct cls_rule *);
static void trie_remove(struct cls_trie *, const struct cls_rule *);
@@ -456,10 +456,11 @@ classifier_remove(struct classifier *cls, struct cls_rule *rule)
* 'checkbits' prefix bits should be un-wildcarded to guarantee no false matches
* will happen on the wildcarded datapath flow. */
struct trie_ctx {
- uint8_t idx; /* Index of the corresponding trie. */
+ const struct cls_trie *trie;
+ bool lookup_done; /* Status of the lookup. */
uint8_t u32ofs; /* U32 offset of the field in question. */
uint8_t match_plen; /* Longest prefix that could possibly match. */
- uint8_t checkbits; /* Prefix length needed to avoid false matches. */
+ uint8_t nbits; /* Prefix length needed to avoid false matches. */
};
/* Finds and returns the highest-priority rule in 'cls' that matches 'flow'.
@@ -479,7 +480,7 @@ classifier_lookup(const struct classifier *cls, const struct flow *flow,
struct cls_rule *best;
tag_type tags;
struct trie_ctx trie_ctx[CLS_MAX_TRIES];
- int i, n_tries = 0;
+ int i;
/* Determine 'tags' such that, if 'subtable->tag' doesn't intersect them,
* then 'flow' cannot possibly match in 'subtable':
@@ -507,10 +508,9 @@ classifier_lookup(const struct classifier *cls, const struct flow *flow,
/* Initialize trie contexts for find_match(). */
for (i = 0; i < cls->n_tries; i++) {
- if (trie_lookup(&cls->tries[i], flow, &trie_ctx[n_tries])) {
- trie_ctx[n_tries].idx = i;
- n_tries++;
- }
+ trie_ctx[i].trie = &cls->tries[i];
+ trie_ctx[i].u32ofs = cls->tries[i].field->flow_u32ofs;
+ trie_ctx[i].lookup_done = false;
}
best = NULL;
LIST_FOR_EACH (subtable, list_node, &cls->subtables_priority) {
@@ -519,7 +519,7 @@ classifier_lookup(const struct classifier *cls, const struct flow *flow,
if (!tag_intersects(tags, subtable->tag)) {
continue;
}
- rule = find_match(subtable, flow, trie_ctx, n_tries, wc);
+ rule = find_match(subtable, flow, trie_ctx, cls->n_tries, wc);
if (rule) {
best = rule;
LIST_FOR_EACH_CONTINUE (subtable, list_node,
@@ -532,7 +532,7 @@ classifier_lookup(const struct classifier *cls, const struct flow *flow,
if (!tag_intersects(tags, subtable->tag)) {
continue;
}
- rule = find_match(subtable, flow, trie_ctx, n_tries, wc);
+ rule = find_match(subtable, flow, trie_ctx, cls->n_tries, wc);
if (rule && rule->priority > best->priority) {
best = rule;
}
@@ -950,46 +950,60 @@ update_subtables_after_removal(struct classifier *cls,
}
}
+struct range {
+ uint8_t start;
+ uint8_t end;
+};
+
/* Return 'true' if the rest of the subtable can be skipped based on the
 * prefix trie lookup results. */
static inline bool
-check_tries(const struct trie_ctx trie_ctx[CLS_MAX_TRIES],
- unsigned int n_tries,
- const uint8_t field_plen[CLS_MAX_TRIES], uint8_t next_u32ofs,
- struct flow_wildcards *wc)
+check_tries(const struct flow *flow,
+ struct trie_ctx trie_ctx[CLS_MAX_TRIES],
+ struct range *tries, const uint8_t field_plen[CLS_MAX_TRIES],
+ struct range *ofs, struct flow_wildcards *wc)
{
int j;
/* Check if we could avoid fully unwildcarding the next level of
* fields using the prefix tries. The trie checks are done only as
* needed to avoid folding in additional bits to the wildcards mask. */
- for (j = 0; j < n_tries; j++) {
- const struct trie_ctx *ctx = &trie_ctx[j];
- /* Possible to skip the whole subtable if subtable's prefix on the
- * field is longer than what is known to match based on the
- * trie lookup. */
- if (field_plen[ctx->idx] > ctx->match_plen) {
- /* Can safely skip if no wildcarding is done... */
- if (!wc) {
- return true;
+ for (j = tries->start; j < tries->end; j++) {
+ struct trie_ctx *ctx = &trie_ctx[j];
+
+ /* Is the trie field relevant for this subtable and in the current
+ * range? */
+ if (field_plen[j] &&
+ ctx->u32ofs >= ofs->start && ctx->u32ofs < ofs->end) {
+ /* On-demand trie lookup. */
+ if (!ctx->lookup_done) {
+ ctx->match_plen = trie_lookup(ctx->trie, flow, &ctx->nbits);
+ ctx->lookup_done = true;
}
- /* ...or if the field is already unwildcarded. */
- if (mask_prefix_bits_set(wc, ctx->u32ofs, ctx->checkbits)) {
- return true;
- }
- /* We want the trie lookup to never result in unwildcarding any
- * bits that would not be unwildcarded otherwise. First check
- * that the next lookup would cover the trie field, then check
- * that the trie result will not unwildcard more bits than the
- * next lookup will. */
- if (ctx->u32ofs < next_u32ofs &&
- !(ctx->checkbits > field_plen[ctx->idx])) {
- /* Unwildcard the bits and skip the rest. */
- mask_set_prefix_bits(wc, ctx->u32ofs, ctx->checkbits);
- /* Note: Prerequisite already unwildcarded, as the only
- * prerequisite of the supported trie lookup fields is
- * the ethertype, which is always unwildcarded as part
- * of an earlier lookup segment. */
- return true;
+ /* Possible to skip the whole subtable if subtable's prefix on the
+ * field is longer than what is known to match based on the
+ * trie lookup. */
+ if (field_plen[j] > ctx->match_plen) {
+ /* Can safely skip if no wildcarding is done... */
+ if (!wc) {
+ return true;
+ }
+ /* ...or if the field is already unwildcarded. */
+ if (mask_prefix_bits_set(wc, ctx->u32ofs, ctx->nbits)) {
+ return true;
+ }
+ /* We want the trie lookup to never result in unwildcarding any
+ * bits that would not be unwildcarded otherwise. Check
+ * that the trie result will not unwildcard more bits than the
+ * next lookup will. */
+ if (!(ctx->nbits > field_plen[j])) {
+ /* Unwildcard the bits and skip the rest. */
+ mask_set_prefix_bits(wc, ctx->u32ofs, ctx->nbits);
+ /* Note: Prerequisite already unwildcarded, as the only
+ * prerequisite of the supported trie lookup fields is
+ * the ethertype, which is always unwildcarded as part
+ * of an earlier lookup segment. */
+ return true;
+ }
}
}
}
@@ -998,26 +1012,30 @@ check_tries(const struct trie_ctx trie_ctx[CLS_MAX_TRIES],
static struct cls_rule *
find_match(const struct cls_subtable *subtable, const struct flow *flow,
- const struct trie_ctx trie_ctx[CLS_MAX_TRIES], unsigned int n_tries,
+ struct trie_ctx trie_ctx[CLS_MAX_TRIES], unsigned int n_tries,
struct flow_wildcards *wc)
{
uint32_t basis = 0, hash;
struct cls_rule *rule = NULL;
int i;
- uint8_t prev_u32ofs = 0;
+ struct range tries, ofs;
+ tries.start = 0;
+ tries.end = n_tries;
+
+ ofs.start = 0;
/* Try to finish early by checking fields in segments. */
for (i = 0; i < subtable->n_indices; i++) {
struct hindex_node *inode;
+ ofs.end = subtable->index_ofs[i];
- if (check_tries(trie_ctx, n_tries, subtable->trie_plen,
- subtable->index_ofs[i], wc)) {
+ if (check_tries(flow, trie_ctx, &tries, subtable->trie_plen, &ofs,
+ wc)) {
goto range_out;
}
- hash = flow_hash_in_minimask_range(flow, &subtable->mask, prev_u32ofs,
- subtable->index_ofs[i],
- &basis);
- prev_u32ofs = subtable->index_ofs[i];
+ hash = flow_hash_in_minimask_range(flow, &subtable->mask, ofs.start,
+ ofs.end, &basis);
+ ofs.start = ofs.end;
inode = hindex_node_with_hash(&subtable->indices[i], hash);
if (!inode) {
/* No match, can stop immediately, but must fold in the mask
@@ -1042,14 +1060,15 @@ find_match(const struct cls_subtable *subtable, const struct flow *flow,
}
}
}
+ ofs.end = FLOW_U32S;
/* Trie check for the final range. */
- if (check_tries(trie_ctx, n_tries, subtable->trie_plen, FLOW_U32S, wc)) {
+ if (check_tries(flow, trie_ctx, &tries, subtable->trie_plen, &ofs, wc)) {
goto range_out;
}
/* Have 'rule' already if there is a single non-matching rule. */
if (!rule) {
- hash = flow_hash_in_minimask_range(flow, &subtable->mask, prev_u32ofs,
- FLOW_U32S, &basis);
+ hash = flow_hash_in_minimask_range(flow, &subtable->mask, ofs.start,
+ ofs.end, &basis);
HMAP_FOR_EACH_WITH_HASH (rule, hmap_node, hash, &subtable->rules) {
if (minimatch_matches_flow_likely(&rule->match, flow)) {
goto out;
@@ -1065,9 +1084,9 @@ find_match(const struct cls_subtable *subtable, const struct flow *flow,
range_out:
/* Must unwildcard the fields looked up so far, if any. */
- if (wc && prev_u32ofs) {
+ if (wc && ofs.start) {
flow_wildcards_fold_minimask_range(wc, &subtable->mask,
- 0, prev_u32ofs);
+ 0, ofs.start);
}
return NULL;
}
@@ -1369,26 +1388,19 @@ trie_lookup_value(const struct trie_node *node, const ovs_be32 *value,
return match_len;
}
-/* Returns true if the '*trie_ctx' is valid on return. */
-static bool
+static uint8_t
trie_lookup(const struct cls_trie *trie, const struct flow *flow,
- struct trie_ctx *trie_ctx)
+ uint8_t *checkbits)
{
const struct mf_field *mf = trie->field;
- if (!trie->root || !mf || !mf_are_prereqs_ok(mf, flow)) {
- /* Can not skip based on this field, or the current flow
- * does not match the prerequisites for the field. Some match
- * fields are used for multiple purposes, so we should try to
- * skip only when prerequisities are met. */
- return false;
- }
-
- trie_ctx->u32ofs = mf->flow_u32ofs;
- trie_ctx->match_plen =
- trie_lookup_value(trie->root, &((ovs_be32 *)flow)[mf->flow_u32ofs],
- &trie_ctx->checkbits);
- return true;
+ /* Check that we can skip based on this field, and that the current flow
+ * matches the prerequisites for the field. Some match
+ * fields are used for multiple purposes, so we should try to
+ * use the trie only when prerequisites are met. */
+ return (!trie->root || !mf || !mf_are_prereqs_ok(mf, flow)) ? UINT8_MAX
+ : trie_lookup_value(trie->root, &((ovs_be32 *)flow)[mf->flow_u32ofs],
+ checkbits);
}
/* Returns the length of a longest-prefix match mask for the field 'mf' in
--
1.7.10.4
More information about the dev
mailing list