[ovs-dev] [RFC PATCH] classifier: Segmented sub-table matching.

Jarno Rajahalme jrajahalme at nicira.com
Fri Oct 25 23:32:57 UTC 2013


Adds additional indices to classifier tables so that matching can
stop as soon as it is evident that no match will be found, or as
soon as a single matching rule has been found.  In particular, this
avoids folding the transport port masks into the lookup's wildcards
when those fields need not be examined.

struct flow is reorganized to allow hashing in consecutive
ranges.  The indexing segment boundaries are defined in
lib/flow.h, next to the struct definition, which should help
keep them up to date as struct flow evolves.

Signed-off-by: Jarno Rajahalme <jrajahalme at nicira.com>
---
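RFC note (illustration only, not part of the patch): the sketch below is a
minimal, self-contained picture of the lookup control flow that the segment
indices enable.  The types and helpers are hypothetical stand-ins for the
hindex/minimask code in the diff; only the shape of the loop mirrors
find_match().

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define N_SEGMENTS 3

struct seg_index {
    /* Stand-in for hindex_node_with_hash(): true if any rule hashes
     * to 'hash' in this segment's index. */
    bool (*has_hash)(uint32_t hash);
};

struct seg_table {
    struct seg_index idx[N_SEGMENTS];   /* One index per used segment. */
    size_t seg_end[N_SEGMENTS];         /* Ending u32 offset of each segment. */
    int n_idx;
};

/* Stand-in for flow_hash_in_minimask_range(): hashes the masked flow
 * u32s in [start, end), chaining from 'basis', so hashing segment by
 * segment yields the same running value as one pass over the whole
 * range. */
static uint32_t
hash_range(const uint32_t *flow, const uint32_t *mask,
           size_t start, size_t end, uint32_t basis)
{
    uint32_t hash = basis;
    size_t i;

    for (i = start; i < end; i++) {
        if (mask[i]) {
            hash = hash * 0x9e3779b1u + (flow[i] & mask[i]);
        }
    }
    return hash;
}

/* Returns false as soon as any segment index proves that no rule in
 * this table can match; later flow fields are then never hashed, so
 * their mask bits need not be folded into the wildcards. */
static bool
segmented_lookup(const struct seg_table *t, const uint32_t *flow,
                 const uint32_t *mask, size_t n_u32s)
{
    uint32_t hash = 0;
    size_t start = 0;
    int i;

    for (i = 0; i < t->n_idx; i++) {
        hash = hash_range(flow, mask, start, t->seg_end[i], hash);
        if (!t->idx[i].has_hash(hash)) {
            return false;           /* Early exit, as in find_match(). */
        }
        start = t->seg_end[i];
    }
    /* Hash the remainder and fall through to the full hmap lookup. */
    hash = hash_range(flow, mask, start, n_u32s, hash);
    return true;
}
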
 lib/classifier.c    |  158 +++++++++++++++++++++++++++++++++++++++++++-------
 lib/classifier.h    |    5 ++
 lib/flow.c          |  160 +++++++++++++++++++++++++++++++++++++++++++++++++++
 lib/flow.h          |   73 +++++++++++++++++------
 tests/classifier.at |   38 ++++++++++++
 5 files changed, 395 insertions(+), 39 deletions(-)
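
The main win is on the un-wildcarding side: when a segment index misses, only
the table mask up to that segment is folded into the megaflow wildcards.  A
hypothetical sketch of that fold follows, assuming a flat uint32_t view of
both masks (the patch's flow_wildcards_fold_minimask_range() operates on a
compressed minimask instead):

/* Illustration only: fold a table's mask into the accumulated wildcards
 * for u32 words [start, end) only.  On an early exit at segment i,
 * find_match() calls the range fold with end == flow_segment_ofs[i], so
 * e.g. the tp_src/tp_dst mask words beyond that offset stay wildcarded
 * in the resulting megaflow. */
static void
fold_mask_range(uint32_t *wc_masks, const uint32_t *table_mask,
                size_t start, size_t end)
{
    size_t i;

    for (i = start; i < end; i++) {
        wc_masks[i] |= table_mask[i];
    }
}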

diff --git a/lib/classifier.c b/lib/classifier.c
index 53487a4..a92fc6f 100644
--- a/lib/classifier.c
+++ b/lib/classifier.c
@@ -42,7 +42,8 @@ static void update_tables_after_removal(struct classifier *,
                                         unsigned int del_priority);
 
 static struct cls_rule *find_match(const struct cls_table *,
-                                   const struct flow *);
+                                   const struct flow *,
+                                   struct flow_wildcards *);
 static struct cls_rule *find_equal(struct cls_table *,
                                    const struct miniflow *, uint32_t hash);
 static struct cls_rule *insert_rule(struct classifier *,
@@ -297,8 +298,15 @@ classifier_remove(struct classifier *cls, struct cls_rule *rule)
     struct cls_partition *partition;
     struct cls_rule *head;
     struct cls_table *table;
+    int i;
 
     table = find_table(cls, &rule->match.mask);
+
+    /* Remove rule node from indices. */
+    for (i = 0; i < table->n_hindex; i++) {
+        hindex_remove(&table->hindex[i], &rule->hindex_node[i]);
+    }
+
     head = find_equal(table, &rule->match.flow, rule->hmap_node.hash);
     if (head != rule) {
         list_remove(&rule->list);
@@ -379,10 +387,7 @@ classifier_lookup(const struct classifier *cls, const struct flow *flow,
             continue;
         }
 
-        rule = find_match(table, flow);
-        if (wc) {
-            flow_wildcards_fold_minimask(wc, &table->mask);
-        }
+        rule = find_match(table, flow, wc);
         if (rule) {
             best = rule;
             LIST_FOR_EACH_CONTINUE (table, list_node, &cls->tables_priority) {
@@ -395,10 +400,7 @@ classifier_lookup(const struct classifier *cls, const struct flow *flow,
                     continue;
                 }
 
-                rule = find_match(table, flow);
-                if (wc) {
-                    flow_wildcards_fold_minimask(wc, &table->mask);
-                }
+                rule = find_match(table, flow, wc);
                 if (rule && rule->priority > best->priority) {
                     best = rule;
                 }
@@ -654,11 +656,45 @@ insert_table(struct classifier *cls, const struct minimask *mask)
 {
     uint32_t hash = minimask_hash(mask, 0);
     struct cls_table *table;
+    int i;
+    struct flow_wildcards wc, new;
+    uint8_t prev_u32ofs;
+    int n_index;
 
     table = xzalloc(sizeof *table);
     hmap_init(&table->rules);
     minimask_clone(&table->mask, mask);
-    hmap_insert(&cls->tables, &table->hmap_node, minimask_hash(mask, 0));
+
+    /* Init indices. */
+    flow_wildcards_init_catchall(&new);
+    wc = new;
+    n_index = 0;
+    prev_u32ofs = 0;
+    for (i = 0; i < FLOW_SEGMENTS; i++) {
+        flow_wildcards_fold_minimask_range(&new, mask, prev_u32ofs,
+                                           flow_segments_u32[i]);
+        /* Add an index if it adds mask bits. */
+        if (!flow_wildcards_equal(&new, &wc)) {
+            hindex_init(&table->hindex[n_index]);
+            table->flow_segment_ofs[n_index] = flow_segments_u32[i];
+            n_index++;
+            wc = new;
+        }
+        prev_u32ofs = flow_segments_u32[i];
+    }
+    /* Check if the rest of the table's mask adds any bits,
+     * and remove the last index if it doesn't. */
+    if (n_index) {
+        flow_wildcards_fold_minimask_range(&new, mask, prev_u32ofs, FLOW_U32S);
+        if (flow_wildcards_equal(&new, &wc)) {
+            n_index--;
+            table->flow_segment_ofs[n_index] = 0;
+            hindex_destroy(&table->hindex[n_index]);
+        }
+    }
+    table->n_hindex = n_index;
+
+    hmap_insert(&cls->tables, &table->hmap_node, hash);
     list_push_back(&cls->tables_priority, &table->list_node);
     table->tag = (minimask_get_metadata_mask(mask) == OVS_BE64_MAX
                   ? tag_create_deterministic(hash)
@@ -670,8 +706,14 @@ insert_table(struct classifier *cls, const struct minimask *mask)
 static void
 destroy_table(struct classifier *cls, struct cls_table *table)
 {
+    int i;
+
     minimask_destroy(&table->mask);
     hmap_remove(&cls->tables, &table->hmap_node);
+    /* Destroy indices. */
+    for (i = 0; i < table->n_hindex; i++) {
+        hindex_destroy(&table->hindex[i]);
+    }
     hmap_destroy(&table->rules);
     list_remove(&table->list_node);
     free(table);
@@ -768,18 +810,69 @@ update_tables_after_removal(struct classifier *cls, struct cls_table *table,
 }
 
 static struct cls_rule *
-find_match(const struct cls_table *table, const struct flow *flow)
-{
-    uint32_t hash = flow_hash_in_minimask(flow, &table->mask, 0);
-    struct cls_rule *rule;
+find_match(const struct cls_table *table, const struct flow *flow,
+           struct flow_wildcards *wc)
+{
+    uint32_t hash = 0;
+    struct cls_rule *rule = NULL;
+    int i;
+    uint8_t prev_u32ofs = 0;
+
+    /* Try to finish early with indices. */
+
+    for (i = 0; i < table->n_hindex; i++) {
+        struct hindex_node *inode;
+
+        hash = flow_hash_in_minimask_range(flow, &table->mask, prev_u32ofs,
+                                           table->flow_segment_ofs[i], hash);
+        inode = hindex_node_with_hash(&table->hindex[i], hash);
+        if (!inode) {
+            /* No match, can stop immediately, but must fold in the mask
+             * covered so far. */
+            if (wc) {
+                flow_wildcards_fold_minimask_range(wc, &table->mask, 0,
+                                                   table->flow_segment_ofs[i]);
+            }
+            return NULL;
+        }
+
+        /* Note: Hash collisions may make us look deeper than absolutely
+         * necessary (false positives). */
+
+        if (!inode->s) {
+            struct cls_rule *rl;
+            /* Found single candidate. */
+            ASSIGN_CONTAINER(rl, inode, hindex_node[i]);
 
+            /* Do not check same rule again. */
+            if (rl != rule) {
+                rule = rl; /* Update last rule we looked at. */
+
+                if (minimatch_matches_flow(&rule->match, flow)) {
+                    /* Found match, no need to look further. */
+                    goto out;
+                }
+                /* No match, keep looking to avoid folding the full mask. */
+            }
+        }
+        prev_u32ofs = table->flow_segment_ofs[i];
+    }
+
+    hash = flow_hash_in_minimask_range(flow, &table->mask, prev_u32ofs,
+                                       FLOW_U32S, hash);
     HMAP_FOR_EACH_WITH_HASH (rule, hmap_node, hash, &table->rules) {
         if (minimatch_matches_flow(&rule->match, flow)) {
-            return rule;
+            goto out;
         }
     }
 
-    return NULL;
+    rule = NULL;
+
+ out:
+    if (wc) {
+        flow_wildcards_fold_minimask(wc, &table->mask);
+    }
+    return rule;
 }
 
 static struct cls_rule *
@@ -796,24 +889,40 @@ find_equal(struct cls_table *table, const struct miniflow *flow, uint32_t hash)
 }
 
 static struct cls_rule *
-insert_rule(struct classifier *cls,
-            struct cls_table *table, struct cls_rule *new)
+insert_rule(struct classifier *cls, struct cls_table *table,
+            struct cls_rule *new)
 {
     struct cls_rule *head;
     struct cls_rule *old = NULL;
+    int i;
+    uint32_t hash = 0;
+    uint8_t prev_u32ofs = 0;
+
+    /* Add new node to indices. TODO: Add only if top of list? */
+    for (i = 0; i < table->n_hindex; i++) {
+        hash = miniflow_hash_in_minimask_range(&new->match.flow,
+                                               &new->match.mask, prev_u32ofs,
+                                               table->flow_segment_ofs[i],
+                                               hash);
+        hindex_insert(&table->hindex[i], &new->hindex_node[i], hash);
+        prev_u32ofs = table->flow_segment_ofs[i];
+    }
 
-    new->hmap_node.hash = miniflow_hash_in_minimask(&new->match.flow,
-                                                    &new->match.mask, 0);
+    hash = miniflow_hash_in_minimask_range(&new->match.flow, &new->match.mask,
+                                           prev_u32ofs, FLOW_U32S, hash);
 
-    head = find_equal(table, &new->match.flow, new->hmap_node.hash);
+    head = find_equal(table, &new->match.flow, hash);
     if (!head) {
-        hmap_insert(&table->rules, &new->hmap_node, new->hmap_node.hash);
+        hmap_insert(&table->rules, &new->hmap_node, hash);
         list_init(&new->list);
         goto out;
     } else {
         /* Scan the list for the insertion point that will keep the list in
          * order of decreasing priority. */
         struct cls_rule *rule;
+
+        new->hmap_node.hash = hash; /* Otherwise done by hmap_insert. */
+
         FOR_EACH_RULE_IN_LIST (rule, head) {
             if (new->priority >= rule->priority) {
                 if (rule == head) {
@@ -840,6 +949,11 @@ insert_rule(struct classifier *cls,
  out:
     if (!old) {
         update_tables_after_insertion(cls, table, new->priority);
+    } else {
+        /* Remove old node from indices. */
+        for (i = 0; i < table->n_hindex; i++) {
+            hindex_remove(&table->hindex[i], &old->hindex_node[i]);
+        }
     }
     return old;
 }
diff --git a/lib/classifier.h b/lib/classifier.h
index 8e3bf61..d1f6eff 100644
--- a/lib/classifier.h
+++ b/lib/classifier.h
@@ -103,6 +103,7 @@
  * - Only the main thread is allowed to iterate over rules. */
 
 #include "flow.h"
+#include "hindex.h"
 #include "hmap.h"
 #include "list.h"
 #include "match.h"
@@ -138,6 +139,9 @@ struct cls_table {
     unsigned int max_priority;  /* Max priority of any rule in the table. */
     unsigned int max_count;     /* Count of max_priority rules. */
     tag_type tag;               /* Tag generated from mask for partitioning. */
+    struct hindex hindex[FLOW_SEGMENTS]; /* Search indices. */
+    uint8_t n_hindex;           /* How many indices to use. */
+    uint8_t flow_segment_ofs[FLOW_SEGMENTS]; /* Ending u32 flow offsets. */
 };
 
 /* Returns true if 'table' is a "catch-all" table that will match every
@@ -151,6 +155,7 @@ cls_table_is_catchall(const struct cls_table *table)
 /* A rule in a "struct cls_table". */
 struct cls_rule {
     struct hmap_node hmap_node; /* Within struct cls_table 'rules'. */
+    struct hindex_node hindex_node[FLOW_SEGMENTS];
     struct list list;           /* List of identical, lower-priority rules. */
     struct minimatch match;     /* Matching rule. */
     unsigned int priority;      /* Larger numbers are higher priorities. */
diff --git a/lib/flow.c b/lib/flow.c
index 51851cf..f046d63 100644
--- a/lib/flow.c
+++ b/lib/flow.c
@@ -41,6 +41,13 @@
 COVERAGE_DEFINE(flow_extract);
 COVERAGE_DEFINE(miniflow_malloc);
 
+/* U32 indices for segmented flow classification. */
+uint8_t flow_segments_u32[FLOW_SEGMENTS] = {
+    FLOW_SEGMENT_1_ENDS_AT / 4,
+    FLOW_SEGMENT_2_ENDS_AT / 4,
+    FLOW_SEGMENT_3_ENDS_AT / 4,
+};
+
 static struct arp_eth_header *
 pull_arp(struct ofpbuf *packet)
 {
@@ -682,6 +689,58 @@ flow_wildcards_fold_minimask(struct flow_wildcards *wc,
     flow_union_with_miniflow(&wc->masks, &mask->masks);
 }
 
+/* Perform a bitwise OR of miniflow 'src' flow data in range [start, end)
+ * with the equivalent fields in 'dst', storing the result in 'dst'. */
+static void
+flow_union_with_miniflow_range(struct flow *dst, const struct miniflow *src,
+                               uint8_t start, uint8_t end)
+{
+    uint32_t *dst_u32 = (uint32_t *) dst;
+    const uint32_t *p = src->values;
+    int i = 0;
+    int end_ofs = end;
+    uint32_t msk;
+
+    while (start >= 32) {
+        p += popcount(src->map[i]);
+        i++;
+        start -= 32;
+        end_ofs -= 32;
+        dst_u32 += 32;
+    }
+    msk = (1u << start) - 1; /* 'start' LSBs set */
+
+    for (; i < MINI_N_MAPS; i++) {
+        uint32_t map = src->map[i];
+
+        if (start > 0) {
+            p += popcount(map & msk);  /* Skip to start. */
+            map &= ~msk;
+            start = 0;
+        }
+
+        for (; map; map = zero_rightmost_1bit(map)) {
+            int ofs = raw_ctz(map);
+            if (ofs >= end_ofs) {
+                return;
+            }
+            dst_u32[ofs] |= *p++;
+        }
+        dst_u32 += 32;
+        end_ofs -= 32;
+    }
+}
+
+/* Fold minimask 'mask''s wildcard mask into 'wc's wildcard mask
+ * in range [start, end). */
+void
+flow_wildcards_fold_minimask_range(struct flow_wildcards *wc,
+                                   const struct minimask *mask,
+                                   uint8_t start, uint8_t end)
+{
+    flow_union_with_miniflow_range(&wc->masks, &mask->masks, start, end);
+}
+
 /* Returns a hash of the wildcards in 'wc'. */
 uint32_t
 flow_wildcards_hash(const struct flow_wildcards *wc, uint32_t basis)
@@ -1414,6 +1473,53 @@ miniflow_hash_in_minimask(const struct miniflow *flow,
     return mhash_finish(hash, (p - mask->masks.values) * 4);
 }
 
+/* Returns a hash value for the bits of range [start, end) in 'flow',
+ * where there are 1-bits in 'mask', given 'hash'.
+ *
+ * The hash values returned by this function are the same as those returned by
+ * flow_hash_in_minimask_range(), only the form of the arguments differ. */
+uint32_t
+miniflow_hash_in_minimask_range(const struct miniflow *flow,
+                                const struct minimask *mask,
+                                uint8_t start, uint8_t end, uint32_t hash)
+{
+    const uint32_t *p = mask->masks.values;
+    int i = 0;
+    uint32_t msk;
+
+    while (start >= 32) {
+        p += popcount(mask->masks.map[i]);
+        i++;
+        start -= 32;
+    }
+    msk = (1u << start) - 1; /* 'start' LSBs set */
+
+    for (; i < MINI_N_MAPS; i++) {
+        uint32_t map = mask->masks.map[i];
+
+        if (start > 0) {
+            p += popcount(map & msk);  /* Skip to start. */
+            map &= ~msk;
+            start = 0;
+        }
+
+        for (; map; map = zero_rightmost_1bit(map)) {
+            if (*p) {
+                int ofs = raw_ctz(map) + i * 32;
+                if (ofs >= end) {
+                    goto out;
+                }
+                hash = mhash_add(hash, miniflow_get(flow, ofs) & *p);
+            }
+            p++;
+        }
+    }
+ out:
+    /* RFC: Only finish at the end for compatibility. */
+    return end * 4 < sizeof(struct flow) ? hash :
+        mhash_finish(hash, (p - mask->masks.values) * 4);
+}
+
 /* Returns a hash value for the bits of 'flow' where there are 1-bits in
  * 'mask', given 'basis'.
  *
@@ -1444,6 +1550,60 @@ flow_hash_in_minimask(const struct flow *flow, const struct minimask *mask,
 
     return mhash_finish(hash, (p - mask->masks.values) * 4);
 }
+
+/* Returns a hash value for the bits of range [start, end) in 'flow',
+ * where there are 1-bits in 'mask', given 'hash'.
+ *
+ * The hash values returned by this function are the same as those returned by
+ * miniflow_hash_in_minimask_range(), only the form of the arguments differ. */
+uint32_t
+flow_hash_in_minimask_range(const struct flow *flow,
+                            const struct minimask *mask,
+                            uint8_t start, uint8_t end, uint32_t hash)
+{
+    const uint32_t *flow_u32 = (const uint32_t *) flow;
+    const uint32_t *p = mask->masks.values;
+    int i = 0;
+    int end_ofs = end;
+    uint32_t msk;
+
+    while (start >= 32) {
+        p += popcount(mask->masks.map[i]);
+        i++;
+        start -= 32;
+        end_ofs -= 32;
+        flow_u32 += 32;
+    }
+    msk = (1u << start) - 1; /* 'start' LSBs set */
+
+    for (; i < MINI_N_MAPS; i++) {
+        uint32_t map = mask->masks.map[i];
+
+        if (start > 0) {
+            p += popcount(map & msk);  /* Skip to start. */
+            map &= ~msk;
+            start = 0;
+        }
+
+        for (; map; map = zero_rightmost_1bit(map)) {
+            if (*p) {
+                int ofs = raw_ctz(map);
+                if (ofs >= end_ofs) {
+                    goto out;
+                }
+                hash = mhash_add(hash, flow_u32[ofs] & *p);
+            }
+            p++;
+        }
+        flow_u32 += 32;
+        end_ofs -= 32;
+    }
+ out:
+    /* RFC: Only finish at the end for compatibility. */
+    return end * 4 < sizeof(struct flow) ? hash :
+        mhash_finish(hash, (p - mask->masks.values) * 4);
+}
+
 
 /* Initializes 'dst' as a copy of 'src'.  The caller must eventually free 'dst'
  * with minimask_destroy(). */
diff --git a/lib/flow.h b/lib/flow.h
index ad51496..b22a08a 100644
--- a/lib/flow.h
+++ b/lib/flow.h
@@ -90,42 +90,69 @@ union flow_in_port {
  * a 32-bit datapath port number.
  */
 struct flow {
+    /* L1 */
     struct flow_tnl tunnel;     /* Encapsulating tunnel parameters. */
     ovs_be64 metadata;          /* OpenFlow Metadata. */
+    uint32_t regs[FLOW_N_REGS]; /* Registers. */
+    uint32_t skb_priority;      /* Packet priority for QoS. */
+    uint32_t pkt_mark;          /* Packet mark. */
+    union flow_in_port in_port; /* Input port.*/
+
+    /* L2 */
+    uint8_t dl_src[6];          /* Ethernet source address. */
+    uint8_t dl_dst[6];          /* Ethernet destination address. */
+    ovs_be16 dl_type;           /* Ethernet frame type. */
+    ovs_be16 vlan_tci;          /* If 802.1Q, TCI | VLAN_CFI; otherwise 0. */
+
+    /* L3 */
+    ovs_be32 mpls_lse;          /* MPLS label stack entry. */
     struct in6_addr ipv6_src;   /* IPv6 source address. */
     struct in6_addr ipv6_dst;   /* IPv6 destination address. */
     struct in6_addr nd_target;  /* IPv6 neighbor discovery (ND) target. */
-    uint32_t skb_priority;      /* Packet priority for QoS. */
-    uint32_t regs[FLOW_N_REGS]; /* Registers. */
+    ovs_be32 ipv6_label;        /* IPv6 flow label. */
     ovs_be32 nw_src;            /* IPv4 source address. */
     ovs_be32 nw_dst;            /* IPv4 destination address. */
-    ovs_be32 ipv6_label;        /* IPv6 flow label. */
-    union flow_in_port in_port; /* Input port.*/
-    uint32_t pkt_mark;          /* Packet mark. */
-    ovs_be32 mpls_lse;          /* MPLS label stack entry. */
-    ovs_be16 vlan_tci;          /* If 802.1Q, TCI | VLAN_CFI; otherwise 0. */
-    ovs_be16 dl_type;           /* Ethernet frame type. */
-    ovs_be16 tp_src;            /* TCP/UDP/SCTP source port. */
-    ovs_be16 tp_dst;            /* TCP/UDP/SCTP destination port. */
-    uint8_t dl_src[6];          /* Ethernet source address. */
-    uint8_t dl_dst[6];          /* Ethernet destination address. */
-    uint8_t nw_proto;           /* IP protocol or low 8 bits of ARP opcode. */
+    uint8_t nw_frag;            /* FLOW_FRAG_* flags. */
     uint8_t nw_tos;             /* IP ToS (including DSCP and ECN). */
+    uint8_t nw_ttl;             /* IP TTL/Hop Limit. */
+    uint8_t nw_proto;           /* IP protocol or low 8 bits of ARP opcode. */
     uint8_t arp_sha[6];         /* ARP/ND source hardware address. */
     uint8_t arp_tha[6];         /* ARP/ND target hardware address. */
-    uint8_t nw_ttl;             /* IP TTL/Hop Limit. */
-    uint8_t nw_frag;            /* FLOW_FRAG_* flags. Keep last for the
-                                   BUILD_ASSERT_DECL below */
+    /* L4 */
+    ovs_be16 tp_src;            /* TCP/UDP/SCTP source port. */
+    ovs_be16 tp_dst;            /* TCP/UDP/SCTP destination port.
+                                 * Keep last for the BUILD_ASSERT_DECL below */
 };
 BUILD_ASSERT_DECL(sizeof(struct flow) % 4 == 0);
 
 #define FLOW_U32S (sizeof(struct flow) / 4)
 
 /* Remember to update FLOW_WC_SEQ when changing 'struct flow'. */
-BUILD_ASSERT_DECL(offsetof(struct flow, nw_frag) + 1
+BUILD_ASSERT_DECL(offsetof(struct flow, tp_dst) + 2
                   == sizeof(struct flow_tnl) + 152
                   && FLOW_WC_SEQ == 21);
 
+/* Incremental points at which flow classification may be performed in
+ * segments.
+ * These are defined here because they depend on the layout of struct
+ * flow defined above:
+ * Each offset must be on a distinct, successive U32 boundary strictly
+ * within the struct flow. */
+enum {
+    FLOW_SEGMENTS = 3,
+    FLOW_SEGMENT_1_ENDS_AT = offsetof(struct flow, dl_src),
+    FLOW_SEGMENT_2_ENDS_AT = offsetof(struct flow, mpls_lse),
+    FLOW_SEGMENT_3_ENDS_AT = offsetof(struct flow, tp_src),
+};
+extern uint8_t flow_segments_u32[FLOW_SEGMENTS];
+BUILD_ASSERT_DECL(FLOW_SEGMENT_1_ENDS_AT % 4 == 0);
+BUILD_ASSERT_DECL(FLOW_SEGMENT_2_ENDS_AT % 4 == 0);
+BUILD_ASSERT_DECL(FLOW_SEGMENT_3_ENDS_AT % 4 == 0);
+BUILD_ASSERT_DECL(                     0 < FLOW_SEGMENT_1_ENDS_AT);
+BUILD_ASSERT_DECL(FLOW_SEGMENT_1_ENDS_AT < FLOW_SEGMENT_2_ENDS_AT);
+BUILD_ASSERT_DECL(FLOW_SEGMENT_2_ENDS_AT < FLOW_SEGMENT_3_ENDS_AT);
+BUILD_ASSERT_DECL(FLOW_SEGMENT_3_ENDS_AT < sizeof(struct flow));
+
 /* Represents the metadata fields of struct flow. */
 struct flow_metadata {
     ovs_be64 tun_id;                 /* Encapsulating tunnel ID. */
@@ -262,6 +289,18 @@ bool flow_wildcards_has_extra(const struct flow_wildcards *,
 void flow_wildcards_fold_minimask(struct flow_wildcards *,
                                   const struct minimask *);
 
+void flow_wildcards_fold_minimask_range(struct flow_wildcards *,
+                                        const struct minimask *,
+                                        uint8_t start, uint8_t end);
+uint32_t flow_hash_in_minimask_range(const struct flow *,
+                                     const struct minimask *,
+                                     uint8_t start, uint8_t end,
+                                     uint32_t hash);
+uint32_t miniflow_hash_in_minimask_range(const struct miniflow *,
+                                         const struct minimask *,
+                                         uint8_t start, uint8_t end,
+                                         uint32_t hash);
+
 uint32_t flow_wildcards_hash(const struct flow_wildcards *, uint32_t basis);
 bool flow_wildcards_equal(const struct flow_wildcards *,
                           const struct flow_wildcards *);
diff --git a/tests/classifier.at b/tests/classifier.at
index cf0cc44..546c8f7 100644
--- a/tests/classifier.at
+++ b/tests/classifier.at
@@ -22,3 +22,41 @@ m4_foreach(
   [AT_SETUP([miniflow - m4_bpatsubst(testname, [-], [ ])])
    AT_CHECK([test-classifier testname], [0], [], [])
    AT_CLEANUP])])
+
+AT_BANNER([flow classifier lookup segmentation])
+AT_SETUP([flow classifier - lookup segmentation])
+OVS_VSWITCHD_START
+ADD_OF_PORTS([br0], [1], [2], [3])
+AT_DATA([flows.txt], [dnl
+table=0 in_port=1 priority=16,tcp,nw_dst=10.1.0.0/255.255.0.0,action=output(3)
+table=0 in_port=1 priority=32,tcp,nw_dst=10.1.2.15,action=output(2)
+table=0 in_port=1 priority=33,tcp,nw_dst=10.1.2.15,tp_dst=80,action=drop
+table=0 in_port=1 priority=0,ip,action=drop
+table=0 in_port=2 priority=16,tcp,nw_dst=192.168.0.0/255.255.0.0,action=output(1)
+table=0 in_port=2 priority=0,ip,action=drop
+table=0 in_port=3 priority=16,tcp,nw_src=10.1.0.0/255.255.0.0,action=output(1)
+table=0 in_port=3 priority=0,ip,action=drop
+])
+AT_CHECK([ovs-ofctl add-flows br0 flows.txt])
+AT_CHECK([ovs-appctl ofproto/trace br0 'in_port=2,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,dl_type=0x0800,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_proto=6,nw_tos=0,nw_ttl=128,tp_src=8,tp_dst=80'], [0], [stdout])
+AT_CHECK([tail -2 stdout], [0],
+  [Relevant fields: skb_priority=0,tcp,in_port=2,nw_dst=192.168.0.0/16,nw_frag=no
+Datapath actions: 1
+])
+AT_CHECK([ovs-appctl ofproto/trace br0 'in_port=1,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,dl_type=0x0800,nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_proto=6,nw_tos=0,nw_ttl=128,tp_src=8,tp_dst=80'], [0], [stdout])
+AT_CHECK([tail -2 stdout], [0],
+  [Relevant fields: skb_priority=0,tcp,in_port=1,nw_dst=192.168.0.2,nw_frag=no
+Datapath actions: drop
+])
+AT_CHECK([ovs-appctl ofproto/trace br0 'in_port=1,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,dl_type=0x0800,nw_src=192.168.0.1,nw_dst=10.1.2.15,nw_proto=6,nw_tos=0,nw_ttl=128,tp_src=8,tp_dst=80'], [0], [stdout])
+AT_CHECK([tail -2 stdout], [0],
+  [Relevant fields: skb_priority=0,tcp,in_port=1,nw_dst=10.1.2.15,nw_frag=no,tp_dst=80
+Datapath actions: drop
+])
+AT_CHECK([ovs-appctl ofproto/trace br0 'in_port=1,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,dl_type=0x0800,nw_src=192.168.0.1,nw_dst=10.1.2.15,nw_proto=6,nw_tos=0,nw_ttl=128,tp_src=8,tp_dst=79'], [0], [stdout])
+AT_CHECK([tail -2 stdout], [0],
+  [Relevant fields: skb_priority=0,tcp,in_port=1,nw_dst=10.1.2.15,nw_frag=no,tp_dst=79
+Datapath actions: 2
+])
+OVS_VSWITCHD_STOP
+AT_CLEANUP
-- 
1.7.10.4