[ovs-dev] [PATCH] dpif-netdev: removed hmap flow_table

Daniele Di Proietto ddiproietto at vmware.com
Tue May 13 23:15:18 UTC 2014


As suggested by others, we can use the classifier, instead of the
hash table, as the only flow container in dpif-netdev.

Signed-off-by: Daniele Di Proietto <ddiproietto at vmware.com>
---
 lib/dpif-netdev.c | 68 +++++++++++++++++++++++++++----------------------------
 1 file changed, 34 insertions(+), 34 deletions(-)

diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
index a255a96..9285b58 100644
--- a/lib/dpif-netdev.c
+++ b/lib/dpif-netdev.c
@@ -133,15 +133,14 @@ struct dp_netdev {
 
     /* Flows.
      *
-     * Readers of 'cls' and 'flow_table' must take a 'cls->rwlock' read lock.
+     * Readers of 'cls' must take a 'cls->rwlock' read lock.
      *
-     * Writers of 'cls' and 'flow_table' must take the 'flow_mutex' and then
+     * Writers of 'cls' must take the 'flow_mutex' and then
      * the 'cls->rwlock' write lock.  (The outer 'flow_mutex' allows writers to
-     * atomically perform multiple operations on 'cls' and 'flow_table'.)
+     * atomically perform multiple operations on 'cls'.)
      */
     struct ovs_mutex flow_mutex;
     struct classifier cls;      /* Classifier.  Protected by cls.rwlock. */
-    struct hmap flow_table OVS_GUARDED; /* Flow table. */
 
     /* Queues.
      *
@@ -203,7 +202,7 @@ struct dp_netdev_port {
     char *type;                 /* Port type as requested by user. */
 };
 
-/* A flow in dp_netdev's 'flow_table'.
+/* A flow in dp_netdev's 'cls'.
  *
  *
  * Thread-safety
@@ -246,8 +245,6 @@ struct dp_netdev_flow {
     /* Packet classification. */
     const struct cls_rule cr;   /* In owning dp_netdev's 'cls'. */
 
-    /* Hash table index by unmasked flow. */
-    const struct hmap_node node; /* In owning dp_netdev's 'flow_table'. */
     const struct flow flow;      /* The flow that created this entry. */
 
     /* Statistics.
@@ -466,7 +463,6 @@ create_dp_netdev(const char *name, const struct dpif_class *class,
 
     ovs_mutex_init(&dp->flow_mutex);
     classifier_init(&dp->cls, NULL);
-    hmap_init(&dp->flow_table);
 
     fat_rwlock_init(&dp->queue_rwlock);
 
@@ -567,7 +563,6 @@ dp_netdev_free(struct dp_netdev *dp)
     fat_rwlock_destroy(&dp->queue_rwlock);
 
     classifier_destroy(&dp->cls);
-    hmap_destroy(&dp->flow_table);
     ovs_mutex_destroy(&dp->flow_mutex);
     seq_destroy(dp->port_seq);
     hmap_destroy(&dp->ports);
@@ -622,7 +617,7 @@ dpif_netdev_get_stats(const struct dpif *dpif, struct dpif_dp_stats *stats)
     size_t i;
 
     fat_rwlock_rdlock(&dp->cls.rwlock);
-    stats->n_flows = hmap_count(&dp->flow_table);
+    stats->n_flows = classifier_count(&dp->cls);
     fat_rwlock_unlock(&dp->cls.rwlock);
 
     stats->n_hit = stats->n_missed = stats->n_lost = 0;
@@ -930,22 +925,22 @@ dp_netdev_remove_flow(struct dp_netdev *dp, struct dp_netdev_flow *flow)
     OVS_REQUIRES(dp->flow_mutex)
 {
     struct cls_rule *cr = CONST_CAST(struct cls_rule *, &flow->cr);
-    struct hmap_node *node = CONST_CAST(struct hmap_node *, &flow->node);
 
     classifier_remove(&dp->cls, cr);
-    hmap_remove(&dp->flow_table, node);
     ovsrcu_postpone(dp_netdev_flow_free, flow);
 }
 
 static void
 dp_netdev_flow_flush(struct dp_netdev *dp)
 {
-    struct dp_netdev_flow *netdev_flow, *next;
+    struct dp_netdev_flow *flow, *next_flow;
+    struct cls_cursor cursor;
 
     ovs_mutex_lock(&dp->flow_mutex);
     fat_rwlock_wrlock(&dp->cls.rwlock);
-    HMAP_FOR_EACH_SAFE (netdev_flow, next, node, &dp->flow_table) {
-        dp_netdev_remove_flow(dp, netdev_flow);
+    cls_cursor_init(&cursor, &dp->cls, NULL);
+    CLS_CURSOR_FOR_EACH_SAFE (flow, next_flow, cr, &cursor) {
+        dp_netdev_remove_flow(dp, flow);
     }
     fat_rwlock_unlock(&dp->cls.rwlock);
     ovs_mutex_unlock(&dp->flow_mutex);
@@ -1066,11 +1061,15 @@ dp_netdev_find_flow(const struct dp_netdev *dp, const struct flow *flow)
 {
     struct dp_netdev_flow *netdev_flow;
 
-    HMAP_FOR_EACH_WITH_HASH (netdev_flow, node, flow_hash(flow, 0),
-                             &dp->flow_table) {
-        if (flow_equal(&netdev_flow->flow, flow)) {
-            return netdev_flow;
-        }
+    netdev_flow = dp_netdev_flow_cast(classifier_lookup(&dp->cls, flow, NULL));
+
+    if (!netdev_flow)
+        return NULL;
+
+    if (flow_equal(&netdev_flow->flow, flow)) {
+        return netdev_flow;
+    } else {
+        VLOG_ERR("classifier_lookup returned different flow");
     }
 
     return NULL;
@@ -1247,9 +1246,6 @@ dp_netdev_flow_add(struct dp_netdev *dp, const struct flow *flow,
     fat_rwlock_wrlock(&dp->cls.rwlock);
     classifier_insert(&dp->cls,
                       CONST_CAST(struct cls_rule *, &netdev_flow->cr));
-    hmap_insert(&dp->flow_table,
-                CONST_CAST(struct hmap_node *, &netdev_flow->node),
-                flow_hash(flow, 0));
     fat_rwlock_unlock(&dp->cls.rwlock);
 
     return 0;
@@ -1297,7 +1293,7 @@ dpif_netdev_flow_put(struct dpif *dpif, const struct dpif_flow_put *put)
     netdev_flow = dp_netdev_lookup_flow(dp, &miniflow);
     if (!netdev_flow) {
         if (put->flags & DPIF_FP_CREATE) {
-            if (hmap_count(&dp->flow_table) < MAX_FLOWS) {
+            if (classifier_count(&dp->cls) < MAX_FLOWS) {
                 if (put->stats) {
                     memset(put->stats, 0, sizeof *put->stats);
                 }
@@ -1378,8 +1374,8 @@ struct dp_netdev_flow_state {
 };
 
 struct dp_netdev_flow_iter {
-    uint32_t bucket;
-    uint32_t offset;
+    struct cls_cursor cursor;
+    struct cls_rule *rule_next;
     int status;
     struct ovs_mutex mutex;
 };
@@ -1401,13 +1397,16 @@ dpif_netdev_flow_dump_state_uninit(void *state_)
 }
 
 static int
-dpif_netdev_flow_dump_start(const struct dpif *dpif OVS_UNUSED, void **iterp)
+dpif_netdev_flow_dump_start(const struct dpif *dpif, void **iterp)
 {
+    struct dp_netdev *dp = get_dp_netdev(dpif);
     struct dp_netdev_flow_iter *iter;
 
     *iterp = iter = xmalloc(sizeof *iter);
-    iter->bucket = 0;
-    iter->offset = 0;
+    fat_rwlock_rdlock(&dp->cls.rwlock);
+    cls_cursor_init(&iter->cursor, &dp->cls, NULL);
+    iter->rule_next = cls_cursor_first(&iter->cursor);
+    fat_rwlock_unlock(&dp->cls.rwlock);
     iter->status = 0;
     ovs_mutex_init(&iter->mutex);
     return 0;
@@ -1431,15 +1430,16 @@ dpif_netdev_flow_dump_next(const struct dpif *dpif, void *iter_, void *state_,
     ovs_mutex_lock(&iter->mutex);
     error = iter->status;
     if (!error) {
-        struct hmap_node *node;
+        struct cls_rule *rule = iter->rule_next;
 
         fat_rwlock_rdlock(&dp->cls.rwlock);
-        node = hmap_at_position(&dp->flow_table, &iter->bucket, &iter->offset);
-        if (node) {
-            netdev_flow = CONTAINER_OF(node, struct dp_netdev_flow, node);
+
+        if (rule) {
+            netdev_flow = CONTAINER_OF(rule, struct dp_netdev_flow, cr);
+            iter->rule_next = cls_cursor_next(&iter->cursor, rule);
         }
         fat_rwlock_unlock(&dp->cls.rwlock);
-        if (!node) {
+        if (!rule) {
             iter->status = error = EOF;
         }
     }
-- 
2.0.0.rc0




More information about the dev mailing list