[ovs-dev] [PATCH v2] lib/classifier: Lockless lookups.

Jarno Rajahalme jrajahalme at nicira.com
Mon Jun 30 23:37:15 UTC 2014


Now that all the relevant classifier structures use RCU and internal
mutual exclusion for modifications, we can remove the fat-rwlock and
thus make the classifier lookups lockless.

As readers now operate concurrently with writers, a concurrent reader
may or may not see a new rule being added by a writer, depending on how
the concurrent events overlap with each other.  Overall, this is no
different from the former locked behavior, except that there the
visibility of the new rule depended only on the timing of the locking
operations.
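
To illustrate the caller-visible change, here is a minimal sketch of a
reader-side lookup before and after this patch.  The classifier and
fat-rwlock calls are the ones touched by this patch; the local names
'cls', 'flow', 'wc' and 'rule' are only illustrative:

    /* Before: readers serialized against writers with the fat-rwlock. */
    fat_rwlock_rdlock(&cls->rwlock);
    rule = classifier_lookup(cls, flow, wc);
    fat_rwlock_unlock(&cls->rwlock);

    /* After (sketch): the lookup may run concurrently with writers; the
     * caller only needs to refrain from quiescing (RCU) while it still
     * uses 'rule'. */
    rule = classifier_lookup(cls, flow, wc);

dpif-netdev and ofproto-dpif below drop exactly these rdlock/unlock
pairs around their lookups.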

A new rule is first added to the segment indices, so readers may find
the rule in the indices before the rule is visible in the subtable's
'rules' map.  This may cost us the chance to quit a lookup early,
resulting in sub-optimal wildcarding.  This will be corrected by the
forthcoming revalidation, which is always scheduled after flow table
changes.

Similar behavior may occur because the overlapping rule (if any) is
removed from the indices only after the corresponding new rule has been
added.

The subtable's max priority is updated only after a rule is inserted
into the maps, so concurrent readers may not see the rule yet, as the
updated priority-ordered subtable list becomes visible only after the
subtable's max priority is updated.

Similarly, the classifier's partitions are updated by the caller after
the rule is inserted into the maps, so readers may keep skipping the
subtable until they see the updated partitions.
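
To make this ordering concrete, a writer roughly follows the sequence
sketched below.  This is a pseudocode summary of insert_rule() and its
caller; the helper names (add_to_segment_indices() etc.) are descriptive
stand-ins, not actual functions in the patch:

    ovs_mutex_lock(&cls->mutex);

    /* 1. Add the new rule to the segment indices; readers may already
     *    find it there. */
    add_to_segment_indices(subtable, new_rule);

    /* 2. Insert into the subtable's 'rules' map; the rule is now
     *    visible to lookups. */
    insert_into_rules_map(subtable, new_rule);

    /* 3. Remove the overlapping old rule (if any) from the indices,
     *    only after the new rule is in place. */
    remove_from_segment_indices(subtable, old_rule);

    /* 4. Update the subtable's max priority; only now does the
     *    re-sorted priority-ordered subtable list become visible. */
    update_max_priority(subtable);

    ovs_mutex_unlock(&cls->mutex);

    /* 5. The caller updates the classifier's partitions; readers may
     *    keep skipping the subtable until they see this. */
    update_partitions(cls, new_rule);

The comment block added above insert_rule() in this patch documents the
same ordering.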

Signed-off-by: Jarno Rajahalme <jrajahalme at nicira.com>
---
v2: Rebase to master (classifier lookup batching).

 lib/classifier.c        |   35 ++++++++++++++++++++++++++++++++---
 lib/classifier.h        |   45 ++++++++++++++++-----------------------------
 lib/dpif-netdev.c       |   30 ++++--------------------------
 ofproto/ofproto-dpif.c  |    2 --
 ofproto/ofproto.c       |   33 ++-------------------------------
 ofproto/ofproto.h       |    3 +++
 tests/test-classifier.c |   36 +-----------------------------------
 utilities/ovs-ofctl.c   |    4 ----
 8 files changed, 58 insertions(+), 130 deletions(-)

diff --git a/lib/classifier.c b/lib/classifier.c
index 8ba6867..fc05efe 100644
--- a/lib/classifier.c
+++ b/lib/classifier.c
@@ -481,7 +481,6 @@ classifier_init(struct classifier *cls_, const uint8_t *flow_segments)
 {
     struct cls_classifier *cls = xmalloc(sizeof *cls);
 
-    fat_rwlock_init(&cls_->rwlock);
     ovs_mutex_init(&cls->mutex);
 
     ovs_mutex_lock(&cls->mutex);
@@ -506,7 +505,8 @@ classifier_init(struct classifier *cls_, const uint8_t *flow_segments)
 }
 
 /* Destroys 'cls'.  Rules within 'cls', if any, are not freed; this is the
- * caller's responsibility. */
+ * caller's responsibility.
+ * May only be called after all the readers have been terminated. */
 void
 classifier_destroy(struct classifier *cls_)
     OVS_EXCLUDED(cls_->cls->mutex)
@@ -517,7 +517,6 @@ classifier_destroy(struct classifier *cls_)
         struct cls_subtable *subtable, *next_subtable;
         int i;
 
-        fat_rwlock_destroy(&cls_->rwlock);
         if (!cls) {
             return;
         }
@@ -682,7 +681,10 @@ classifier_is_empty(const struct classifier *cls)
 /* Returns the number of rules in 'cls'. */
 int
 classifier_count(const struct classifier *cls)
+    OVS_NO_THREAD_SAFETY_ANALYSIS
 {
+    /* n_rules is an int, so in the presence of concurrent writers this will
+     * return either the old or a new value. */
     return cls->cls->n_rules;
 }
 
@@ -1711,6 +1713,33 @@ find_equal(struct cls_subtable *subtable, const struct miniflow *flow,
     return NULL;
 }
 
+/*
+ * As the readers are operating concurrently with the modifications, a
+ * concurrent reader may or may not see the new rule, depending on how
+ * the concurrent events overlap with each other.  This is no
+ * different from the former locked behavior, but there the visibility
+ * of the new rule only depended on the timing of the locking
+ * functions.
+ *
+ * The new rule is first added to the segment indices, so the readers
+ * may find the rule in the indices before the rule is visible in the
+ * subtable's 'rules' map.  This may result in us losing the
+ * opportunity to quit lookups earlier, resulting in sub-optimal
+ * wildcarding.  This will be fixed by forthcoming revalidation always
+ * scheduled after flow table changes.
+ *
+ * Similar behavior may happen due to us removing the overlapping rule
+ * (if any) from the indices only after the new rule has been added.
+ *
+ * The subtable's max priority is updated only after the rule is
+ * inserted, so the concurrent readers may not see the rule, as the
+ * updated priority-ordered subtable list will only be visible after
+ * the subtable's max priority is updated.
+ *
+ * Similarly, the classifier's partitions for new rules are updated by
+ * the caller after this function, so the readers may keep skipping
+ * the subtable until they see the updated partitions.
+ */
 static struct cls_match *
 insert_rule(struct cls_classifier *cls, struct cls_subtable *subtable,
             struct cls_rule *new_rule)
diff --git a/lib/classifier.h b/lib/classifier.h
index 224d5bb..d0a408b 100644
--- a/lib/classifier.h
+++ b/lib/classifier.h
@@ -214,7 +214,6 @@
  * by a single writer. */
 
 #include "cmap.h"
-#include "fat-rwlock.h"
 #include "match.h"
 #include "meta-flow.h"
 
@@ -222,13 +221,9 @@
 extern "C" {
 #endif
 
-/* Needed only for the lock annotation in struct classifier. */
-extern struct ovs_mutex ofproto_mutex;
-
 /* Classifier internal data structures. */
 struct cls_classifier;
 struct cls_subtable;
-struct cls_partition;
 struct cls_match;
 
 enum {
@@ -237,7 +232,6 @@ enum {
 
 /* A flow classifier. */
 struct classifier {
-    struct fat_rwlock rwlock OVS_ACQ_AFTER(ofproto_mutex);
     struct cls_classifier *cls;
 };
 
@@ -266,38 +260,31 @@ bool cls_rule_is_catchall(const struct cls_rule *);
 bool cls_rule_is_loose_match(const struct cls_rule *rule,
                              const struct minimatch *criteria);
 
-void classifier_init(struct classifier *cls, const uint8_t *flow_segments);
+void classifier_init(struct classifier *, const uint8_t *flow_segments);
 void classifier_destroy(struct classifier *);
-bool classifier_set_prefix_fields(struct classifier *cls,
+bool classifier_set_prefix_fields(struct classifier *,
                                   const enum mf_field_id *trie_fields,
-                                  unsigned int n_trie_fields)
-    OVS_REQ_WRLOCK(cls->rwlock);
+                                  unsigned int n_trie_fields);
+
+bool classifier_is_empty(const struct classifier *);
+int classifier_count(const struct classifier *);
+void classifier_insert(struct classifier *, struct cls_rule *);
+struct cls_rule *classifier_replace(struct classifier *, struct cls_rule *);
 
-bool classifier_is_empty(const struct classifier *cls);
-int classifier_count(const struct classifier *cls)
-    OVS_REQ_RDLOCK(cls->rwlock);
-void classifier_insert(struct classifier *cls, struct cls_rule *)
-    OVS_REQ_WRLOCK(cls->rwlock);
-struct cls_rule *classifier_replace(struct classifier *cls, struct cls_rule *)
-    OVS_REQ_WRLOCK(cls->rwlock);
-void classifier_remove(struct classifier *cls, struct cls_rule *)
-    OVS_REQ_WRLOCK(cls->rwlock);
-struct cls_rule *classifier_lookup(const struct classifier *cls,
+void classifier_remove(struct classifier *, struct cls_rule *);
+struct cls_rule *classifier_lookup(const struct classifier *,
                                    const struct flow *,
-                                   struct flow_wildcards *)
-    OVS_REQ_RDLOCK(cls->rwlock);
+                                   struct flow_wildcards *);
 void classifier_lookup_miniflow_batch(const struct classifier *cls,
                                       const struct miniflow **flows,
-                                      struct cls_rule **rules, size_t len)
-    OVS_REQ_RDLOCK(cls->rwlock);
-bool classifier_rule_overlaps(const struct classifier *cls,
-                              const struct cls_rule *)
-    OVS_REQ_RDLOCK(cls->rwlock);
+                                      struct cls_rule **rules, size_t len);
+bool classifier_rule_overlaps(const struct classifier *,
+                              const struct cls_rule *);
 
-struct cls_rule *classifier_find_rule_exactly(const struct classifier *cls,
+struct cls_rule *classifier_find_rule_exactly(const struct classifier *,
                                               const struct cls_rule *);
 
-struct cls_rule *classifier_find_match_exactly(const struct classifier *cls,
+struct cls_rule *classifier_find_match_exactly(const struct classifier *,
                                                const struct match *,
                                                unsigned int priority);
 
diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
index 8447118..a96ce64 100644
--- a/lib/dpif-netdev.c
+++ b/lib/dpif-netdev.c
@@ -38,6 +38,7 @@
 #include "dpif-provider.h"
 #include "dummy.h"
 #include "dynamic-string.h"
+#include "fat-rwlock.h"
 #include "flow.h"
 #include "cmap.h"
 #include "latch.h"
@@ -125,7 +126,6 @@ struct dp_netdev_queue {
  *    dp_netdev_mutex (global)
  *    port_mutex
  *    flow_mutex
- *    cls.rwlock
  *    queue_rwlock
  */
 struct dp_netdev {
@@ -136,16 +136,11 @@ struct dp_netdev {
 
     /* Flows.
      *
-     * Readers of 'cls' must take a 'cls->rwlock' read lock.
-     *
-     * Writers of 'flow_table' must take the 'flow_mutex'.
-     *
-     * Writers of 'cls' must take the 'flow_mutex' and then the 'cls->rwlock'
-     * write lock.  (The outer 'flow_mutex' allows writers to atomically
-     * perform multiple operations on 'cls' and 'flow_table'.)
+     * Writers of 'flow_table' must take the 'flow_mutex'.  Corresponding
+     * changes to 'cls' must be made while still holding the 'flow_mutex'.
      */
     struct ovs_mutex flow_mutex;
-    struct classifier cls;      /* Classifier.  Protected by cls.rwlock. */
+    struct classifier cls;
     struct cmap flow_table OVS_GUARDED; /* Flow table. */
 
     /* Queues.
@@ -955,7 +950,6 @@ dp_netdev_flow_free(struct dp_netdev_flow *flow)
 
 static void
 dp_netdev_remove_flow(struct dp_netdev *dp, struct dp_netdev_flow *flow)
-    OVS_REQ_WRLOCK(dp->cls.rwlock)
     OVS_REQUIRES(dp->flow_mutex)
 {
     struct cls_rule *cr = CONST_CAST(struct cls_rule *, &flow->cr);
@@ -972,11 +966,9 @@ dp_netdev_flow_flush(struct dp_netdev *dp)
     struct dp_netdev_flow *netdev_flow, *next;
 
     ovs_mutex_lock(&dp->flow_mutex);
-    fat_rwlock_wrlock(&dp->cls.rwlock);
     CMAP_FOR_EACH_SAFE (netdev_flow, next, node, &dp->flow_table) {
         dp_netdev_remove_flow(dp, netdev_flow);
     }
-    fat_rwlock_unlock(&dp->cls.rwlock);
     ovs_mutex_unlock(&dp->flow_mutex);
 }
 
@@ -1073,7 +1065,6 @@ dp_netdev_flow_cast(const struct cls_rule *cr)
 
 static struct dp_netdev_flow *
 dp_netdev_lookup_flow(const struct dp_netdev *dp, const struct miniflow *key)
-    OVS_REQ_RDLOCK(dp->cls.rwlock)
 {
     struct dp_netdev_flow *netdev_flow;
     struct cls_rule *rule;
@@ -1086,7 +1077,6 @@ dp_netdev_lookup_flow(const struct dp_netdev *dp, const struct miniflow *key)
 
 static struct dp_netdev_flow *
 dp_netdev_find_flow(const struct dp_netdev *dp, const struct flow *flow)
-    OVS_REQ_RDLOCK(dp->cls.rwlock)
 {
     struct dp_netdev_flow *netdev_flow;
 
@@ -1225,9 +1215,7 @@ dpif_netdev_flow_get(const struct dpif *dpif,
         return error;
     }
 
-    fat_rwlock_rdlock(&dp->cls.rwlock);
     netdev_flow = dp_netdev_find_flow(dp, &key);
-    fat_rwlock_unlock(&dp->cls.rwlock);
 
     if (netdev_flow) {
         if (stats) {
@@ -1271,10 +1259,8 @@ dp_netdev_flow_add(struct dp_netdev *dp, const struct flow *flow,
     cmap_insert(&dp->flow_table,
                 CONST_CAST(struct cmap_node *, &netdev_flow->node),
                 flow_hash(flow, 0));
-    fat_rwlock_wrlock(&dp->cls.rwlock);
     classifier_insert(&dp->cls,
                       CONST_CAST(struct cls_rule *, &netdev_flow->cr));
-    fat_rwlock_unlock(&dp->cls.rwlock);
 
     return 0;
 }
@@ -1318,9 +1304,7 @@ dpif_netdev_flow_put(struct dpif *dpif, const struct dpif_flow_put *put)
     miniflow_init(&miniflow, &flow);
 
     ovs_mutex_lock(&dp->flow_mutex);
-    fat_rwlock_rdlock(&dp->cls.rwlock);
     netdev_flow = dp_netdev_lookup_flow(dp, &miniflow);
-    fat_rwlock_unlock(&dp->cls.rwlock);
     if (!netdev_flow) {
         if (put->flags & DPIF_FP_CREATE) {
             if (cmap_count(&dp->flow_table) < MAX_FLOWS) {
@@ -1382,7 +1366,6 @@ dpif_netdev_flow_del(struct dpif *dpif, const struct dpif_flow_del *del)
     }
 
     ovs_mutex_lock(&dp->flow_mutex);
-    fat_rwlock_wrlock(&dp->cls.rwlock);
     netdev_flow = dp_netdev_find_flow(dp, &key);
     if (netdev_flow) {
         if (del->stats) {
@@ -1392,7 +1375,6 @@ dpif_netdev_flow_del(struct dpif *dpif, const struct dpif_flow_del *del)
     } else {
         error = ENOENT;
     }
-    fat_rwlock_unlock(&dp->cls.rwlock);
     ovs_mutex_unlock(&dp->flow_mutex);
 
     return error;
@@ -1485,7 +1467,6 @@ dpif_netdev_flow_dump_next(struct dpif_flow_dump_thread *thread_,
 
     ovs_mutex_lock(&dump->mutex);
     if (!dump->status) {
-        fat_rwlock_rdlock(&dp->cls.rwlock);
         for (n_flows = 0; n_flows < MIN(max_flows, FLOW_DUMP_MAX_BATCH);
              n_flows++) {
             struct cmap_node *node;
@@ -1498,7 +1479,6 @@ dpif_netdev_flow_dump_next(struct dpif_flow_dump_thread *thread_,
             netdev_flows[n_flows] = CONTAINER_OF(node, struct dp_netdev_flow,
                                                  node);
         }
-        fat_rwlock_unlock(&dp->cls.rwlock);
     }
     ovs_mutex_unlock(&dump->mutex);
 
@@ -2081,9 +2061,7 @@ dp_netdev_input(struct dp_netdev *dp, struct dpif_packet **packets, int cnt,
         mfs[i] = &keys[i].flow;
     }
 
-    fat_rwlock_rdlock(&dp->cls.rwlock);
     classifier_lookup_miniflow_batch(&dp->cls, mfs, rules, cnt);
-    fat_rwlock_unlock(&dp->cls.rwlock);
 
     n_batches = 0;
     for (i = 0; i < cnt; i++) {
diff --git a/ofproto/ofproto-dpif.c b/ofproto/ofproto-dpif.c
index a0d5d47..3ddc1b9 100644
--- a/ofproto/ofproto-dpif.c
+++ b/ofproto/ofproto-dpif.c
@@ -3379,9 +3379,7 @@ rule_dpif_lookup_in_table(struct ofproto_dpif *ofproto, uint8_t table_id,
     }
 
 retry:
-    fat_rwlock_rdlock(&cls->rwlock);
     cls_rule = classifier_lookup(cls, flow, wc);
-    fat_rwlock_unlock(&cls->rwlock);
 
     rule = rule_dpif_cast(rule_from_cls_rule(cls_rule));
     if (rule && take_ref) {
diff --git a/ofproto/ofproto.c b/ofproto/ofproto.c
index b020379..b6252d7 100644
--- a/ofproto/ofproto.c
+++ b/ofproto/ofproto.c
@@ -1214,12 +1214,11 @@ ofproto_configure_table(struct ofproto *ofproto, int table_id,
     }
 
     table->max_flows = s->max_flows;
-    fat_rwlock_wrlock(&table->cls.rwlock);
+
     if (classifier_set_prefix_fields(&table->cls,
                                      s->prefix_fields, s->n_prefix_fields)) {
         /* XXX: Trigger revalidation. */
     }
-    fat_rwlock_unlock(&table->cls.rwlock);
 
     ovs_mutex_lock(&ofproto_mutex);
     evict_rules_from_table(table, 0);
@@ -1551,9 +1550,7 @@ ofproto_get_memory_usage(const struct ofproto *ofproto, struct simap *usage)
 
     n_rules = 0;
     OFPROTO_FOR_EACH_TABLE (table, ofproto) {
-        fat_rwlock_rdlock(&table->cls.rwlock);
         n_rules += classifier_count(&table->cls);
-        fat_rwlock_unlock(&table->cls.rwlock);
     }
     simap_increase(usage, "rules", n_rules);
 
@@ -1841,7 +1838,6 @@ ofproto_add_flow(struct ofproto *ofproto, const struct match *match,
 
     /* First do a cheap check whether the rule we're looking for already exists
      * with the actions that we want.  If it does, then we're done. */
-    fat_rwlock_rdlock(&ofproto->tables[0].cls.rwlock);
     rule = rule_from_cls_rule(classifier_find_match_exactly(
                                   &ofproto->tables[0].cls, match, priority));
     if (rule) {
@@ -1851,7 +1847,6 @@ ofproto_add_flow(struct ofproto *ofproto, const struct match *match,
     } else {
         must_add = true;
     }
-    fat_rwlock_unlock(&ofproto->tables[0].cls.rwlock);
 
     /* If there's no such rule or the rule doesn't have the actions we want,
      * fall back to a executing a full flow mod.  We can't optimize this at
@@ -1882,7 +1877,6 @@ ofproto_flow_mod(struct ofproto *ofproto, struct ofputil_flow_mod *fm)
         struct rule *rule;
         bool done = false;
 
-        fat_rwlock_rdlock(&table->cls.rwlock);
         rule = rule_from_cls_rule(classifier_find_match_exactly(&table->cls,
                                                                 &fm->match,
                                                                 fm->priority));
@@ -1907,7 +1901,6 @@ ofproto_flow_mod(struct ofproto *ofproto, struct ofputil_flow_mod *fm)
             }
             ovs_mutex_unlock(&rule->mutex);
         }
-        fat_rwlock_unlock(&table->cls.rwlock);
 
         if (done) {
             return 0;
@@ -1931,10 +1924,8 @@ ofproto_delete_flow(struct ofproto *ofproto,
 
     /* First do a cheap check whether the rule we're looking for has already
      * been deleted.  If so, then we're done. */
-    fat_rwlock_rdlock(&cls->rwlock);
     rule = rule_from_cls_rule(classifier_find_match_exactly(cls, target,
                                                             priority));
-    fat_rwlock_unlock(&cls->rwlock);
     if (!rule) {
         return;
     }
@@ -3118,9 +3109,7 @@ handle_table_stats_request(struct ofconn *ofconn,
         ots[i].instructions = htonl(OFPIT11_ALL);
         ots[i].config = htonl(OFPTC11_TABLE_MISS_MASK);
         ots[i].max_entries = htonl(1000000); /* An arbitrary big number. */
-        fat_rwlock_rdlock(&p->tables[i].cls.rwlock);
         ots[i].active_count = htonl(classifier_count(&p->tables[i].cls));
-        fat_rwlock_unlock(&p->tables[i].cls.rwlock);
     }
 
     p->ofproto_class->get_tables(p, ots);
@@ -3553,10 +3542,8 @@ collect_rules_strict(struct ofproto *ofproto,
         FOR_EACH_MATCHING_TABLE (table, criteria->table_id, ofproto) {
             struct rule *rule;
 
-            fat_rwlock_rdlock(&table->cls.rwlock);
             rule = rule_from_cls_rule(classifier_find_rule_exactly(
                                           &table->cls, &criteria->cr));
-            fat_rwlock_unlock(&table->cls.rwlock);
             if (rule) {
                 collect_rule(rule, criteria, rules, &n_readonly);
             }
@@ -4010,9 +3997,7 @@ add_flow(struct ofproto *ofproto, struct ofputil_flow_mod *fm,
     cls_rule_init(&cr, &fm->match, fm->priority);
 
     /* Transform "add" into "modify" if there's an existing identical flow. */
-    fat_rwlock_rdlock(&table->cls.rwlock);
     rule = rule_from_cls_rule(classifier_find_rule_exactly(&table->cls, &cr));
-    fat_rwlock_unlock(&table->cls.rwlock);
     if (rule) {
         struct rule_collection rules;
 
@@ -4029,13 +4014,7 @@ add_flow(struct ofproto *ofproto, struct ofputil_flow_mod *fm,
 
     /* Check for overlap, if requested. */
     if (fm->flags & OFPUTIL_FF_CHECK_OVERLAP) {
-        bool overlaps;
-
-        fat_rwlock_rdlock(&table->cls.rwlock);
-        overlaps = classifier_rule_overlaps(&table->cls, &cr);
-        fat_rwlock_unlock(&table->cls.rwlock);
-
-        if (overlaps) {
+        if (classifier_rule_overlaps(&table->cls, &cr)) {
             cls_rule_destroy(&cr);
             return OFPERR_OFPFMFC_OVERLAP;
         }
@@ -4097,9 +4076,7 @@ add_flow(struct ofproto *ofproto, struct ofputil_flow_mod *fm,
         meter_insert_rule(rule);
     }
 
-    fat_rwlock_wrlock(&table->cls.rwlock);
     classifier_insert(&table->cls, CONST_CAST(struct cls_rule *, &rule->cr));
-    fat_rwlock_unlock(&table->cls.rwlock);
 
     error = ofproto->ofproto_class->rule_insert(rule);
     if (error) {
@@ -6373,10 +6350,8 @@ oftable_init(struct oftable *table)
     table->max_flows = UINT_MAX;
     atomic_init(&table->config, (unsigned int)OFPROTO_TABLE_MISS_DEFAULT);
 
-    fat_rwlock_wrlock(&table->cls.rwlock);
     classifier_set_prefix_fields(&table->cls, default_prefix_fields,
                                  ARRAY_SIZE(default_prefix_fields));
-    fat_rwlock_unlock(&table->cls.rwlock);
 
     atomic_init(&table->n_matched, 0);
     atomic_init(&table->n_missed, 0);
@@ -6388,9 +6363,7 @@ oftable_init(struct oftable *table)
 static void
 oftable_destroy(struct oftable *table)
 {
-    fat_rwlock_rdlock(&table->cls.rwlock);
     ovs_assert(classifier_is_empty(&table->cls));
-    fat_rwlock_unlock(&table->cls.rwlock);
     oftable_disable_eviction(table);
     classifier_destroy(&table->cls);
     free(table->name);
@@ -6483,9 +6456,7 @@ oftable_remove_rule__(struct ofproto *ofproto, struct rule *rule)
 {
     struct classifier *cls = &ofproto->tables[rule->table_id].cls;
 
-    fat_rwlock_wrlock(&cls->rwlock);
     classifier_remove(cls, CONST_CAST(struct cls_rule *, &rule->cr));
-    fat_rwlock_unlock(&cls->rwlock);
 
     cookies_remove(ofproto, rule);
 
diff --git a/ofproto/ofproto.h b/ofproto/ofproto.h
index beabcf2..d601181 100644
--- a/ofproto/ofproto.h
+++ b/ofproto/ofproto.h
@@ -45,6 +45,9 @@ struct shash;
 struct simap;
 struct smap;
 
+/* Needed for the lock annotations. */
+extern struct ovs_mutex ofproto_mutex;
+
 struct ofproto_controller_info {
     bool is_connected;
     enum ofp12_controller_role role;
diff --git a/tests/test-classifier.c b/tests/test-classifier.c
index c63b7dc..1d5515c 100644
--- a/tests/test-classifier.c
+++ b/tests/test-classifier.c
@@ -400,7 +400,6 @@ get_value(unsigned int *x, unsigned n_values)
 
 static void
 compare_classifiers(struct classifier *cls, struct tcls *tcls)
-    OVS_REQ_RDLOCK(cls->rwlock)
 {
     static const int confidence = 500;
     unsigned int i;
@@ -456,9 +455,7 @@ destroy_classifier(struct classifier *cls)
     struct test_rule *rule, *next_rule;
 
     CLS_FOR_EACH_SAFE (rule, next_rule, cls_rule, cls) {
-        fat_rwlock_wrlock(&cls->rwlock);
         classifier_remove(cls, &rule->cls_rule);
-        fat_rwlock_unlock(&cls->rwlock);
         free_rule(rule);
     }
     classifier_destroy(cls);
@@ -515,7 +512,7 @@ verify_tries(struct classifier *cls_)
 
 static void
 check_tables(const struct classifier *cls, int n_tables, int n_rules,
-             int n_dups) OVS_EXCLUDED(cls->rwlock)
+             int n_dups)
 {
     const struct cls_subtable *table;
     struct test_rule *test_rule;
@@ -697,7 +694,6 @@ static enum mf_field_id trie_fields[2] = {
 
 static void
 set_prefix_fields(struct classifier *cls)
-    OVS_REQ_WRLOCK(cls->rwlock)
 {
     verify_tries(cls);
     classifier_set_prefix_fields(cls, trie_fields, ARRAY_SIZE(trie_fields));
@@ -712,13 +708,11 @@ test_empty(int argc OVS_UNUSED, char *argv[] OVS_UNUSED)
     struct tcls tcls;
 
     classifier_init(&cls, flow_segment_u32s);
-    fat_rwlock_wrlock(&cls.rwlock);
     set_prefix_fields(&cls);
     tcls_init(&tcls);
     assert(classifier_is_empty(&cls));
     assert(tcls_is_empty(&tcls));
     compare_classifiers(&cls, &tcls);
-    fat_rwlock_unlock(&cls.rwlock);
     classifier_destroy(&cls);
     tcls_destroy(&tcls);
 }
@@ -745,23 +739,19 @@ test_single_rule(int argc OVS_UNUSED, char *argv[] OVS_UNUSED)
                          hash_bytes(&wc_fields, sizeof wc_fields, 0), 0);
 
         classifier_init(&cls, flow_segment_u32s);
-        fat_rwlock_wrlock(&cls.rwlock);
         set_prefix_fields(&cls);
         tcls_init(&tcls);
 
         tcls_rule = tcls_insert(&tcls, rule);
         classifier_insert(&cls, &rule->cls_rule);
         compare_classifiers(&cls, &tcls);
-        fat_rwlock_unlock(&cls.rwlock);
         check_tables(&cls, 1, 1, 0);
 
-        fat_rwlock_wrlock(&cls.rwlock);
         classifier_remove(&cls, &rule->cls_rule);
         tcls_remove(&tcls, tcls_rule);
         assert(classifier_is_empty(&cls));
         assert(tcls_is_empty(&tcls));
         compare_classifiers(&cls, &tcls);
-        fat_rwlock_unlock(&cls.rwlock);
 
         free_rule(rule);
         classifier_destroy(&cls);
@@ -787,25 +777,21 @@ test_rule_replacement(int argc OVS_UNUSED, char *argv[] OVS_UNUSED)
         rule2->aux += 5;
 
         classifier_init(&cls, flow_segment_u32s);
-        fat_rwlock_wrlock(&cls.rwlock);
         set_prefix_fields(&cls);
         tcls_init(&tcls);
         tcls_insert(&tcls, rule1);
         classifier_insert(&cls, &rule1->cls_rule);
         compare_classifiers(&cls, &tcls);
-        fat_rwlock_unlock(&cls.rwlock);
         check_tables(&cls, 1, 1, 0);
         tcls_destroy(&tcls);
 
         tcls_init(&tcls);
         tcls_insert(&tcls, rule2);
 
-        fat_rwlock_wrlock(&cls.rwlock);
         assert(test_rule_from_cls_rule(
                    classifier_replace(&cls, &rule2->cls_rule)) == rule1);
         free_rule(rule1);
         compare_classifiers(&cls, &tcls);
-        fat_rwlock_unlock(&cls.rwlock);
         check_tables(&cls, 1, 1, 0);
 
         tcls_destroy(&tcls);
@@ -904,16 +890,13 @@ test_many_rules_in_one_list (int argc OVS_UNUSED, char *argv[] OVS_UNUSED)
             }
 
             classifier_init(&cls, flow_segment_u32s);
-            fat_rwlock_wrlock(&cls.rwlock);
             set_prefix_fields(&cls);
-            fat_rwlock_unlock(&cls.rwlock);
             tcls_init(&tcls);
 
             for (i = 0; i < ARRAY_SIZE(ops); i++) {
                 int j = ops[i];
                 int m, n;
 
-                fat_rwlock_wrlock(&cls.rwlock);
                 if (!tcls_rules[j]) {
                     struct test_rule *displaced_rule;
 
@@ -937,7 +920,6 @@ test_many_rules_in_one_list (int argc OVS_UNUSED, char *argv[] OVS_UNUSED)
                     pri_rules[pris[j]] = -1;
                 }
                 compare_classifiers(&cls, &tcls);
-                fat_rwlock_unlock(&cls.rwlock);
 
                 n = 0;
                 for (m = 0; m < N_RULES; m++) {
@@ -946,14 +928,12 @@ test_many_rules_in_one_list (int argc OVS_UNUSED, char *argv[] OVS_UNUSED)
                 check_tables(&cls, n > 0, n, n - 1);
             }
 
-            fat_rwlock_wrlock(&cls.rwlock);
             for (i = 0; i < N_RULES; i++) {
                 if (rules[i]->cls_rule.cls_match) {
                     classifier_remove(&cls, &rules[i]->cls_rule);
                 }
                 free_rule(rules[i]);
             }
-            fat_rwlock_unlock(&cls.rwlock);
             classifier_destroy(&cls);
             tcls_destroy(&tcls);
         } while (next_permutation(ops, ARRAY_SIZE(ops)));
@@ -1012,9 +992,7 @@ test_many_rules_in_one_table(int argc OVS_UNUSED, char *argv[] OVS_UNUSED)
         } while ((1 << count_ones(value_mask)) < N_RULES);
 
         classifier_init(&cls, flow_segment_u32s);
-        fat_rwlock_wrlock(&cls.rwlock);
         set_prefix_fields(&cls);
-        fat_rwlock_unlock(&cls.rwlock);
         tcls_init(&tcls);
 
         for (i = 0; i < N_RULES; i++) {
@@ -1027,20 +1005,16 @@ test_many_rules_in_one_table(int argc OVS_UNUSED, char *argv[] OVS_UNUSED)
             rules[i] = make_rule(wcf, priority, value_pats[i]);
             tcls_rules[i] = tcls_insert(&tcls, rules[i]);
 
-            fat_rwlock_wrlock(&cls.rwlock);
             classifier_insert(&cls, &rules[i]->cls_rule);
             compare_classifiers(&cls, &tcls);
-            fat_rwlock_unlock(&cls.rwlock);
 
             check_tables(&cls, 1, i + 1, 0);
         }
 
         for (i = 0; i < N_RULES; i++) {
             tcls_remove(&tcls, tcls_rules[i]);
-            fat_rwlock_wrlock(&cls.rwlock);
             classifier_remove(&cls, &rules[i]->cls_rule);
             compare_classifiers(&cls, &tcls);
-            fat_rwlock_unlock(&cls.rwlock);
             free_rule(rules[i]);
 
             check_tables(&cls, i < N_RULES - 1, N_RULES - (i + 1), 0);
@@ -1080,9 +1054,7 @@ test_many_rules_in_n_tables(int n_tables)
         shuffle(priorities, ARRAY_SIZE(priorities));
 
         classifier_init(&cls, flow_segment_u32s);
-        fat_rwlock_wrlock(&cls.rwlock);
         set_prefix_fields(&cls);
-        fat_rwlock_unlock(&cls.rwlock);
         tcls_init(&tcls);
 
         for (i = 0; i < MAX_RULES; i++) {
@@ -1092,10 +1064,8 @@ test_many_rules_in_n_tables(int n_tables)
             int value_pat = random_uint32() & ((1u << CLS_N_FIELDS) - 1);
             rule = make_rule(wcf, priority, value_pat);
             tcls_insert(&tcls, rule);
-            fat_rwlock_wrlock(&cls.rwlock);
             classifier_insert(&cls, &rule->cls_rule);
             compare_classifiers(&cls, &tcls);
-            fat_rwlock_unlock(&cls.rwlock);
             check_tables(&cls, -1, i + 1, -1);
         }
 
@@ -1107,16 +1077,12 @@ test_many_rules_in_n_tables(int n_tables)
 
             CLS_FOR_EACH_TARGET_SAFE (rule, next_rule, cls_rule, &cls,
                                       &target->cls_rule) {
-                fat_rwlock_wrlock(&cls.rwlock);
                 classifier_remove(&cls, &rule->cls_rule);
-                fat_rwlock_unlock(&cls.rwlock);
                 free_rule(rule);
             }
 
             tcls_delete_matches(&tcls, &target->cls_rule);
-            fat_rwlock_rdlock(&cls.rwlock);
             compare_classifiers(&cls, &tcls);
-            fat_rwlock_unlock(&cls.rwlock);
             check_tables(&cls, -1, -1, -1);
             free_rule(target);
         }
diff --git a/utilities/ovs-ofctl.c b/utilities/ovs-ofctl.c
index b001c24..de68f47 100644
--- a/utilities/ovs-ofctl.c
+++ b/utilities/ovs-ofctl.c
@@ -2319,9 +2319,7 @@ fte_free_all(struct classifier *cls)
     struct fte *fte, *next;
 
     CLS_FOR_EACH_SAFE (fte, next, rule, cls) {
-        fat_rwlock_wrlock(&cls->rwlock);
         classifier_remove(cls, &fte->rule);
-        fat_rwlock_unlock(&cls->rwlock);
         fte_free(fte);
     }
     classifier_destroy(cls);
@@ -2342,9 +2340,7 @@ fte_insert(struct classifier *cls, const struct match *match,
     cls_rule_init(&fte->rule, match, priority);
     fte->versions[index] = version;
 
-    fat_rwlock_wrlock(&cls->rwlock);
     old = fte_from_cls_rule(classifier_replace(cls, &fte->rule));
-    fat_rwlock_unlock(&cls->rwlock);
     if (old) {
         fte_version_free(old->versions[index]);
         fte->versions[!index] = old->versions[!index];
-- 
1.7.10.4



