[ovs-dev] [PATCH 6/7] ofproto-dpif: Use ovs_spinlock for stats.

Jarno Rajahalme jrajahalme at nicira.com
Wed Feb 12 00:30:48 UTC 2014


The ovs_mutex protecting the stats showed up high in 'perf' profiles, so use a
spinlock instead.

Signed-off-by: Jarno Rajahalme <jrajahalme at nicira.com>
---
 ofproto/ofproto-dpif.c |   95 +++++++++++++++++++++++++++---------------------
 1 file changed, 54 insertions(+), 41 deletions(-)
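
Note: the ovs_spinlock API used below (ovs_spinlock_init/lock/unlock/destroy)
is presumably introduced earlier in this series and is not shown in this
patch.  For readers without the rest of the series, a minimal sketch of the
kind of wrapper these calls assume might look like the following; the struct
layout, the use of pthread spinlocks, and the lack of error handling are
assumptions, not the series' actual implementation:

    #include <pthread.h>

    /* Illustrative sketch only: a thin wrapper around pthread spinlocks of
     * the sort the calls below assume.  The real ovs_spinlock may differ. */
    struct ovs_spinlock {
        pthread_spinlock_t lock;
    };

    static inline void
    ovs_spinlock_init(struct ovs_spinlock *sl)
    {
        /* Lock is private to this process, so no process-shared setup. */
        pthread_spin_init(&sl->lock, PTHREAD_PROCESS_PRIVATE);
    }

    static inline void
    ovs_spinlock_lock(struct ovs_spinlock *sl)
    {
        pthread_spin_lock(&sl->lock);
    }

    static inline void
    ovs_spinlock_unlock(struct ovs_spinlock *sl)
    {
        pthread_spin_unlock(&sl->lock);
    }

    static inline void
    ovs_spinlock_destroy(struct ovs_spinlock *sl)
    {
        pthread_spin_destroy(&sl->lock);
    }

Because a spinlock busy-waits instead of sleeping, the critical sections below
are kept short and memory allocation/freeing is moved outside them (see
group_modify() and group_destruct()).  The OVS_NO_THREAD_SAFETY_ANALYSIS
annotations added below also suggest the new lock is not wired into Clang's
thread-safety analysis the way ovs_mutex is.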

diff --git a/ofproto/ofproto-dpif.c b/ofproto/ofproto-dpif.c
index 08570f1..7c12582 100644
--- a/ofproto/ofproto-dpif.c
+++ b/ofproto/ofproto-dpif.c
@@ -86,7 +86,7 @@ struct rule_dpif {
      *
      *   - Do include packets and bytes from datapath flows which have not
      *   recently been processed by a revalidator. */
-    struct ovs_mutex stats_mutex;
+    struct ovs_spinlock stats_spinlock;
     uint64_t packet_count OVS_GUARDED;  /* Number of packets received. */
     uint64_t byte_count OVS_GUARDED;    /* Number of bytes received. */
     long long int used;                 /* Last used time (msec). */
@@ -104,7 +104,7 @@ struct group_dpif {
      *
      *   - Do include packets and bytes from datapath flows which have not
      *   recently been processed by a revalidator. */
-    struct ovs_mutex stats_mutex;
+    struct ovs_spinlock stats_spinlock;
     uint64_t packet_count OVS_GUARDED;  /* Number of packets received. */
     uint64_t byte_count OVS_GUARDED;    /* Number of bytes received. */
     struct bucket_counter *bucket_stats OVS_GUARDED;  /* Bucket statistics. */
@@ -289,7 +289,7 @@ struct ofproto_dpif {
     bool lacp_enabled;
     struct mbridge *mbridge;
 
-    struct ovs_mutex stats_mutex;
+    struct ovs_spinlock stats_spinlock;
     struct netdev_stats stats OVS_GUARDED; /* To account packets generated and
                                             * consumed in userspace. */
 
@@ -1049,7 +1049,7 @@ construct(struct ofproto *ofproto_)
     ofproto->mbridge = mbridge_create();
     ofproto->has_bonded_bundles = false;
     ofproto->lacp_enabled = false;
-    ovs_mutex_init(&ofproto->stats_mutex);
+    ovs_spinlock_init(&ofproto->stats_spinlock);
     ovs_mutex_init(&ofproto->vsp_mutex);
 
     guarded_list_init(&ofproto->pins);
@@ -1221,7 +1221,7 @@ destruct(struct ofproto *ofproto_)
     sset_destroy(&ofproto->ghost_ports);
     sset_destroy(&ofproto->port_poll_set);
 
-    ovs_mutex_destroy(&ofproto->stats_mutex);
+    ovs_spinlock_destroy(&ofproto->stats_spinlock);
     ovs_mutex_destroy(&ofproto->vsp_mutex);
 
     close_dpif_backer(ofproto->backer);
@@ -2778,7 +2778,7 @@ port_get_stats(const struct ofport *ofport_, struct netdev_stats *stats)
     if (!error && ofport_->ofp_port == OFPP_LOCAL) {
         struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
 
-        ovs_mutex_lock(&ofproto->stats_mutex);
+        ovs_spinlock_lock(&ofproto->stats_spinlock);
         /* ofproto->stats.tx_packets represents packets that we created
          * internally and sent to some port (e.g. packets sent with
          * ofproto_dpif_send_packet()).  Account for them as if they had
@@ -2803,7 +2803,7 @@ port_get_stats(const struct ofport *ofport_, struct netdev_stats *stats)
         if (stats->tx_bytes != UINT64_MAX) {
             stats->tx_bytes += ofproto->stats.rx_bytes;
         }
-        ovs_mutex_unlock(&ofproto->stats_mutex);
+        ovs_spinlock_unlock(&ofproto->stats_spinlock);
     }
 
     return error;
@@ -2940,9 +2940,9 @@ rule_expire(struct rule_dpif *rule)
     if (reason < 0 && idle_timeout) {
         long long int used;
 
-        ovs_mutex_lock(&rule->stats_mutex);
+        ovs_spinlock_lock(&rule->stats_spinlock);
         used = rule->used;
-        ovs_mutex_unlock(&rule->stats_mutex);
+        ovs_spinlock_unlock(&rule->stats_spinlock);
 
         if (now > used + idle_timeout * 1000) {
             reason = OFPRR_IDLE_TIMEOUT;
@@ -3008,11 +3008,11 @@ void
 rule_dpif_credit_stats(struct rule_dpif *rule,
                        const struct dpif_flow_stats *stats)
 {
-    ovs_mutex_lock(&rule->stats_mutex);
+    ovs_spinlock_lock(&rule->stats_spinlock);
     rule->packet_count += stats->n_packets;
     rule->byte_count += stats->n_bytes;
     rule->used = MAX(rule->used, stats->used);
-    ovs_mutex_unlock(&rule->stats_mutex);
+    ovs_spinlock_unlock(&rule->stats_spinlock);
 }
 
 bool
@@ -3176,7 +3176,7 @@ rule_construct(struct rule *rule_)
     OVS_NO_THREAD_SAFETY_ANALYSIS
 {
     struct rule_dpif *rule = rule_dpif_cast(rule_);
-    ovs_mutex_init(&rule->stats_mutex);
+    ovs_spinlock_init(&rule->stats_spinlock);
     rule->packet_count = 0;
     rule->byte_count = 0;
     rule->used = rule->up.modified;
@@ -3203,7 +3203,7 @@ static void
 rule_destruct(struct rule *rule_)
 {
     struct rule_dpif *rule = rule_dpif_cast(rule_);
-    ovs_mutex_destroy(&rule->stats_mutex);
+    ovs_spinlock_destroy(&rule->stats_spinlock);
 }
 
 static void
@@ -3212,11 +3212,11 @@ rule_get_stats(struct rule *rule_, uint64_t *packets, uint64_t *bytes,
 {
     struct rule_dpif *rule = rule_dpif_cast(rule_);
 
-    ovs_mutex_lock(&rule->stats_mutex);
+    ovs_spinlock_lock(&rule->stats_spinlock);
     *packets = rule->packet_count;
     *bytes = rule->byte_count;
     *used = rule->used;
-    ovs_mutex_unlock(&rule->stats_mutex);
+    ovs_spinlock_unlock(&rule->stats_spinlock);
 }
 
 static void
@@ -3244,10 +3244,10 @@ rule_modify_actions(struct rule *rule_, bool reset_counters)
     struct rule_dpif *rule = rule_dpif_cast(rule_);
 
     if (reset_counters) {
-        ovs_mutex_lock(&rule->stats_mutex);
+        ovs_spinlock_lock(&rule->stats_spinlock);
         rule->packet_count = 0;
         rule->byte_count = 0;
-        ovs_mutex_unlock(&rule->stats_mutex);
+        ovs_spinlock_unlock(&rule->stats_spinlock);
     }
 
     complete_operation(rule);
@@ -3274,7 +3274,7 @@ group_dealloc(struct ofgroup *group_)
 
 static void
 group_construct_stats(struct group_dpif *group)
-    OVS_REQUIRES(group->stats_mutex)
+    OVS_NO_THREAD_SAFETY_ANALYSIS
 {
     group->packet_count = 0;
     group->byte_count = 0;
@@ -3289,31 +3289,27 @@ group_construct_stats(struct group_dpif *group)
 
 static enum ofperr
 group_construct(struct ofgroup *group_)
+    OVS_NO_THREAD_SAFETY_ANALYSIS
 {
     struct group_dpif *group = group_dpif_cast(group_);
-    ovs_mutex_init(&group->stats_mutex);
-    ovs_mutex_lock(&group->stats_mutex);
+    ovs_spinlock_init(&group->stats_spinlock);
     group_construct_stats(group);
-    ovs_mutex_unlock(&group->stats_mutex);
     return 0;
 }
 
 static void
-group_destruct__(struct group_dpif *group)
-    OVS_REQUIRES(group->stats_mutex)
-{
-    free(group->bucket_stats);
-    group->bucket_stats = NULL;
-}
-
-static void
 group_destruct(struct ofgroup *group_)
 {
     struct group_dpif *group = group_dpif_cast(group_);
-    ovs_mutex_lock(&group->stats_mutex);
-    group_destruct__(group);
-    ovs_mutex_unlock(&group->stats_mutex);
-    ovs_mutex_destroy(&group->stats_mutex);
+    struct bucket_counter *bucket_stats;
+
+    ovs_spinlock_lock(&group->stats_spinlock);
+    bucket_stats = group->bucket_stats;
+    group->bucket_stats = NULL;
+    ovs_spinlock_unlock(&group->stats_spinlock);
+    ovs_spinlock_destroy(&group->stats_spinlock);
+
+    free(bucket_stats);
 }
 
 static enum ofperr
@@ -3321,14 +3317,31 @@ group_modify(struct ofgroup *group_, struct ofgroup *victim_)
 {
     struct group_dpif *group = group_dpif_cast(group_);
     struct group_dpif *victim = group_dpif_cast(victim_);
+    struct bucket_counter *bucket_stats = NULL;
 
-    ovs_mutex_lock(&group->stats_mutex);
+    /* Allocate memory before critical section if needed. */
     if (victim->up.n_buckets < group->up.n_buckets) {
-        group_destruct__(group);
+        bucket_stats = xcalloc(group->up.n_buckets,
+                               sizeof *group->bucket_stats);
     }
-    group_construct_stats(group);
-    ovs_mutex_unlock(&group->stats_mutex);
 
+    ovs_spinlock_lock(&group->stats_spinlock);
+    group->packet_count = 0;
+    group->byte_count = 0;
+    if (bucket_stats) { /* Swap with newly allocated bucket stats. */
+        struct bucket_counter *temp = bucket_stats;
+        bucket_stats = group->bucket_stats;
+        group->bucket_stats = temp;
+    } else {
+        memset(group->bucket_stats, 0, group->up.n_buckets *
+               sizeof *group->bucket_stats);
+    }
+    ovs_spinlock_unlock(&group->stats_spinlock);
+
+    /* Free memory after critical section if needed. */
+    if (bucket_stats) {
+        free(bucket_stats);
+    }
     return 0;
 }
 
@@ -3337,12 +3350,12 @@ group_get_stats(const struct ofgroup *group_, struct ofputil_group_stats *ogs)
 {
     struct group_dpif *group = group_dpif_cast(group_);
 
-    ovs_mutex_lock(&group->stats_mutex);
+    ovs_spinlock_lock(&group->stats_spinlock);
     ogs->packet_count = group->packet_count;
     ogs->byte_count = group->byte_count;
     memcpy(ogs->bucket_stats, group->bucket_stats,
            group->up.n_buckets * sizeof *group->bucket_stats);
-    ovs_mutex_unlock(&group->stats_mutex);
+    ovs_spinlock_unlock(&group->stats_spinlock);
 
     return 0;
 }
@@ -3393,10 +3406,10 @@ ofproto_dpif_send_packet(const struct ofport_dpif *ofport, struct ofpbuf *packet
 
     error = xlate_send_packet(ofport, packet);
 
-    ovs_mutex_lock(&ofproto->stats_mutex);
+    ovs_spinlock_lock(&ofproto->stats_spinlock);
     ofproto->stats.tx_packets++;
     ofproto->stats.tx_bytes += packet->size;
-    ovs_mutex_unlock(&ofproto->stats_mutex);
+    ovs_spinlock_unlock(&ofproto->stats_spinlock);
     return error;
 }
 
-- 
1.7.10.4