[ovs-dev] [RFC 2/2] dpif-netdev: Use per pmdthread datapath statistics
Daniele Di Proietto
ddiproietto at vmware.com
Sat Sep 20 19:12:47 UTC 2014
Signed-off-by: Daniele Di Proietto <ddiproietto at vmware.com>
---
lib/dpif-netdev.c | 70 ++++++++++++++++++-------------------------------------
1 file changed, 23 insertions(+), 47 deletions(-)
diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
index 90fe01c..e5f2aa0 100644
--- a/lib/dpif-netdev.c
+++ b/lib/dpif-netdev.c
@@ -62,6 +62,7 @@
#include "shash.h"
#include "sset.h"
#include "timeval.h"
+#include "u64-stats-lock.h"
#include "unixctl.h"
#include "util.h"
#include "vlog.h"
@@ -178,11 +179,6 @@ struct dp_netdev {
struct classifier cls;
struct cmap flow_table OVS_GUARDED; /* Flow table. */
- /* Statistics.
- *
- * ovsthread_stats is internally synchronized. */
- struct ovsthread_stats stats; /* Contains 'struct dp_netdev_stats *'. */
-
/* Ports.
*
* Protected by RCU. Take the mutex to add or remove ports. */
@@ -225,9 +221,9 @@ enum dp_stat_type {
/* Contained by struct dp_netdev's 'stats' member. */
struct dp_netdev_stats {
- struct ovs_mutex mutex; /* Protects 'n'. */
+ struct u64_stats_lock lock;
- /* Indexed by DP_STAT_*, protected by 'mutex'. */
+ /* Indexed by DP_STAT_*. */
unsigned long long int n[DP_N_STATS] OVS_GUARDED;
};
@@ -357,6 +353,7 @@ struct dp_netdev_pmd_thread {
* need to be protected (e.g. by 'dp_netdev_mutex'). All other
* instances will only be accessed by its own pmd thread. */
struct emc_cache flow_cache;
+ struct dp_netdev_stats stats; /* Per-thread datapath statistics. */
struct latch exit_latch; /* For terminating the pmd thread. */
atomic_uint change_seq; /* For reloading pmd ports. */
pthread_t thread;
@@ -560,8 +557,6 @@ create_dp_netdev(const char *name, const struct dpif_class *class,
classifier_init(&dp->cls, NULL);
cmap_init(&dp->flow_table);
- ovsthread_stats_init(&dp->stats);
-
ovs_mutex_init(&dp->port_mutex);
cmap_init(&dp->ports);
dp->port_seq = seq_create();
@@ -625,8 +620,6 @@ dp_netdev_free(struct dp_netdev *dp)
OVS_REQUIRES(dp_netdev_mutex)
{
struct dp_netdev_port *port;
- struct dp_netdev_stats *bucket;
- int i;
shash_find_and_delete(&dp_netdevs, dp->name);
@@ -641,12 +634,6 @@ dp_netdev_free(struct dp_netdev *dp)
}
ovs_mutex_unlock(&dp->port_mutex);
- OVSTHREAD_STATS_FOR_EACH_BUCKET (bucket, i, &dp->stats) {
- ovs_mutex_destroy(&bucket->mutex);
- free_cacheline(bucket);
- }
- ovsthread_stats_destroy(&dp->stats);
-
classifier_destroy(&dp->cls);
cmap_destroy(&dp->flow_table);
ovs_mutex_destroy(&dp->flow_mutex);
@@ -701,18 +688,18 @@ static int
dpif_netdev_get_stats(const struct dpif *dpif, struct dpif_dp_stats *stats)
{
struct dp_netdev *dp = get_dp_netdev(dpif);
- struct dp_netdev_stats *bucket;
- size_t i;
+ struct dp_netdev_pmd_thread *pmd;
stats->n_flows = cmap_count(&dp->flow_table);
stats->n_hit = stats->n_missed = stats->n_lost = 0;
- OVSTHREAD_STATS_FOR_EACH_BUCKET (bucket, i, &dp->stats) {
- ovs_mutex_lock(&bucket->mutex);
- stats->n_hit += bucket->n[DP_STAT_HIT];
- stats->n_missed += bucket->n[DP_STAT_MISS];
- stats->n_lost += bucket->n[DP_STAT_LOST];
- ovs_mutex_unlock(&bucket->mutex);
+
+ CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
+ u64_stats_lock_acquire(&pmd->stats.lock);
+ stats->n_hit += pmd->stats.n[DP_STAT_HIT];
+ stats->n_missed += pmd->stats.n[DP_STAT_MISS];
+ stats->n_lost += pmd->stats.n[DP_STAT_LOST];
+ u64_stats_lock_release(&pmd->stats.lock);
}
stats->n_masks = UINT32_MAX;
stats->n_mask_hit = UINT64_MAX;
@@ -2395,23 +2382,13 @@ dp_netdev_flow_used(struct dp_netdev_flow *netdev_flow,
ovs_mutex_unlock(&bucket->mutex);
}
-static void *
-dp_netdev_stats_new_cb(void)
-{
- struct dp_netdev_stats *bucket = xzalloc_cacheline(sizeof *bucket);
- ovs_mutex_init(&bucket->mutex);
- return bucket;
-}
-
static void
-dp_netdev_count_packet(struct dp_netdev *dp, enum dp_stat_type type, int cnt)
+dp_netdev_count_packet(struct dp_netdev_pmd_thread *pmd,
+ enum dp_stat_type type, int cnt)
{
- struct dp_netdev_stats *bucket;
-
- bucket = ovsthread_stats_bucket_get(&dp->stats, dp_netdev_stats_new_cb);
- ovs_mutex_lock(&bucket->mutex);
- bucket->n[type] += cnt;
- ovs_mutex_unlock(&bucket->mutex);
+ u64_stats_lock_acquire(&pmd->stats.lock);
+ pmd->stats.n[type] += cnt;
+ u64_stats_lock_release(&pmd->stats.lock);
}
static int
@@ -2422,10 +2399,6 @@ dp_netdev_upcall(struct dp_netdev *dp, struct dpif_packet *packet_,
{
struct ofpbuf *packet = &packet_->ofpbuf;
- if (type == DPIF_UC_MISS) {
- dp_netdev_count_packet(dp, DP_STAT_MISS, 1);
- }
-
if (OVS_UNLIKELY(!dp->upcall_cb)) {
return ENODEV;
}
@@ -2517,7 +2490,7 @@ packet_batch_execute(struct packet_batch *batch,
dp_netdev_execute_actions(pmd, batch->packets, batch->packet_count, true,
&batch->md, actions->actions, actions->size);
- dp_netdev_count_packet(pmd->dp, DP_STAT_HIT, batch->packet_count);
+ dp_netdev_count_packet(pmd, DP_STAT_HIT, batch->packet_count);
}
static inline bool
@@ -2644,6 +2617,7 @@ fast_path_processing(struct dp_netdev_pmd_thread *pmd,
uint64_t actions_stub[512 / 8], slow_stub[512 / 8];
struct ofpbuf actions, put_actions;
struct match match;
+ int miss_cnt = 0;
ofpbuf_use_stub(&actions, actions_stub, sizeof actions_stub);
ofpbuf_use_stub(&put_actions, slow_stub, sizeof slow_stub);
@@ -2665,6 +2639,7 @@ fast_path_processing(struct dp_netdev_pmd_thread *pmd,
rules[i] = CONST_CAST(struct cls_rule *, &netdev_flow->cr);
continue;
}
+ miss_cnt++;
miniflow_expand(mfs[i], &match.flow);
@@ -2703,6 +2678,7 @@ fast_path_processing(struct dp_netdev_pmd_thread *pmd,
}
ovs_mutex_unlock(&dp->flow_mutex);
}
+ dp_netdev_count_packet(pmd, DP_STAT_MISS, miss_cnt);
ofpbuf_uninit(&actions);
ofpbuf_uninit(&put_actions);
@@ -2770,8 +2746,8 @@ dp_execute_cb(void *aux_, struct dpif_packet **packets, int cnt,
{
struct dp_netdev_execute_aux *aux = aux_;
uint32_t *depth = recirc_depth_get();
- struct dp_netdev_pmd_thread *pmd= aux->pmd;
- struct dp_netdev *dp= pmd->dp;
+ struct dp_netdev_pmd_thread *pmd = aux->pmd;
+ struct dp_netdev *dp = pmd->dp;
int type = nl_attr_type(a);
struct dp_netdev_port *p;
int i;
--
2.1.0.rc1
More information about the dev
mailing list