[ovs-dev] [PATCH 2/5] dpif-netdev: Trigger parallel pmd reloads.
David Marchand
david.marchand at redhat.com
Thu May 23 14:23:49 UTC 2019
pmd reloads are currently serialised at each step that calls
reload_affected_pmds.
Any pmd that is busy processing packets, waiting on a mutex, etc. makes
the other pmd threads wait for a delay that can be non-deterministic as
syscalls add up.
Switch to a short busy loop on the control thread, watching an atomic
count of the pmds still reloading.
The memory ordering on this atomic is release-acquire, so that it acts
as an explicit synchronisation point between the pmd threads and the
control thread.
Signed-off-by: David Marchand <david.marchand at redhat.com>
Acked-by: Eelco Chaudron <echaudro at redhat.com>
---
lib/dpif-netdev.c | 50 +++++++++++++++++++++++++++++++++++++-------------
1 file changed, 37 insertions(+), 13 deletions(-)
---
Changelog since RFC v1:
- added memory ordering on the 'reloading_pmds' atomic so that it serves
  as a synchronisation point between the pmd threads and the control
  thread
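For reviewers, a minimal standalone sketch (not OVS code; the worker
count and names are made up for illustration) of the release-acquire
pattern this patch relies on: each worker decrements a shared atomic
counter with release ordering when its reload work is done, and the
control thread spins on acquire loads until the counter reaches zero,
at which point all of the workers' prior writes are visible to it.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define N_WORKERS 4

static atomic_uint reloading;

static void *
worker_main(void *arg)
{
    (void) arg;
    /* ... perform the per-thread reload work here ... */

    /* The release decrement publishes all the writes made above to
     * whichever thread later observes the new counter value with an
     * acquire load. */
    atomic_fetch_sub_explicit(&reloading, 1, memory_order_release);
    return NULL;
}

int
main(void)
{
    pthread_t workers[N_WORKERS];

    /* Arm the counter before kicking the workers. */
    atomic_store_explicit(&reloading, N_WORKERS, memory_order_relaxed);

    for (int i = 0; i < N_WORKERS; i++) {
        pthread_create(&workers[i], NULL, worker_main, NULL);
    }

    /* Busy loop: the acquire loads pair with the workers' release
     * decrements, so once zero is read, every worker's reload work is
     * visible to this thread. */
    while (atomic_load_explicit(&reloading, memory_order_acquire) != 0) {
        continue;
    }
    printf("all workers reloaded\n");

    for (int i = 0; i < N_WORKERS; i++) {
        pthread_join(workers[i], NULL);
    }
    return 0;
}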
diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
index 19d7f7d..23cf6a6 100644
--- a/lib/dpif-netdev.c
+++ b/lib/dpif-netdev.c
@@ -334,6 +334,9 @@ struct dp_netdev {
/* The time that a packet can wait in output batch for sending. */
atomic_uint32_t tx_flush_interval;
+ /* Count of pmds currently reloading. */
+ atomic_uint32_t reloading_pmds;
+
/* Meters. */
struct ovs_mutex meter_locks[N_METER_LOCKS];
struct dp_meter *meters[MAX_METERS]; /* Meter bands. */
@@ -646,9 +649,6 @@ struct dp_netdev_pmd_thread {
struct ovs_refcount ref_cnt; /* Every reference must be refcount'ed. */
struct cmap_node node; /* In 'dp->poll_threads'. */
- pthread_cond_t cond; /* For synchronizing pmd thread reload. */
- struct ovs_mutex cond_mutex; /* Mutex for condition variable. */
-
/* Per thread exact-match cache. Note, the instance for cpu core
* NON_PMD_CORE_ID can be accessed by multiple threads, and thusly
* need to be protected by 'non_pmd_mutex'. Every other instance
@@ -1524,6 +1524,8 @@ create_dp_netdev(const char *name, const struct dpif_class *class,
atomic_init(&dp->emc_insert_min, DEFAULT_EM_FLOW_INSERT_MIN);
atomic_init(&dp->tx_flush_interval, DEFAULT_TX_FLUSH_INTERVAL);
+ atomic_init(&dp->reloading_pmds, 0);
+
cmap_init(&dp->poll_threads);
dp->pmd_rxq_assign_cyc = true;
@@ -1753,11 +1755,8 @@ dp_netdev_reload_pmd__(struct dp_netdev_pmd_thread *pmd)
return;
}
- ovs_mutex_lock(&pmd->cond_mutex);
seq_change(pmd->reload_seq);
atomic_store_explicit(&pmd->reload, true, memory_order_release);
- ovs_mutex_cond_wait(&pmd->cond, &pmd->cond_mutex);
- ovs_mutex_unlock(&pmd->cond_mutex);
}
static uint32_t
@@ -4640,9 +4639,31 @@ rxq_scheduling(struct dp_netdev *dp, bool pinned) OVS_REQUIRES(dp->port_mutex)
}
static void
+wait_reloading_pmds(struct dp_netdev *dp)
+{
+ uint32_t reloading;
+
+ do {
+ atomic_read_explicit(&dp->reloading_pmds, &reloading,
+ memory_order_acquire);
+ } while (reloading != 0);
+}
+
+static void
reload_affected_pmds(struct dp_netdev *dp)
{
struct dp_netdev_pmd_thread *pmd;
+ unsigned int pmd_count = 0;
+
+ CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
+ if (pmd->core_id == NON_PMD_CORE_ID) {
+ continue;
+ }
+ if (pmd->need_reload) {
+ pmd_count++;
+ }
+ }
+ atomic_store_relaxed(&dp->reloading_pmds, pmd_count);
CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
if (pmd->need_reload) {
@@ -4651,6 +4672,10 @@ reload_affected_pmds(struct dp_netdev *dp)
pmd->need_reload = false;
}
}
+
+ if (pmd_count != 0) {
+ wait_reloading_pmds(dp);
+ }
}
static void
@@ -5812,11 +5837,12 @@ dpif_netdev_enable_upcall(struct dpif *dpif)
static void
dp_netdev_pmd_reload_done(struct dp_netdev_pmd_thread *pmd)
{
- ovs_mutex_lock(&pmd->cond_mutex);
+ uint32_t old;
+
atomic_store_relaxed(&pmd->reload, false);
pmd->last_reload_seq = seq_read(pmd->reload_seq);
- xpthread_cond_signal(&pmd->cond);
- ovs_mutex_unlock(&pmd->cond_mutex);
+ atomic_sub_explicit(&pmd->dp->reloading_pmds, 1, &old,
+ memory_order_release);
}
/* Finds and refs the dp_netdev_pmd_thread on core 'core_id'. Returns
@@ -5901,8 +5927,6 @@ dp_netdev_configure_pmd(struct dp_netdev_pmd_thread *pmd, struct dp_netdev *dp,
pmd->reload_seq = seq_create();
pmd->last_reload_seq = seq_read(pmd->reload_seq);
atomic_init(&pmd->reload, false);
- xpthread_cond_init(&pmd->cond, NULL);
- ovs_mutex_init(&pmd->cond_mutex);
ovs_mutex_init(&pmd->flow_mutex);
ovs_mutex_init(&pmd->port_mutex);
cmap_init(&pmd->flow_table);
@@ -5945,8 +5969,6 @@ dp_netdev_destroy_pmd(struct dp_netdev_pmd_thread *pmd)
cmap_destroy(&pmd->flow_table);
ovs_mutex_destroy(&pmd->flow_mutex);
seq_destroy(pmd->reload_seq);
- xpthread_cond_destroy(&pmd->cond);
- ovs_mutex_destroy(&pmd->cond_mutex);
ovs_mutex_destroy(&pmd->port_mutex);
free(pmd);
}
@@ -5966,7 +5988,9 @@ dp_netdev_del_pmd(struct dp_netdev *dp, struct dp_netdev_pmd_thread *pmd)
ovs_mutex_unlock(&dp->non_pmd_mutex);
} else {
atomic_store_relaxed(&pmd->exit, true);
+ atomic_store_relaxed(&dp->reloading_pmds, 1);
dp_netdev_reload_pmd__(pmd);
+ wait_reloading_pmds(dp);
xpthread_join(pmd->thread, NULL);
}
--
1.8.3.1