[ovs-dev] [RFC 3/5] dpif-netdev: Do not sleep when swapping queues.
Eelco Chaudron
echaudro at redhat.com
Tue May 7 13:55:02 UTC 2019
This patch looks fine to me assuming Ilya’s comments in patch 1 are
taken care of.
Acked-by: Eelco Chaudron <echaudro at redhat.com>
On 30 Apr 2019, at 14:18, David Marchand wrote:
> When swapping queues from a pmd thread to another (q0 polled by
> pmd0/q1
> polled by pmd1 -> q1 polled by pmd0/q0 polled by pmd1), the current
> "Step 5" puts both pmds to sleep waiting for the control thread to
> wake
> them up later.
>
> Prefer to make them spin in such a case to avoid sleeping an
> undeterministic amount of time.
>
> Signed-off-by: David Marchand <david.marchand at redhat.com>
> ---
> lib/dpif-netdev.c | 47
> ++++++++++++++++++++++++++++++++++++++++++-----
> 1 file changed, 42 insertions(+), 5 deletions(-)
>
> diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
> index b2b21fd..d67cdb5 100644
> --- a/lib/dpif-netdev.c
> +++ b/lib/dpif-netdev.c
> @@ -684,6 +684,7 @@ struct dp_netdev_pmd_thread {
> struct seq *reload_seq;
> uint64_t last_reload_seq;
> atomic_bool reload; /* Do we need to reload ports? */
> + atomic_bool wait_for_reload; /* Can we busy wait for the next
> reload? */
> atomic_bool exit; /* For terminating the pmd
> thread. */
> pthread_t thread;
> unsigned core_id; /* CPU core id of this pmd
> thread. */
> @@ -4893,6 +4894,33 @@ reconfigure_datapath(struct dp_netdev *dp)
> HMAP_FOR_EACH_SAFE (poll, poll_next, node, &pmd->poll_list) {
> if (poll->rxq->pmd != pmd) {
> dp_netdev_del_rxq_from_pmd(pmd, poll);
> +
> + /* This pmd might sleep after this step reload if it
> has no
> + * rxq remaining. Can we tell it to busy wait for new
> rxq at
> + * Step 6 ? */
> + if (hmap_count(&pmd->poll_list) == 0) {
> + HMAP_FOR_EACH (port, node, &dp->ports) {
> + int qid;
> +
> + if (!netdev_is_pmd(port->netdev)) {
> + continue;
> + }
> +
> + for (qid = 0; qid < port->n_rxq; qid++) {
> + struct dp_netdev_rxq *q =
> &port->rxqs[qid];
> +
> + if (q->pmd == pmd) {
> +
> atomic_store_relaxed(&q->pmd->wait_for_reload,
> + true);
> + break;
> + }
> + }
> +
> + if (qid != port->n_rxq) {
> + break;
> + }
> + }
> + }
> }
> }
> ovs_mutex_unlock(&pmd->port_mutex);
> @@ -5410,7 +5438,9 @@ pmd_thread_main(void *f_)
> struct pmd_perf_stats *s = &pmd->perf_stats;
> unsigned int lc = 0;
> struct polled_queue *poll_list;
> + bool wait_for_reload = false;
> bool exiting;
> + bool reload;
> int poll_cnt;
> int i;
> int process_packets = 0;
> @@ -5438,9 +5468,16 @@ reload:
> }
>
> if (!poll_cnt) {
> - while (seq_read(pmd->reload_seq) == pmd->last_reload_seq) {
> - seq_wait(pmd->reload_seq, pmd->last_reload_seq);
> - poll_block();
> + /* Don't sleep, control thread will ask for a reload shortly.
> */
> + if (wait_for_reload) {
> + do {
> + atomic_read_relaxed(&pmd->reload, &reload);
> + } while (!reload);
> + } else {
> + while (seq_read(pmd->reload_seq) == pmd->last_reload_seq)
> {
> + seq_wait(pmd->reload_seq, pmd->last_reload_seq);
> + poll_block();
> + }
> }
> lc = UINT_MAX;
> }
> @@ -5479,8 +5516,6 @@ reload:
> }
>
> if (lc++ > 1024) {
> - bool reload;
> -
> lc = 0;
>
> coverage_try_clear();
> @@ -5500,6 +5535,7 @@ reload:
> ovs_mutex_unlock(&pmd->perf_stats.stats_mutex);
>
> poll_cnt = pmd_load_queues_and_ports(pmd, &poll_list);
> + atomic_read_relaxed(&pmd->wait_for_reload, &wait_for_reload);
> atomic_read_relaxed(&pmd->exit, &exiting);
> /* Signal here to make sure the pmd finishes
> * reloading the updated configuration. */
> @@ -5834,6 +5870,7 @@ dpif_netdev_enable_upcall(struct dpif *dpif)
> static void
> dp_netdev_pmd_reload_done(struct dp_netdev_pmd_thread *pmd)
> {
> + atomic_store_relaxed(&pmd->wait_for_reload, false);
> atomic_store_relaxed(&pmd->reload, false);
> pmd->last_reload_seq = seq_read(pmd->reload_seq);
> atomic_count_dec(&pmd->dp->reloading_pmds);
> --
> 1.8.3.1
More information about the dev
mailing list