[ovs-dev] [PATCH 4/5] dpif-netdev: Assign PMD for failed pinned rxqs.

Kevin Traynor ktraynor at redhat.com
Fri Jun 4 21:18:55 UTC 2021


Previously, if pmd-rxq-affinity was used to pin an rxq to
a core that was not in pmd-cpu-mask, the rxq was not polled
and the user received a warning.

Now that pinned and non-pinned rxqs are assigned to PMDs in
a common call to rxq scheduling, if an invalid core is
selected in pmd-rxq-affinity, the rxq can be assigned to an
available PMD (if any).

A warning will still be logged, as the requested core could
not be used.
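
As an illustration (the port name and core numbers here are
hypothetical), with a pmd-cpu-mask that enables PMDs on cores 1 and 2
only:

    $ ovs-vsctl set Open_vSwitch . other_config:pmd-cpu-mask=0x6
    $ ovs-vsctl set Interface p1 other_config:pmd-rxq-affinity="0:4"

there is no PMD on core 4, so rxq 0 of p1 was previously left
unpolled. With this patch it is instead assigned to one of the
non-isolated PMDs on core 1 or 2, and the warning is still logged.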

Signed-off-by: Kevin Traynor <ktraynor at redhat.com>
---
 Documentation/topics/dpdk/pmd.rst |  6 +++---
 lib/dpif-netdev.c                 | 30 ++++++++++++++++++++++++++++--
 tests/pmd.at                      |  5 ++++-
 3 files changed, 35 insertions(+), 6 deletions(-)

diff --git a/Documentation/topics/dpdk/pmd.rst b/Documentation/topics/dpdk/pmd.rst
index d1c45cdfb..29ba53954 100644
--- a/Documentation/topics/dpdk/pmd.rst
+++ b/Documentation/topics/dpdk/pmd.rst
@@ -108,7 +108,7 @@ means that this thread will only poll the *pinned* Rx queues.
 
    If there are no *non-isolated* PMD threads, *non-pinned* RX queues will not
-   be polled. Also, if the provided ``<core-id>`` is not available (e.g. the
-   ``<core-id>`` is not in ``pmd-cpu-mask``), the RX queue will not be polled
-   by any PMD thread.
+   be polled. If the provided ``<core-id>`` is not available (e.g. the
+   ``<core-id>`` is not in ``pmd-cpu-mask``), the RX queue will be assigned to
+   a *non-isolated* PMD, which will remain *non-isolated*.
 
 If ``pmd-rxq-affinity`` is not set for Rx queues, they will be assigned to PMDs
diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
index 61e0a516f..377573233 100644
--- a/lib/dpif-netdev.c
+++ b/lib/dpif-netdev.c
@@ -5027,4 +5027,25 @@ find_sched_pmd_by_pmd(struct sched_numa_list *numa_list,
 }
 
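+/* Find the sched_pmd that 'rxq' has been assigned to, or NULL if it has
+ * not been assigned to any sched_pmd. */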
+static struct sched_pmd *
+find_sched_pmd_by_rxq(struct sched_numa_list *numa_list,
+                      struct dp_netdev_rxq *rxq)
+{
+    struct sched_numa *numa;
+
+    HMAP_FOR_EACH (numa, node, &numa_list->numas) {
+        for (unsigned i = 0; i < numa->n_pmds; i++) {
+            struct sched_pmd *sched_pmd;
+
+            sched_pmd = &numa->pmds[i];
+            for (int k = 0; k < sched_pmd->n_rxq; k++) {
+                if (sched_pmd->rxqs[k] == rxq) {
+                    return sched_pmd;
+                }
+            }
+        }
+    }
+    return NULL;
+}
+
 static struct sched_numa *
 sched_numa_list_find_numa(struct sched_numa_list *numa_list,
@@ -5408,5 +5429,6 @@ sched_numa_list_schedule(struct sched_numa_list *numa_list,
                             "Core %2u cannot be pinned with "
                             "port \'%s\' rx queue %d. Use pmd-cpu-mask to "
-                            "enable a pmd on core %u.",
+                            "enable a pmd on core %u. An alternative core "
+                            "will be assigned.",
                             rxq->core_id,
                             netdev_rxq_get_name(rxq->rx),
@@ -5453,5 +5475,9 @@ sched_numa_list_schedule(struct sched_numa_list *numa_list,
 
         if (rxq->core_id != OVS_CORE_UNSPEC) {
-            continue;
+            /* This rxq should have been pinned; check that it was. */
+            sched_pmd = find_sched_pmd_by_rxq(numa_list, rxq);
+            if (sched_pmd && sched_pmd->pmd->core_id == rxq->core_id) {
+                continue;
+            }
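+            /* Pinning failed; fall through so that this rxq is assigned
+             * to a non-isolated PMD below. */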
         }
 
diff --git a/tests/pmd.at b/tests/pmd.at
index 78105bf45..55977632a 100644
--- a/tests/pmd.at
+++ b/tests/pmd.at
@@ -552,7 +552,10 @@ AT_CHECK([ovs-vsctl set Open_vSwitch . other_config:pmd-cpu-mask=6])
 
 dnl We removed the cores requested by some queues from pmd-cpu-mask.
-dnl Those queues will not be polled.
+dnl Those queues will be polled by the remaining non-isolated pmds.
 AT_CHECK([ovs-appctl dpif-netdev/pmd-rxq-show | parse_pmd_rxq_show], [0], [dnl
+p1 0 0 1
+p1 1 0 1
 p1 2 0 2
+p1 3 0 1
 ])
 
-- 
2.31.1


