[ovs-dev] [PATCH v8 1/2] dpif-netdev: unique and sequential tx_qids.

Ilya Maximets i.maximets at samsung.com
Wed Dec 23 12:16:21 UTC 2015


Currently tx_qid is equal to pmd->core_id. This leads to unexpected
behavior if pmd-cpu-mask is different from '/(0*)(1|3|7)?(f*)/',
e.g. if core_ids are not sequential, or do not start from 0, or both.

Example:
	starting 2 pmd threads with 1 port, 2 rxqs per port,
	pmd-cpu-mask = 00000014 and let dev->real_n_txq = 2

	In this case pmd_1->tx_qid = 2, pmd_2->tx_qid = 4 and
	txq_needs_locking = true (if the device doesn't have
	ovs_numa_get_n_cores()+1 queues).

	Then, after the truncation in netdev_dpdk_send__():
		'qid = qid % dev->real_n_txq;'
	pmd_1: qid = 2 % 2 = 0
	pmd_2: qid = 4 % 2 = 0

	So, both threads will call dpdk_queue_pkts() with the same qid = 0.
	This is unexpected behavior if there are 2 tx queues in the device.
	Queue #1 will not be used and both threads will lock queue #0
	on each send.
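
For illustration, a minimal standalone sketch of that truncation with
the values from the example above (plain C, none of it is OVS code):

	#include <stdio.h>

	int
	main(void)
	{
	    const int real_n_txq = 2;        /* device exposes 2 tx queues */
	    const int tx_qids[] = { 2, 4 };  /* tx_qid == core_id for cores 2 and 4 */

	    for (int i = 0; i < 2; i++) {
	        /* Same truncation as in netdev_dpdk_send__(). */
	        printf("pmd_%d: qid = %d %% %d = %d\n",
	               i + 1, tx_qids[i], real_n_txq, tx_qids[i] % real_n_txq);
	    }
	    /* Both threads end up with qid 0: queue #1 is never used and
	     * every send contends for the lock on queue #0. */
	    return 0;
	}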

Fix that by introducing a per-pmd-thread 'global_index', which is unique
and sequential across pmd threads and is used to calculate tx_qid. When
pmd threads are deleted (e.g. all threads on a numa node), the freed
indexes are handed back to the remaining threads so that the surviving
indexes stay unique and within range.
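
A minimal sketch of that reindexing idea with made-up index values (the
patch applies it to dp->poll_threads in dp_netdev_del_pmds_on_numa()
below; this snippet is illustrative only):

	#include <stdio.h>

	int
	main(void)
	{
	    int alive[] = { 2, 3 };     /* global indexes of surviving pmds */
	    int free_idx[] = { 0, 1 };  /* indexes freed by deleted pmds */
	    int n_alive = 2, k = 2;

	    /* Hand freed indexes to survivors that are now out of range,
	     * so surviving indexes stay unique and within [0, n_alive). */
	    for (int i = 0; i < n_alive; i++) {
	        if (alive[i] >= n_alive) {
	            alive[i] = free_idx[--k];
	        }
	    }

	    for (int i = 0; i < n_alive; i++) {
	        printf("survivor %d: global_index = %d\n", i, alive[i]);
	    }
	    return 0;
	}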

Signed-off-by: Ilya Maximets <i.maximets at samsung.com>
---
 lib/dpif-netdev.c | 46 ++++++++++++++++++++++++++++++++++------------
 1 file changed, 34 insertions(+), 12 deletions(-)

diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
index cd72e62..c300e8a 100644
--- a/lib/dpif-netdev.c
+++ b/lib/dpif-netdev.c
@@ -425,6 +425,9 @@ struct dp_netdev_pmd_thread {
     pthread_t thread;
     int index;                      /* Idx of this pmd thread among pmd*/
                                     /* threads on same numa node. */
+    int global_index;               /* Idx of this pmd thread among all
+                                     * pmd threads in dp. Used to
+                                     * calculate tx_qid.*/
     unsigned core_id;               /* CPU core id of this pmd thread. */
     int numa_id;                    /* numa node id of this pmd thread. */
     int tx_qid;                     /* Queue id used by this pmd thread to
@@ -1272,6 +1275,13 @@ get_port_by_name(struct dp_netdev *dp,
 }
 
 static int
+get_n_pmd_threads(struct dp_netdev *dp)
+{
+    /* There is one non-pmd thread in dp->poll_threads. */
+    return cmap_count(&dp->poll_threads) - 1;
+}
+
+static int
 get_n_pmd_threads_on_numa(struct dp_netdev *dp, int numa_id)
 {
     struct dp_netdev_pmd_thread *pmd;
@@ -2605,6 +2615,8 @@ pmd_load_queues(struct dp_netdev_pmd_thread *pmd,
     n_pmds_on_numa = get_n_pmd_threads_on_numa(pmd->dp, pmd->numa_id);
     index = 0;
 
+    pmd->tx_qid = pmd->global_index;
+
     CMAP_FOR_EACH (port, node, &pmd->dp->ports) {
         /* Calls port_try_ref() to prevent the main thread
          * from deleting the port. */
@@ -2807,16 +2819,6 @@ dp_netdev_pmd_get_next(struct dp_netdev *dp, struct cmap_position *pos)
     return next;
 }
 
-static int
-core_id_to_qid(unsigned core_id)
-{
-    if (core_id != NON_PMD_CORE_ID) {
-        return core_id;
-    } else {
-        return ovs_numa_get_n_cores();
-    }
-}
-
 /* Configures the 'pmd' based on the input argument. */
 static void
 dp_netdev_configure_pmd(struct dp_netdev_pmd_thread *pmd, struct dp_netdev *dp,
@@ -2824,10 +2826,14 @@ dp_netdev_configure_pmd(struct dp_netdev_pmd_thread *pmd, struct dp_netdev *dp,
 {
     pmd->dp = dp;
     pmd->index = index;
+    pmd->global_index = get_n_pmd_threads(dp);
     pmd->core_id = core_id;
-    pmd->tx_qid = core_id_to_qid(core_id);
     pmd->numa_id = numa_id;
 
+    if (core_id == NON_PMD_CORE_ID) {
+        pmd->tx_qid = ovs_numa_get_n_cores();
+    }
+
     ovs_refcount_init(&pmd->ref_cnt);
     latch_init(&pmd->exit_latch);
     atomic_init(&pmd->change_seq, PMD_INITIAL_SEQ);
@@ -2893,17 +2899,33 @@ dp_netdev_destroy_all_pmds(struct dp_netdev *dp)
     }
 }
 
-/* Deletes all pmd threads on numa node 'numa_id'. */
+/* Deletes all pmd threads on numa node 'numa_id' and
+ * fixes global indexes of other threads. */
 static void
 dp_netdev_del_pmds_on_numa(struct dp_netdev *dp, int numa_id)
 {
     struct dp_netdev_pmd_thread *pmd;
+    int n_pmds_on_numa, n_pmds;
+    int *free_idx, k = 0;
+
+    n_pmds_on_numa = get_n_pmd_threads_on_numa(dp, numa_id);
+    free_idx = xmalloc(n_pmds_on_numa * sizeof *free_idx);
 
     CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
         if (pmd->numa_id == numa_id) {
+            free_idx[k++] = pmd->global_index;
             dp_netdev_del_pmd(dp, pmd);
         }
     }
+
+    n_pmds = get_n_pmd_threads(dp);
+    CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
+        if (pmd->global_index >= n_pmds) {
+            pmd->global_index = free_idx[--k];
+        }
+    }
+
+    free(free_idx);
 }
 
 /* Checks the numa node id of 'netdev' and starts pmd threads for
-- 
2.1.4