[ovs-dev] [PATCH v3 2/6] netdev-dpdk: Add netdev_dpdk_txq_flush function.

Bhanuprakash Bodireddy bhanuprakash.bodireddy at intel.com
Thu Jun 29 22:39:32 UTC 2017


This commit adds the netdev_dpdk_txq_flush() function. If any packets
are waiting in the intermediate queue, they are transmitted immediately
using rte_eth_tx_burst(). When XPS is enabled, the tx queue's lock is
taken before the queue is flushed.
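For context, the buffering-plus-flush pattern that the intermediate
queue implements can be sketched standalone as below. This is an
illustration only, not OVS code: the names (intermediate_queue,
tx_burst, queue_packet, queue_flush) are hypothetical stand-ins for
the dpdk_tx_queue fields and the rte_eth_tx_burst() call used by the
real patches.

    #include <stdio.h>

    #define BURST_THRESHOLD 32   /* Mirrors INTERIM_QUEUE_BURST_THRESHOLD. */

    struct intermediate_queue {
        void *pkts[BURST_THRESHOLD];  /* Buffered packet pointers. */
        int cnt;                      /* Number of buffered packets. */
    };

    /* Stand-in for an rte_eth_tx_burst()-style transmit call. */
    static void
    tx_burst(void **pkts, int cnt)
    {
        printf("transmitting %d packets in one burst\n", cnt);
    }

    /* Buffer one packet; transmit a full burst once the threshold is hit. */
    static void
    queue_packet(struct intermediate_queue *q, void *pkt)
    {
        q->pkts[q->cnt++] = pkt;
        if (q->cnt == BURST_THRESHOLD) {
            tx_burst(q->pkts, q->cnt);
            q->cnt = 0;
        }
    }

    /* Transmit whatever is left, as netdev_dpdk_txq_flush() does below. */
    static void
    queue_flush(struct intermediate_queue *q)
    {
        if (q->cnt) {
            tx_burst(q->pkts, q->cnt);
            q->cnt = 0;
        }
    }

    int
    main(void)
    {
        struct intermediate_queue q = { .cnt = 0 };
        int dummy[40];
        int i;

        for (i = 0; i < 40; i++) {
            queue_packet(&q, &dummy[i]);  /* One full burst fires at 32. */
        }
        queue_flush(&q);                  /* Flush the remaining 8. */
        return 0;
    }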

Signed-off-by: Bhanuprakash Bodireddy <bhanuprakash.bodireddy at intel.com>
Signed-off-by: Antonio Fischetti <antonio.fischetti at intel.com>
Co-authored-by: Antonio Fischetti <antonio.fischetti at intel.com>
Signed-off-by: Markus Magnusson <markus.magnusson at ericsson.com>
Co-authored-by: Markus Magnusson <markus.magnusson at ericsson.com>
Acked-by: Eelco Chaudron <echaudro at redhat.com>
---
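Illustration only, not part of this patch: a datapath caller of this
flush might look like the simplified sketch below. It assumes the
generic netdev_txq_flush(netdev, qid, concurrent_txq) wrapper added
earlier in this series; the flush_tx_queues() helper, the port array,
and the once-per-iteration policy are hypothetical.

    #include "netdev.h"

    /* Flush the intermediate tx queue of every port this thread sends on.
     * For DPDK ports this dispatches to netdev_dpdk_txq_flush(). */
    static void
    flush_tx_queues(struct netdev **netdevs, int n_ports, int qid,
                    bool concurrent_txq)
    {
        int i;

        for (i = 0; i < n_ports; i++) {
            netdev_txq_flush(netdevs[i], qid, concurrent_txq);
        }
    }
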
 lib/netdev-dpdk.c | 31 +++++++++++++++++++++++++++++--
 1 file changed, 29 insertions(+), 2 deletions(-)

diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c
index 9ca4433..dd42716 100644
--- a/lib/netdev-dpdk.c
+++ b/lib/netdev-dpdk.c
@@ -293,6 +293,11 @@ struct dpdk_mp {
     struct ovs_list list_node OVS_GUARDED_BY(dpdk_mp_mutex);
 };
 
+/* Queue 'INTERIM_QUEUE_BURST_THRESHOLD' packets before transmitting.
+ * Defaults to 'NETDEV_MAX_BURST' (32) packets.
+ */
+#define INTERIM_QUEUE_BURST_THRESHOLD NETDEV_MAX_BURST
+
 /* There should be one 'struct dpdk_tx_queue' created for
  * each cpu core. */
 struct dpdk_tx_queue {
@@ -302,6 +307,12 @@ struct dpdk_tx_queue {
                                     * pmd threads (see 'concurrent_txq'). */
     int map;                       /* Mapping of configured vhost-user queues
                                     * to enabled by guest. */
+    int dpdk_pkt_cnt;              /* Number of buffered packets waiting to
+                                    * be sent on the DPDK tx queue. */
+    struct rte_mbuf *dpdk_burst_pkts[INTERIM_QUEUE_BURST_THRESHOLD];
+                                   /* Intermediate queue where packets can
+                                    * be buffered to amortize the cost of MMIO
+                                    * writes. */
 };
 
 /* dpdk has no way to remove dpdk ring ethernet devices
@@ -1897,9 +1908,25 @@ netdev_dpdk_send__(struct netdev_dpdk *dev, int qid,
  * few packets (< INTERIM_QUEUE_BURST_THRESHOLD) buffered in the queue.
  */
 static int
-netdev_dpdk_txq_flush(struct netdev *netdev OVS_UNUSED,
-                      int qid OVS_UNUSED, bool concurrent_txq OVS_UNUSED)
+netdev_dpdk_txq_flush(struct netdev *netdev,
+                      int qid, bool concurrent_txq)
 {
+    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
+    struct dpdk_tx_queue *txq = &dev->tx_q[qid];
+
+    if (OVS_LIKELY(txq->dpdk_pkt_cnt)) {
+        if (OVS_UNLIKELY(concurrent_txq)) {
+            qid = qid % dev->up.n_txq;
+            rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
+        }
+
+        netdev_dpdk_eth_tx_burst(dev, qid, txq->dpdk_burst_pkts,
+                                 txq->dpdk_pkt_cnt);
+
+        if (OVS_UNLIKELY(concurrent_txq)) {
+            rte_spinlock_unlock(&dev->tx_q[qid].tx_lock);
+        }
+    }
     return 0;
 }
 
-- 
2.4.11


