[ovs-dev] [PATCH v2 2/7] netdev-dpdk: Adapt the requested number of tx and rx queues.
Ethan Jackson
ethan at nicira.com
Fri Apr 10 01:21:51 UTC 2015
Acked-by: Ethan Jackson <ethan at nicira.com>
On Thu, Apr 2, 2015 at 9:00 AM, Daniele Di Proietto
<diproiettod at vmware.com> wrote:
> This commit changes the semantics of 'netdev_set_multiq()' to allow OVS
> DPDK to run on devices with limited multi-queue support.
>
> * If a netdev doesn't have the requested number of rxqs, it can simply
> inform the datapath without failing.
> * If a netdev doesn't have the requested number of txqs, it should try
> to create as many as possible and use locking.
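
To make the new contract concrete, here is a minimal provider-side sketch
(not part of the patch; 'example_dev', 'example_dev_cast()', 'hw_max_rxq'
and 'hw_max_txq' are illustrative stand-ins for a provider's own state):

    static int
    example_set_multiq(struct netdev *netdev_, unsigned int n_txq,
                       unsigned int n_rxq)
    {
        struct example_dev *dev = example_dev_cast(netdev_);

        /* rxqs are best effort: create fewer than requested if the
         * hardware cannot provide them, and report the real number back
         * through 'netdev->n_rxq' so the datapath can adapt. */
        netdev_->n_rxq = MIN(n_rxq, dev->hw_max_rxq);

        /* The caller will send from 'n_txq' threads concurrently.  If
         * the hardware has fewer queues, share them and serialize
         * access with per-queue locks on the send path. */
        dev->real_n_txq = MIN(n_txq, dev->hw_max_txq);
        dev->txq_needs_locking = dev->real_n_txq != n_txq;
        netdev_->n_txq = n_txq;

        return 0;
    }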
>
> Signed-off-by: Daniele Di Proietto <diproiettod at vmware.com>
> ---
> lib/netdev-dpdk.c | 96 ++++++++++++++++++++++++++++++++++-----------------
> lib/netdev-provider.h | 11 ++++++
> lib/netdev.c | 10 ++++++
> vswitchd/vswitch.xml | 2 +-
> 4 files changed, 87 insertions(+), 32 deletions(-)
>
> diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c
> index 16bdae3..28effd9 100644
> --- a/lib/netdev-dpdk.c
> +++ b/lib/netdev-dpdk.c
> @@ -186,6 +186,10 @@ struct dpdk_tx_queue {
> bool flush_tx; /* Set to true to flush queue every time */
> /* pkts are queued. */
> int count;
> + rte_spinlock_t tx_lock; /* Protects the members and the NIC queue
> + * from concurrent access. It is used only
> + * if the queue is shared among different
> + * pmd threads (see 'txq_needs_locking'). */
> uint64_t tsc;
> struct rte_mbuf *burst_pkts[MAX_TX_QUEUE_LEN];
> };
> @@ -233,9 +237,19 @@ struct netdev_dpdk {
> /* virtio-net structure for vhost device */
> OVSRCU_TYPE(struct virtio_net *) virtio_dev;
>
> + /* The user might request more txqs than the NIC has. We remap the
> + * requested number ('up.n_txq') onto the available ones ('real_n_txq').
> + * If the numbers match, 'txq_needs_locking' is false; otherwise it is
> + * true and we take a spinlock on transmission. */
> + int real_n_txq;
> + bool txq_needs_locking;
> +
> + /* Spinlock for vhost transmission. Other DPDK devices use spinlocks in
> + * dpdk_tx_queue. */
> + rte_spinlock_t vhost_tx_lock;
> +
> /* In dpdk_list. */
> struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
> - rte_spinlock_t txq_lock;
> };
>
> struct netdev_rxq_dpdk {
> @@ -433,6 +447,7 @@ static int
> dpdk_eth_dev_init(struct netdev_dpdk *dev) OVS_REQUIRES(dpdk_mutex)
> {
> struct rte_pktmbuf_pool_private *mbp_priv;
> + struct rte_eth_dev_info info;
> struct ether_addr eth_addr;
> int diag;
> int i;
> @@ -441,14 +456,24 @@ dpdk_eth_dev_init(struct netdev_dpdk *dev) OVS_REQUIRES(dpdk_mutex)
> return ENODEV;
> }
>
> - diag = rte_eth_dev_configure(dev->port_id, dev->up.n_rxq, dev->up.n_txq,
> + rte_eth_dev_info_get(dev->port_id, &info);
> + /* Adjust the number of rx queues to those available on the device. */
> + dev->up.n_rxq = MIN(info.max_rx_queues, dev->up.n_rxq);
> +
> + /* Adjust the number of tx queues. This change is not visible to the user.
> + * We will pretend that there are 'dev->up.n_txq' queues, while using only
> + * 'dev->real_n_txq'. */
> + dev->real_n_txq = MIN(info.max_tx_queues, dev->up.n_txq);
> +
> + diag = rte_eth_dev_configure(dev->port_id, dev->up.n_rxq, dev->real_n_txq,
> &port_conf);
> if (diag) {
> - VLOG_ERR("eth dev config error %d",diag);
> + VLOG_ERR("eth dev config error %d. rxq:%d txq:%d", diag, dev->up.n_rxq,
> + dev->real_n_txq);
> return -diag;
> }
>
> - for (i = 0; i < dev->up.n_txq; i++) {
> + for (i = 0; i < dev->real_n_txq; i++) {
> diag = rte_eth_tx_queue_setup(dev->port_id, i, NIC_PORT_TX_Q_SIZE,
> dev->socket_id, &tx_conf);
> if (diag) {
> @@ -510,14 +535,20 @@ netdev_dpdk_alloc_txq(struct netdev_dpdk *netdev, unsigned int n_txqs)
> int i;
>
> netdev->tx_q = dpdk_rte_mzalloc(n_txqs * sizeof *netdev->tx_q);
> - /* Each index is considered as a cpu core id, since there should
> - * be one tx queue for each cpu core. */
> for (i = 0; i < n_txqs; i++) {
> int numa_id = ovs_numa_get_numa_id(i);
>
> - /* If the corresponding core is not on the same numa node
> - * as 'netdev', flags the 'flush_tx'. */
> - netdev->tx_q[i].flush_tx = netdev->socket_id == numa_id;
> + if (!netdev->txq_needs_locking) {
> + /* Each index is considered a cpu core id, since there should
> + * be one tx queue for each cpu core.
> + * If the corresponding core is not on the same numa node
> + * as 'netdev', set 'flush_tx'. */
> + netdev->tx_q[i].flush_tx = netdev->socket_id == numa_id;
> + } else {
> + /* Queues are shared among CPUs. Always flush. */
> + netdev->tx_q[i].flush_tx = true;
> + }
> + rte_spinlock_init(&netdev->tx_q[i].tx_lock);
> }
> }
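
(An aside on the flush policy above: when a queue is shared by several pmd
threads, packets left buffered by one thread could otherwise sit in the
queue until some other sender happens to flush it, so shared queues flush
on every send; that rationale is my reading of the comment rather than
something the patch states.)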
>
> @@ -550,7 +581,6 @@ netdev_dpdk_init(struct netdev *netdev_, unsigned int port_no,
> netdev->flags = 0;
> netdev->mtu = ETHER_MTU;
> netdev->max_packet_len = MTU_TO_MAX_LEN(netdev->mtu);
> - rte_spinlock_init(&netdev->txq_lock);
>
> netdev->dpdk_mp = dpdk_mp_get(netdev->socket_id, netdev->mtu);
> if (!netdev->dpdk_mp) {
> @@ -560,6 +590,7 @@ netdev_dpdk_init(struct netdev *netdev_, unsigned int port_no,
>
> netdev_->n_txq = NR_QUEUE;
> netdev_->n_rxq = NR_QUEUE;
> + netdev->real_n_txq = NR_QUEUE;
>
> if (type == DPDK_DEV_ETH) {
> netdev_dpdk_alloc_txq(netdev, NR_QUEUE);
> @@ -597,6 +628,7 @@ dpdk_dev_parse_name(const char dev_name[], const char prefix[],
> static int
> netdev_dpdk_vhost_construct(struct netdev *netdev_)
> {
> + struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
> int err;
>
> if (rte_eal_init_ret) {
> @@ -607,6 +639,8 @@ netdev_dpdk_vhost_construct(struct netdev *netdev_)
> err = netdev_dpdk_init(netdev_, -1, DPDK_DEV_VHOST);
> ovs_mutex_unlock(&dpdk_mutex);
>
> + rte_spinlock_init(&netdev->vhost_tx_lock);
> +
> return err;
> }
>
> @@ -681,7 +715,8 @@ netdev_dpdk_get_config(const struct netdev *netdev_, struct smap *args)
> ovs_mutex_lock(&dev->mutex);
>
> smap_add_format(args, "configured_rx_queues", "%d", netdev_->n_rxq);
> - smap_add_format(args, "configured_tx_queues", "%d", netdev_->n_txq);
> + smap_add_format(args, "requested_tx_queues", "%d", netdev_->n_txq);
> + smap_add_format(args, "configured_tx_queues", "%d", dev->real_n_txq);
> ovs_mutex_unlock(&dev->mutex);
>
> return 0;
> @@ -718,8 +753,10 @@ netdev_dpdk_set_multiq(struct netdev *netdev_, unsigned int n_txq,
> netdev->up.n_rxq = n_rxq;
>
> rte_free(netdev->tx_q);
> - netdev_dpdk_alloc_txq(netdev, n_txq);
> err = dpdk_eth_dev_init(netdev);
> + netdev_dpdk_alloc_txq(netdev, netdev->real_n_txq);
> +
> + netdev->txq_needs_locking = netdev->real_n_txq != netdev->up.n_txq;
>
> ovs_mutex_unlock(&netdev->mutex);
> ovs_mutex_unlock(&dpdk_mutex);
> @@ -729,7 +766,7 @@ netdev_dpdk_set_multiq(struct netdev *netdev_, unsigned int n_txq,
>
> static int
> netdev_dpdk_vhost_set_multiq(struct netdev *netdev_, unsigned int n_txq,
> - unsigned int n_rxq)
> + unsigned int n_rxq)
> {
> struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
> int err = 0;
> @@ -742,7 +779,8 @@ netdev_dpdk_vhost_set_multiq(struct netdev *netdev_, unsigned int n_txq,
> ovs_mutex_lock(&netdev->mutex);
>
> netdev->up.n_txq = n_txq;
> - netdev->up.n_rxq = n_rxq;
> + netdev->real_n_txq = 1;
> + netdev->up.n_rxq = 1;
>
> ovs_mutex_unlock(&netdev->mutex);
> ovs_mutex_unlock(&dpdk_mutex);
> @@ -920,15 +958,15 @@ __netdev_dpdk_vhost_send(struct netdev *netdev, struct dp_packet **pkts,
> }
>
> /* There is a single vHost TX queue, so we need to lock it for TX. */
> - rte_spinlock_lock(&vhost_dev->txq_lock);
> + rte_spinlock_lock(&vhost_dev->vhost_tx_lock);
> tx_pkts = rte_vhost_enqueue_burst(virtio_dev, VIRTIO_RXQ,
> (struct rte_mbuf **)pkts, cnt);
> + rte_spinlock_unlock(&vhost_dev->vhost_tx_lock);
>
> rte_spinlock_lock(&vhost_dev->stats_lock);
> vhost_dev->stats.tx_packets += tx_pkts;
> vhost_dev->stats.tx_dropped += (cnt - tx_pkts);
> rte_spinlock_unlock(&vhost_dev->stats_lock);
> - rte_spinlock_unlock(&vhost_dev->txq_lock);
>
> out:
> if (may_steal) {
> @@ -1108,7 +1146,16 @@ netdev_dpdk_eth_send(struct netdev *netdev, int qid,
> {
> struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
>
> + if (dev->txq_needs_locking) {
> + qid = qid % dev->real_n_txq;
> + rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
> + }
> +
> netdev_dpdk_send__(dev, qid, pkts, cnt, may_steal);
> +
> + if (dev->txq_needs_locking) {
> + rte_spinlock_unlock(&dev->tx_q[qid].tx_lock);
> + }
> return 0;
> }
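
(To put concrete numbers on the remapping above: with three pmd threads and
a NIC that only provides two tx queues, 'real_n_txq' is 2, so senders with
qid 0, 1 and 2 map to hw queues 0, 1 and 0; the two threads sharing queue 0
are serialized by 'tx_q[0].tx_lock'.)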
>
> @@ -1754,19 +1801,6 @@ dpdk_ring_open(const char dev_name[], unsigned int *eth_port_id) OVS_REQUIRES(dp
> }
>
> static int
> -netdev_dpdk_ring_send(struct netdev *netdev, int qid OVS_UNUSED,
> - struct dp_packet **pkts, int cnt, bool may_steal)
> -{
> - struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
> -
> - /* DPDK Rings have a single TX queue, Therefore needs locking. */
> - rte_spinlock_lock(&dev->txq_lock);
> - netdev_dpdk_send__(dev, 0, pkts, cnt, may_steal);
> - rte_spinlock_unlock(&dev->txq_lock);
> - return 0;
> -}
> -
> -static int
> netdev_dpdk_ring_construct(struct netdev *netdev)
> {
> unsigned int port_no = 0;
> @@ -1940,8 +1974,8 @@ const struct netdev_class dpdk_ring_class =
> NULL,
> netdev_dpdk_ring_construct,
> netdev_dpdk_destruct,
> - NULL,
> - netdev_dpdk_ring_send,
> + netdev_dpdk_set_multiq,
> + netdev_dpdk_eth_send,
> netdev_dpdk_get_carrier,
> netdev_dpdk_get_stats,
> netdev_dpdk_get_features,
> diff --git a/lib/netdev-provider.h b/lib/netdev-provider.h
> index fabeb2d..9276b07 100644
> --- a/lib/netdev-provider.h
> +++ b/lib/netdev-provider.h
> @@ -280,6 +280,17 @@ struct netdev_class {
> /* Configures the number of tx queues and rx queues of 'netdev'.
> * Return 0 if successful, otherwise a positive errno value.
> *
> + * 'n_rxq' specifies the maximum number of receive queues to create.
> + * The netdev provider might choose to create fewer (e.g. if the hardware
> + * supports only a smaller number). The actual number of queues created
> + * is stored in the 'netdev->n_rxq' field.
> + *
> + * 'n_txq' specifies the exact number of transmission queues to create.
> + * The caller will call netdev_send() concurrently from 'n_txq' different
> + * threads (with different qid). The netdev provider is responsible for
> + * making sure that these concurrent calls do not create a race condition
> + * by using multiple hw queues or locking.
> + *
> * On error, the tx queue and rx queue configuration is indeterminate.
> * Caller should make decision on whether to restore the previous or
> * the default configuration. Also, caller must make sure there is no
> diff --git a/lib/netdev.c b/lib/netdev.c
> index c1d9b8f..0c73b0c 100644
> --- a/lib/netdev.c
> +++ b/lib/netdev.c
> @@ -674,6 +674,16 @@ netdev_rxq_drain(struct netdev_rxq *rx)
> /* Configures the number of tx queues and rx queues of 'netdev'.
> * Return 0 if successful, otherwise a positive errno value.
> *
> + * 'n_rxq' specifies the maximum number of receive queues to create.
> + * The netdev provider might choose to create fewer (e.g. if the hardware
> + * supports only a smaller number). The caller can check how many were
> + * actually created by calling 'netdev_n_rxq()'.
> + *
> + * 'n_txq' specifies the exact number of transmission queues to create.
> + * If this function returns successfully, the caller can make 'n_txq'
> + * concurrent calls to netdev_send() (each one with a different 'qid' in the
> + * range [0..'n_txq'-1]).
> + *
> * On error, the tx queue and rx queue configuration is indeterminate.
> * Caller should make decision on whether to restore the previous or
> * the default configuration. Also, caller must make sure there is no
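
For the caller side, a short usage sketch under the contract documented
above (only netdev_set_multiq() and netdev_n_rxq() are real; the
surrounding names are illustrative):

    int error = netdev_set_multiq(netdev, n_threads, wanted_rxqs);
    if (!error) {
        /* The provider may have created fewer rxqs than requested. */
        int n_rxq = netdev_n_rxq(netdev);

        /* Each of the 'n_threads' senders may now call netdev_send()
         * concurrently, each with its own qid in [0, n_threads). */
    }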
> diff --git a/vswitchd/vswitch.xml b/vswitchd/vswitch.xml
> index 3256ce0..b8fd458 100644
> --- a/vswitchd/vswitch.xml
> +++ b/vswitchd/vswitch.xml
> @@ -155,7 +155,7 @@
> <column name="other_config" key="n-dpdk-rxqs"
> type='{"type": "integer", "minInteger": 1}'>
> <p>
> - Specifies the number of rx queues to be created for each dpdk
> + Specifies the maximum number of rx queues to be created for each dpdk
> interface. If not specified or specified to 0, one rx queue will
> be created for each dpdk interface by default.
> </p>
> --
> 2.1.4
>
> _______________________________________________
> dev mailing list
> dev at openvswitch.org
> http://openvswitch.org/mailman/listinfo/dev