[ovs-dev] [PATCH ovs v2 2/2] netdev-dpdk: Add dpdkvdpa port
Noa Ezra
noae at mellanox.com
Wed Oct 2 18:15:56 UTC 2019
The dpdkvdpa netdev works with three components:
a vhost-user socket, a vDPA device (a real vDPA device or a VF), and
a representor of the vDPA device.
In order to add a new vDPA port, add a new port to existing bridge
with type dpdkvdpa and vDPA options:
ovs-vsctl add-port br0 vdpa0 -- set Interface vdpa0 type=dpdkvdpa
options:vdpa-socket-path=<sock path>
options:vdpa-accelerator-devargs=<VF pci id>
options:dpdk-devargs=<vdpa pci id>,representor=[id]
When this command is issued, OVS will create a new netdev:
1. Register vhost-user-client device.
2. Open and configure VF dpdk port.
3. Open and configure representor dpdk port.
The new netdev uses the netdev_rxq_recv() function to receive packets
from the VF and push them to the vhost-user device, and to receive
packets from the vhost-user device and push them to the VF.
Signed-off-by: Noa Ezra <noae at mellanox.com>
Reviewed-by: Oz Shlomo <ozsh at mellanox.com>
---
NEWS | 1 +
lib/netdev-dpdk.c | 162 +++++++++++++++++++++++++++++++++++++++++++++++++++
vswitchd/vswitch.xml | 25 ++++++++
3 files changed, 188 insertions(+)
diff --git a/NEWS b/NEWS
index f5a0b8f..6f315c6 100644
--- a/NEWS
+++ b/NEWS
@@ -542,6 +542,7 @@ v2.6.0 - 27 Sep 2016
* Remove dpdkvhostcuse port type.
* OVS client mode for vHost and vHost reconnect (Requires QEMU 2.7)
* 'dpdkvhostuserclient' port type.
+ * 'dpdkvdpa' port type.
- Increase number of registers to 16.
- ovs-benchmark: This utility has been removed due to lack of use and
bitrot.
diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c
index bc20d68..16ddf58 100644
--- a/lib/netdev-dpdk.c
+++ b/lib/netdev-dpdk.c
@@ -47,6 +47,7 @@
#include "dpif-netdev.h"
#include "fatal-signal.h"
#include "netdev-provider.h"
+#include "netdev-dpdk-vdpa.h"
#include "netdev-vport.h"
#include "odp-util.h"
#include "openvswitch/dynamic-string.h"
@@ -137,6 +138,9 @@ typedef uint16_t dpdk_port_t;
/* Legacy default value for vhost tx retries. */
#define VHOST_ENQ_RETRY_DEF 8
+/* Size of VDPA custom stats. */
+#define VDPA_CUSTOM_STATS_SIZE 4
+
#define IF_NAME_SZ (PATH_MAX > IFNAMSIZ ? PATH_MAX : IFNAMSIZ)
static const struct rte_eth_conf port_conf = {
@@ -461,6 +465,8 @@ struct netdev_dpdk {
int rte_xstats_ids_size;
uint64_t *rte_xstats_ids;
);
+
+ struct netdev_dpdk_vdpa_relay *relay;
};
struct netdev_rxq_dpdk {
@@ -1346,6 +1352,30 @@ netdev_dpdk_construct(struct netdev *netdev)
return err;
}
+/* Netdev construct callback for "dpdkvdpa" ports.  Builds the plain dpdk
+ * netdev first, then layers the vDPA relay state on top of it.
+ * Returns 0 on success, a positive errno value on failure. */
+static int
+netdev_dpdk_vdpa_construct(struct netdev *netdev)
+{
+    struct netdev_dpdk *dev;
+    int err;
+
+    err = netdev_dpdk_construct(netdev);
+    if (err) {
+        VLOG_ERR("netdev_dpdk_construct failed. Port: %s\n", netdev->name);
+        goto out;
+    }
+
+    ovs_mutex_lock(&dpdk_mutex);
+    dev = netdev_dpdk_cast(netdev);
+    dev->relay = netdev_dpdk_vdpa_alloc_relay();
+    if (!dev->relay) {
+        /* Was silent before; log the reason construct is failing.
+         * NOTE(review): on this path the underlying dpdk netdev remains
+         * constructed but the framework will not invoke our destruct
+         * callback after a failed construct -- confirm teardown. */
+        VLOG_ERR("Failed to allocate vDPA relay for port %s", netdev->name);
+        err = ENOMEM;
+    }
+
+    ovs_mutex_unlock(&dpdk_mutex);
+out:
+    return err;
+}
+
static void
common_destruct(struct netdev_dpdk *dev)
OVS_REQUIRES(dpdk_mutex)
@@ -1428,6 +1458,19 @@ dpdk_vhost_driver_unregister(struct netdev_dpdk *dev OVS_UNUSED,
}
static void
+netdev_dpdk_vdpa_destruct(struct netdev *netdev)
+{
+    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
+
+    /* Tear down the relay first, then the underlying dpdk netdev. */
+    ovs_mutex_lock(&dpdk_mutex);
+    netdev_dpdk_vdpa_destruct_impl(dev->relay);
+    rte_free(dev->relay);
+    dev->relay = NULL;    /* Avoid dangling pointer / double free. */
+    ovs_mutex_unlock(&dpdk_mutex);
+
+    netdev_dpdk_destruct(netdev);
+}
+
+static void
netdev_dpdk_vhost_destruct(struct netdev *netdev)
{
struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
@@ -1878,6 +1921,47 @@ out:
}
static int
+netdev_dpdk_vdpa_set_config(struct netdev *netdev, const struct smap *args,
+                            char **errp)
+{
+    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
+    const char *vdpa_accelerator_devargs =
+        smap_get(args, "vdpa-accelerator-devargs");
+    const char *vdpa_socket_path =
+        smap_get(args, "vdpa-socket-path");
+    int err = 0;
+
+    if ((vdpa_accelerator_devargs == NULL) || (vdpa_socket_path == NULL)) {
+        VLOG_ERR("netdev_dpdk_vdpa_set_config failed. "
+                 "Required arguments are missing for VDPA port %s",
+                 netdev->name);
+        /* BUG fix: err was left 0 here, so a misconfigured port reported
+         * success while its relay had already been freed. */
+        err = EINVAL;
+        goto free_relay;
+    }
+
+    err = netdev_dpdk_set_config(netdev, args, errp);
+    if (err) {
+        VLOG_ERR("netdev_dpdk_set_config failed. Port: %s", netdev->name);
+        goto free_relay;
+    }
+
+    err = netdev_dpdk_vdpa_config_impl(dev->relay, dev->port_id,
+                                       vdpa_socket_path,
+                                       vdpa_accelerator_devargs);
+    if (err) {
+        VLOG_ERR("netdev_dpdk_vdpa_config_impl failed. Port %s",
+                 netdev->name);
+        goto free_relay;
+    }
+
+    goto out;
+
+free_relay:
+    rte_free(dev->relay);
+    /* BUG fix: NULL the pointer so netdev_dpdk_vdpa_destruct() does not
+     * free it a second time. */
+    dev->relay = NULL;
+out:
+    return err;
+}
+
+static int
netdev_dpdk_ring_set_config(struct netdev *netdev, const struct smap *args,
char **errp OVS_UNUSED)
{
@@ -2273,6 +2357,23 @@ netdev_dpdk_rxq_recv(struct netdev_rxq *rxq, struct dp_packet_batch *batch,
return 0;
}
+/* Rx callback for "dpdkvdpa" ports.  Runs one relay forwarding pass
+ * between the VF and the vhost-user device, then polls this port's own
+ * queue like a regular dpdk port. */
+static int
+netdev_dpdk_vdpa_rxq_recv(struct netdev_rxq *rxq,
+                          struct dp_packet_batch *batch,
+                          int *qfill)
+{
+    struct netdev_dpdk *dev = netdev_dpdk_cast(rxq->netdev);
+    int relay_fwd;
+    int err;
+
+    relay_fwd = netdev_dpdk_vdpa_rxq_recv_impl(dev->relay, rxq->queue_id);
+    err = netdev_dpdk_rxq_recv(rxq, batch, qfill);
+    if (err != EAGAIN) {
+        return err;
+    }
+    /* The port's queue was empty; report success anyway if the relay
+     * forwarded any packets this pass. */
+    return relay_fwd ? 0 : EAGAIN;
+}
+
static inline int
netdev_dpdk_qos_run(struct netdev_dpdk *dev, struct rte_mbuf **pkts,
int cnt, bool should_steal)
@@ -2854,6 +2955,29 @@ netdev_dpdk_vhost_get_custom_stats(const struct netdev *netdev,
}
static int
+netdev_dpdk_vdpa_get_custom_stats(const struct netdev *netdev,
+                                  struct netdev_custom_stats *custom_stats)
+{
+    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
+    int err = 0;
+
+    ovs_mutex_lock(&dev->mutex);
+
+    /* Allocate a fixed-size, zeroed counter array
+     * (VDPA_CUSTOM_STATS_SIZE entries); the relay implementation fills
+     * in the counter names and values. */
+    custom_stats->size = VDPA_CUSTOM_STATS_SIZE;
+    custom_stats->counters = xcalloc(custom_stats->size,
+                                     sizeof *custom_stats->counters);
+    err = netdev_dpdk_vdpa_get_custom_stats_impl(dev->relay,
+                                                 custom_stats);
+    if (err) {
+        /* NOTE(review): on failure the counters array stays allocated and
+         * custom_stats->size remains set -- confirm the caller releases
+         * it, otherwise this leaks on every failed stats query. */
+        VLOG_ERR("netdev_dpdk_vdpa_get_custom_stats_impl failed."
+                 "Port %s\n", netdev->name);
+    }
+
+    ovs_mutex_unlock(&dev->mutex);
+    return err;
+}
+
+static int
netdev_dpdk_get_features(const struct netdev *netdev,
enum netdev_features *current,
enum netdev_features *advertised,
@@ -4237,6 +4361,31 @@ netdev_dpdk_vhost_reconfigure(struct netdev *netdev)
}
static int
+netdev_dpdk_vdpa_reconfigure(struct netdev *netdev)
+{
+    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
+    int ret;
+
+    /* Reconfigure the underlying dpdk port first; the relay must be
+     * refreshed afterwards with the (possibly new) mempool and rxq count. */
+    ret = netdev_dpdk_reconfigure(netdev);
+    if (ret) {
+        VLOG_ERR("netdev_dpdk_reconfigure failed. Port %s", netdev->name);
+        return ret;
+    }
+
+    ovs_mutex_lock(&dev->mutex);
+    ret = netdev_dpdk_vdpa_update_relay(dev->relay, dev->dpdk_mp->mp,
+                                        dev->up.n_rxq);
+    if (ret) {
+        VLOG_ERR("netdev_dpdk_vdpa_update_relay failed. Port %s",
+                 netdev->name);
+    }
+    ovs_mutex_unlock(&dev->mutex);
+
+    return ret;
+}
+
+static int
netdev_dpdk_vhost_client_reconfigure(struct netdev *netdev)
{
struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
@@ -4456,6 +4605,18 @@ static const struct netdev_class dpdk_vhost_client_class = {
.rxq_enabled = netdev_dpdk_vhost_rxq_enabled,
};
+/* "dpdkvdpa" port class: a dpdk port whose rx path also relays traffic
+ * between a VF/representor and a vhost-user socket.  Everything not
+ * overridden here comes from NETDEV_DPDK_CLASS_COMMON. */
+static const struct netdev_class dpdk_vdpa_class = {
+    .type = "dpdkvdpa",
+    NETDEV_DPDK_CLASS_COMMON,
+    .construct = netdev_dpdk_vdpa_construct,
+    .destruct = netdev_dpdk_vdpa_destruct,
+    .rxq_recv = netdev_dpdk_vdpa_rxq_recv,
+    .set_config = netdev_dpdk_vdpa_set_config,
+    .reconfigure = netdev_dpdk_vdpa_reconfigure,
+    .get_custom_stats = netdev_dpdk_vdpa_get_custom_stats,
+    .send = netdev_dpdk_eth_send,
+};
+
void
netdev_dpdk_register(void)
{
@@ -4463,4 +4624,5 @@ netdev_dpdk_register(void)
netdev_register_provider(&dpdk_ring_class);
netdev_register_provider(&dpdk_vhost_class);
netdev_register_provider(&dpdk_vhost_client_class);
+ netdev_register_provider(&dpdk_vdpa_class);
}
diff --git a/vswitchd/vswitch.xml b/vswitchd/vswitch.xml
index 9a743c0..9e94950 100644
--- a/vswitchd/vswitch.xml
+++ b/vswitchd/vswitch.xml
@@ -2640,6 +2640,13 @@
<dd>
A pair of virtual devices that act as a patch cable.
</dd>
+
+ <dt><code>dpdkvdpa</code></dt>
+ <dd>
+        The DPDK vDPA port allows forwarding bi-directional traffic between
+        SR-IOV virtual functions (VFs) and VirtIO devices in virtual
+        machines (VMs).
+ </dd>
</dl>
</column>
</group>
@@ -3156,6 +3163,24 @@ ovs-vsctl add-port br0 p0 -- set Interface p0 type=patch options:peer=p1 \
</p>
</column>
+ <column name="options" key="vdpa-socket-path"
+ type='{"type": "string"}'>
+ <p>
+        The value specifies the path to the socket that is associated with
+        a vDPA port and will be created by QEMU.
+        Only supported by dpdkvdpa interfaces.
+ </p>
+ </column>
+
+ <column name="options" key="vdpa-accelerator-devargs"
+ type='{"type": "string"}'>
+ <p>
+ The value specifies the PCI address associated with the virtual
+ function.
+ Only supported by dpdkvdpa interfaces.
+ </p>
+ </column>
+
<column name="options" key="dq-zero-copy"
type='{"type": "boolean"}'>
<p>
--
1.8.3.1
More information about the dev
mailing list