[ovs-dev] [PATCH v4] netdev-dpdk: leverage the mempool offload support
Ben Pfaff
blp at ovn.org
Wed May 3 19:38:58 UTC 2017
Thanks everyone! I applied this to master.
On Fri, Apr 14, 2017 at 10:36:54PM +0000, Darrell Ball wrote:
> Acked-by: Darrell Ball <dlu998 at gmail.com>
>
> On 4/12/17, 10:31 PM, "Hemant Agrawal" <hemant.agrawal at nxp.com> wrote:
>
> DPDK 16.07 introduced support for mempool offload.
> rte_pktmbuf_pool_create is the recommended method for creating pktmbuf
> pools. Buffer pools created with rte_mempool_create may not get offloaded
> to the underlying offload-capable mempools.
>
> This patch changes the rte_mempool_create call to use the helper wrapper
> "rte_pktmbuf_pool_create" provided by DPDK, so that it can leverage
> offloaded mempools.
>
> Signed-off-by: Hemant Agrawal <hemant.agrawal at nxp.com>
> Acked-by: Jianbo Liu <jianbo.liu at linaro.org>
> Acked-by: Kevin Traynor <ktraynor at redhat.com>
> ---
> v4:
> * fix the comment as suggested
> v3:
> * adding OVS_UNUSED for mp parameter
> v2:
> * removing rte_pktmbuf_init as per review comment
>
> lib/netdev-dpdk.c | 26 +++++++++++++-------------
> 1 file changed, 13 insertions(+), 13 deletions(-)
>
> diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c
> index ddc651b..9fa60fd 100644
> --- a/lib/netdev-dpdk.c
> +++ b/lib/netdev-dpdk.c
> @@ -451,22 +451,19 @@ free_dpdk_buf(struct dp_packet *p)
> }
>
> static void
> -ovs_rte_pktmbuf_init(struct rte_mempool *mp,
> +ovs_rte_pktmbuf_init(struct rte_mempool *mp OVS_UNUSED,
> void *opaque_arg OVS_UNUSED,
> void *_p,
> unsigned i OVS_UNUSED)
> {
> struct rte_mbuf *pkt = _p;
>
> - rte_pktmbuf_init(mp, opaque_arg, _p, i);
> -
> dp_packet_init_dpdk((struct dp_packet *) pkt, pkt->buf_len);
> }
>
> static struct dpdk_mp *
> dpdk_mp_create(int socket_id, int mtu)
> {
> - struct rte_pktmbuf_pool_private mbp_priv;
> struct dpdk_mp *dmp;
> unsigned mp_size;
> char *mp_name;
> @@ -478,9 +475,6 @@ dpdk_mp_create(int socket_id, int mtu)
> dmp->socket_id = socket_id;
> dmp->mtu = mtu;
> dmp->refcount = 1;
> - mbp_priv.mbuf_data_room_size = MBUF_SIZE(mtu) - sizeof(struct dp_packet);
> - mbp_priv.mbuf_priv_size = sizeof(struct dp_packet)
> - - sizeof(struct rte_mbuf);
> /* XXX: this is a really rough method of provisioning memory.
> * It's impossible to determine what the exact memory requirements are
> * when the number of ports and rxqs that utilize a particular mempool can
> @@ -496,18 +490,24 @@ dpdk_mp_create(int socket_id, int mtu)
> mp_name = xasprintf("ovs_mp_%d_%d_%u", dmp->mtu, dmp->socket_id,
> mp_size);
>
> - dmp->mp = rte_mempool_create(mp_name, mp_size, MBUF_SIZE(mtu),
> - MP_CACHE_SZ,
> - sizeof(struct rte_pktmbuf_pool_private),
> - rte_pktmbuf_pool_init, &mbp_priv,
> - ovs_rte_pktmbuf_init, NULL,
> - socket_id, 0);
> + dmp->mp = rte_pktmbuf_pool_create(mp_name, mp_size,
> + MP_CACHE_SZ,
> + sizeof (struct dp_packet)
> + - sizeof (struct rte_mbuf),
> + MBUF_SIZE(mtu)
> + - sizeof(struct dp_packet),
> + socket_id);
> if (dmp->mp) {
> VLOG_DBG("Allocated \"%s\" mempool with %u mbufs",
> mp_name, mp_size);
> }
> free(mp_name);
> if (dmp->mp) {
> + /* rte_pktmbuf_pool_create has done some initialization of the
> + * rte_mbuf part of each dp_packet, while ovs_rte_pktmbuf_init
> + * initializes some OVS specific fields of dp_packet.
> + */
> + rte_mempool_obj_iter(dmp->mp, ovs_rte_pktmbuf_init, NULL);
> return dmp;
> }
> } while (rte_errno == ENOMEM && (mp_size /= 2) >= MIN_NB_MBUF);
> --
> 1.9.1
>
>
>
> _______________________________________________
> dev mailing list
> dev at openvswitch.org
> https://mail.openvswitch.org/mailman/listinfo/ovs-dev
More information about the dev
mailing list