[ovs-dev] [PATCH v2] netdev-dpdk: Free mempool only when no in-use mbufs.

Kevin Traynor ktraynor at redhat.com
Fri Apr 13 17:40:13 UTC 2018


DPDK mempools are freed when they are no longer needed.
This can happen when a port is removed or a port's MTU
is reconfigured so that a new mempool is used.

An mbuf still held in a NIC Tx queue may be returned to a
mempool that has already been freed, and this can lead to
a segfault.

To prevent this, free a mempool only when it is no longer
needed and has no in-use mbufs. As this might not be
possible immediately, keep a free list of released
mempools and sweep it whenever a port requests a mempool.

Fixes: 8d38823bdf8b ("netdev-dpdk: fix memory leak")
Cc: mark.b.kavanagh81 at gmail.com
Cc: Ilya Maximets <i.maximets at samsung.com>
Reported-by: Venkatesan Pradeep <venkatesan.pradeep at ericsson.com>
Signed-off-by: Kevin Traynor <ktraynor at redhat.com>
---

v2: Get the number of entries on the mempool ring directly.
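
For reference, below is a minimal standalone sketch of the in-use check
that dpdk_mp_full() in the patch performs, assuming only DPDK's
rte_mempool_full() and rte_mempool_ops_get_count(); the function name is
illustrative and the snippet is not part of the patch:

    #include <stdbool.h>
    #include <rte_mempool.h>

    /* rte_mempool_full() is not atomic: an mbuf being returned can be
     * flushed from a per-lcore cache to the ring mid-call, so the sum it
     * computes may be off.  Nothing allocates from a released mempool,
     * so the ring count is also sampled before and after the call and
     * must be unchanged for the "full" result to be accepted. */
    static bool
    example_mp_has_all_mbufs(const struct rte_mempool *mp)
    {
        unsigned ring_count = rte_mempool_ops_get_count(mp);

        return rte_mempool_full(mp)
               && rte_mempool_ops_get_count(mp) == ring_count;
    }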

 lib/netdev-dpdk.c | 86 +++++++++++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 81 insertions(+), 5 deletions(-)

diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c
index ee39cbe..3306b19 100644
--- a/lib/netdev-dpdk.c
+++ b/lib/netdev-dpdk.c
@@ -297,4 +297,14 @@ static struct ovs_mutex dpdk_mp_mutex OVS_ACQ_AFTER(dpdk_mutex)
     = OVS_MUTEX_INITIALIZER;
 
+/* Contains all 'struct dpdk_mp's. */
+static struct ovs_list dpdk_mp_free_list OVS_GUARDED_BY(dpdk_mp_mutex)
+    = OVS_LIST_INITIALIZER(&dpdk_mp_free_list);
+
+/* Wrapper for a mempool released but not yet freed. */
+struct dpdk_mp {
+    struct rte_mempool *mp;
+    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mp_mutex);
+};
+
 /* There should be one 'struct dpdk_tx_queue' created for
  * each cpu core. */
@@ -512,4 +522,56 @@ ovs_rte_pktmbuf_init(struct rte_mempool *mp OVS_UNUSED,
 }
 
+static int
+dpdk_mp_full(const struct rte_mempool *mp) OVS_REQUIRES(dpdk_mp_mutex)
+{
+    unsigned ring_count;
+    /* This logic is needed because rte_mempool_full() is not guaranteed to
+     * be atomic and mbufs could be moved from mempool cache --> mempool ring
+     * during the call. However, as no mbufs will be taken from the mempool
+     * at this time, we can work around it by also checking the ring entries
+     * separately and ensuring that they have not changed.
+     */
+    ring_count = rte_mempool_ops_get_count(mp);
+    if (rte_mempool_full(mp) && rte_mempool_ops_get_count(mp) == ring_count) {
+        return 1;
+    }
+
+    return 0;
+}
+
+/* Free unused mempools. */
+static void
+dpdk_mp_sweep(void)
+{
+    struct dpdk_mp *dmp, *next;
+
+    ovs_mutex_lock(&dpdk_mp_mutex);
+    LIST_FOR_EACH_SAFE (dmp, next, list_node, &dpdk_mp_free_list) {
+        if (dpdk_mp_full(dmp->mp)) {
+            VLOG_DBG("Freeing mempool \"%s\"", dmp->mp->name);
+            ovs_list_remove(&dmp->list_node);
+            rte_mempool_free(dmp->mp);
+            rte_free(dmp);
+        }
+    }
+    ovs_mutex_unlock(&dpdk_mp_mutex);
+}
+
+/* Ensure a mempool will not be freed. */
+static void
+dpdk_mp_do_not_free(struct rte_mempool *mp) OVS_REQUIRES(dpdk_mp_mutex)
+{
+    struct dpdk_mp *dmp, *next;
+
+    LIST_FOR_EACH_SAFE (dmp, next, list_node, &dpdk_mp_free_list) {
+        if (dmp->mp == mp) {
+            VLOG_DBG("Removing mempool \"%s\" from free list", dmp->mp->name);
+            ovs_list_remove(&dmp->list_node);
+            rte_free(dmp);
+            break;
+        }
+    }
+}
+
 /* Returns a valid pointer when either of the following is true:
  *  - a new mempool was just created;
@@ -578,4 +640,6 @@ dpdk_mp_create(struct netdev_dpdk *dev, int mtu)
             VLOG_DBG("A mempool with name \"%s\" already exists at %p.",
                      mp_name, mp);
+            /* Ensure this reused mempool will not be freed. */
+            dpdk_mp_do_not_free(mp);
         } else {
             VLOG_ERR("Failed mempool \"%s\" create request of %u mbufs",
@@ -590,5 +654,5 @@ dpdk_mp_create(struct netdev_dpdk *dev, int mtu)
 /* Release an existing mempool. */
 static void
-dpdk_mp_free(struct rte_mempool *mp)
+dpdk_mp_release(struct rte_mempool *mp)
 {
     if (!mp) {
@@ -597,6 +661,16 @@ dpdk_mp_free(struct rte_mempool *mp)
 
     ovs_mutex_lock(&dpdk_mp_mutex);
-    VLOG_DBG("Releasing \"%s\" mempool", mp->name);
-    rte_mempool_free(mp);
+    if (dpdk_mp_full(mp)) {
+        VLOG_DBG("Freeing mempool \"%s\"", mp->name);
+        rte_mempool_free(mp);
+    } else {
+        struct dpdk_mp *dmp;
+
+        dmp = dpdk_rte_mzalloc(sizeof *dmp);
+        if (dmp) {
+            dmp->mp = mp;
+            ovs_list_push_back(&dpdk_mp_free_list, &dmp->list_node);
+        }
+    }
     ovs_mutex_unlock(&dpdk_mp_mutex);
 }
@@ -616,4 +690,6 @@ netdev_dpdk_mempool_configure(struct netdev_dpdk *dev)
     int ret = 0;
 
+    dpdk_mp_sweep();
+
     mp = dpdk_mp_create(dev, FRAME_LEN_TO_MTU(buf_size));
     if (!mp) {
@@ -628,5 +704,5 @@ netdev_dpdk_mempool_configure(struct netdev_dpdk *dev)
         if (dev->mp != mp) {
             /* A new mempool was created, release the previous one. */
-            dpdk_mp_free(dev->mp);
+            dpdk_mp_release(dev->mp);
         } else {
             ret = EEXIST;
@@ -1083,5 +1159,5 @@ common_destruct(struct netdev_dpdk *dev)
 {
     rte_free(dev->tx_q);
-    dpdk_mp_free(dev->mp);
+    dpdk_mp_release(dev->mp);
 
     ovs_list_remove(&dev->list_node);
-- 
1.8.3.1