[ovs-dev] [RFC PATCH 20/26] netdev-offload-dpdk: Lock rte_flow map access

Gaetan Rivet grive at u256.net
Sat Dec 5 14:22:15 UTC 2020


Add a lock to access the ufid to rte_flow map.  This will protect it
from concurrent write accesses when multiple threads attempt them.

At this point, the lock is no longer taken to fulfill the needs of
the DPDK offload implementation. Rewrite the comments to reflect this
change. The lock is still needed to protect against changes to the
netdev port mapping.

Signed-off-by: Gaetan Rivet <grive at u256.net>
---
 lib/dpif-netdev.c         |  8 ++---
 lib/netdev-offload-dpdk.c | 76 +++++++++++++++++++++++++++++++--------
 2 files changed, 66 insertions(+), 18 deletions(-)

diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
index daf8fb249..4eae34893 100644
--- a/lib/dpif-netdev.c
+++ b/lib/dpif-netdev.c
@@ -2562,7 +2562,7 @@ mark_to_flow_disassociate(struct dp_netdev_pmd_thread *pmd,
         port = netdev_ports_get(in_port, dpif_type_str);
         if (port) {
             /* Taking a global 'port_mutex' to fulfill thread safety
-             * restrictions for the netdev-offload-dpdk module. */
+             * restrictions regarding netdev port mapping. */
             ovs_mutex_lock(&pmd->dp->port_mutex);
             ret = netdev_flow_del(port, &flow->mega_ufid, NULL);
             ovs_mutex_unlock(&pmd->dp->port_mutex);
@@ -2719,8 +2719,8 @@ dp_netdev_flow_offload_put(struct dp_offload_thread_item *offload)
         netdev_close(port);
         goto err_free;
     }
-    /* Taking a global 'port_mutex' to fulfill thread safety restrictions for
-     * the netdev-offload-dpdk module. */
+    /* Taking a global 'port_mutex' to fulfill thread safety
+     * restrictions regarding the netdev port mapping. */
     ovs_mutex_lock(&pmd->dp->port_mutex);
     ret = netdev_flow_put(port, &offload->match,
                           CONST_CAST(struct nlattr *, offload->actions),
@@ -3402,7 +3402,7 @@ dpif_netdev_get_flow_offload_status(const struct dp_netdev *dp,
     }
     ofpbuf_use_stack(&buf, &act_buf, sizeof act_buf);
     /* Taking a global 'port_mutex' to fulfill thread safety
-     * restrictions for the netdev-offload-dpdk module.
+     * restrictions regarding netdev port mapping.
      *
      * XXX: Main thread will try to pause/stop all revalidators during datapath
      *      reconfiguration via datapath purge callback (dp_purge_cb) while
diff --git a/lib/netdev-offload-dpdk.c b/lib/netdev-offload-dpdk.c
index 48cf6d696..5bc67254c 100644
--- a/lib/netdev-offload-dpdk.c
+++ b/lib/netdev-offload-dpdk.c
@@ -37,9 +37,6 @@ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(100, 5);
  *
  * Below API is NOT thread safe in following terms:
  *
- *  - The caller must be sure that none of these functions will be called
- *    simultaneously.  Even for different 'netdev's.
- *
  *  - The caller must be sure that 'netdev' will not be destructed/deallocated.
  *
  *  - The caller must be sure that 'netdev' configuration will not be changed.
@@ -64,6 +61,7 @@ struct ufid_to_rte_flow_data {
 struct netdev_offload_dpdk_data {
     struct cmap ufid_to_rte_flow;
     uint64_t *rte_flow_counters;
+    struct ovs_mutex map_lock;
 };
 
 static int
@@ -72,6 +70,7 @@ offload_data_init(struct netdev *netdev)
     struct netdev_offload_dpdk_data *data;
 
     data = xzalloc(sizeof *data);
+    ovs_mutex_init(&data->map_lock);
     cmap_init(&data->ufid_to_rte_flow);
     data->rte_flow_counters = xcalloc(netdev_offload_thread_nb(),
                                       sizeof *data->rte_flow_counters);
@@ -97,12 +96,33 @@ offload_data_destroy(struct netdev *netdev)
     }
 
     cmap_destroy(&data->ufid_to_rte_flow);
+    ovs_mutex_destroy(&data->map_lock);
     free(data->rte_flow_counters);
     free(data);
 
     netdev->hw_info.offload_data = NULL;
 }
 
+static void
+offload_data_lock(struct netdev *netdev)
+    OVS_NO_THREAD_SAFETY_ANALYSIS
+{
+    struct netdev_offload_dpdk_data *data;
+
+    data = netdev->hw_info.offload_data;
+    ovs_mutex_lock(&data->map_lock);
+}
+
+static void
+offload_data_unlock(struct netdev *netdev)
+    OVS_NO_THREAD_SAFETY_ANALYSIS
+{
+    struct netdev_offload_dpdk_data *data;
+
+    data = netdev->hw_info.offload_data;
+    ovs_mutex_unlock(&data->map_lock);
+}
+
 static struct cmap *
 offload_data_map(struct netdev *netdev)
 {
@@ -130,6 +150,24 @@ ufid_to_rte_flow_data_find(struct netdev *netdev,
     return NULL;
 }
 
+/* Find rte_flow with @ufid, lock-protected. */
+static struct ufid_to_rte_flow_data *
+ufid_to_rte_flow_data_find_protected(struct netdev *netdev,
+                                     const ovs_u128 *ufid)
+{
+    size_t hash = hash_bytes(ufid, sizeof *ufid, 0);
+    struct ufid_to_rte_flow_data *data;
+    struct cmap *map = offload_data_map(netdev);
+
+    CMAP_FOR_EACH_WITH_HASH_PROTECTED (data, node, hash, map) {
+        if (ovs_u128_equals(*ufid, data->ufid)) {
+            return data;
+        }
+    }
+
+    return NULL;
+}
+
 static inline struct ufid_to_rte_flow_data *
 ufid_to_rte_flow_associate(struct netdev *netdev, const ovs_u128 *ufid,
                            struct rte_flow *rte_flow, bool actions_offloaded)
@@ -139,13 +177,15 @@ ufid_to_rte_flow_associate(struct netdev *netdev, const ovs_u128 *ufid,
     struct ufid_to_rte_flow_data *data_prev;
     struct cmap *map = offload_data_map(netdev);
 
+    offload_data_lock(netdev);
+
     /*
      * We should not simply overwrite an existing rte flow.
      * We should have deleted it first before re-adding it.
      * Thus, if following assert triggers, something is wrong:
      * the rte_flow is not destroyed.
      */
-    data_prev = ufid_to_rte_flow_data_find(netdev, ufid);
+    data_prev = ufid_to_rte_flow_data_find_protected(netdev, ufid);
     if (data_prev) {
         ovs_assert(data_prev->rte_flow == NULL);
     }
@@ -155,6 +195,9 @@ ufid_to_rte_flow_associate(struct netdev *netdev, const ovs_u128 *ufid,
     data->actions_offloaded = actions_offloaded;
 
     cmap_insert(map, CONST_CAST(struct cmap_node *, &data->node), hash);
+
+    offload_data_unlock(netdev);
+
     return data;
 }
 
@@ -163,20 +206,25 @@ ufid_to_rte_flow_disassociate(struct netdev *netdev,
                               const ovs_u128 *ufid)
 {
     struct cmap *map = offload_data_map(netdev);
-    size_t hash = hash_bytes(ufid, sizeof *ufid, 0);
     struct ufid_to_rte_flow_data *data;
+    size_t hash;
 
-    CMAP_FOR_EACH_WITH_HASH (data, node, hash, map) {
-        if (ovs_u128_equals(*ufid, data->ufid)) {
-            cmap_remove(map, CONST_CAST(struct cmap_node *, &data->node),
-                        hash);
-            ovsrcu_postpone(free, data);
-            return;
-        }
+    offload_data_lock(netdev);
+
+    data = ufid_to_rte_flow_data_find_protected(netdev, ufid);
+    if (!data) {
+        offload_data_unlock(netdev);
+        VLOG_WARN("ufid "UUID_FMT" is not associated with an rte flow",
+                  UUID_ARGS((struct uuid *) ufid));
+        return;
     }
+    hash = hash_bytes(ufid, sizeof *ufid, 0);
+    cmap_remove(map, CONST_CAST(struct cmap_node *, &data->node),
+                hash);
+
+    offload_data_unlock(netdev);
 
-    VLOG_WARN("ufid "UUID_FMT" is not associated with an rte flow",
-              UUID_ARGS((struct uuid *) ufid));
+    ovsrcu_postpone(free, data);
 }
 
 /*
-- 
2.29.2



More information about the dev mailing list