[ovs-dev] [PATCH 2/2] dpif-netdev: Allow direct destroy of 'struct dp_netdev_port'.

Alex Wang alexw at nicira.com
Fri Nov 7 22:51:43 UTC 2014


Before this commit, when a 'struct dp_netdev_port' is deleted from
the 'dpif-netdev' datapath, if there is a pmd thread, the pmd thread
will release the last reference to the port and ovs-rcu will postpone
the destruction.  However, the delayed close of objects like 'struct
netdev' can cause the failure of an immediate re-add or reconfigure of
the same device.

To fix the above issue, this commit uses a condition variable and
makes the main thread wait for the pmd thread to release its reference
when deleting a port.  Then, the main thread can directly destroy the
port.

Reported-by: Cian Ferriter <cian.ferriter at intel.com>
Signed-off-by: Alex Wang <alexw at nicira.com>
---
 lib/dpif-netdev.c |   61 ++++++++++++++++++++++++++++++++++++-----------------
 1 file changed, 42 insertions(+), 19 deletions(-)

diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
index bee9330..ad2febc 100644
--- a/lib/dpif-netdev.c
+++ b/lib/dpif-netdev.c
@@ -370,6 +370,10 @@ static void dp_netdev_actions_free(struct dp_netdev_actions *);
 struct dp_netdev_pmd_thread {
     struct dp_netdev *dp;
     struct cmap_node node;          /* In 'dp->poll_threads'. */
+
+    pthread_cond_t cond;            /* For synchronizing pmd thread reload. */
+    struct ovs_mutex cond_mutex;    /* Mutex for condition variable. */
+
     /* Per thread exact-match cache.  Note, the instance for cpu core
      * NON_PMD_CORE_ID can be accessed by multiple threads, and thusly
      * need to be protected (e.g. by 'dp_netdev_mutex').  All other
@@ -416,6 +420,7 @@ static void dp_netdev_input(struct dp_netdev_pmd_thread *,
                             struct dpif_packet **, int cnt);
 
 static void dp_netdev_disable_upcall(struct dp_netdev *);
+void dp_netdev_pmd_signal_cond(struct dp_netdev_pmd_thread *pmd);
 static void dp_netdev_configure_pmd(struct dp_netdev_pmd_thread *pmd,
                                     struct dp_netdev *dp, int index,
                                     int core_id, int numa_id);
@@ -761,7 +766,14 @@ dp_netdev_reload_pmd__(struct dp_netdev_pmd_thread *pmd)
 {
     int old_seq;
 
+    if (pmd->core_id == NON_PMD_CORE_ID) {
+        return;
+    }
+
+    ovs_mutex_lock(&pmd->cond_mutex);
     atomic_add_relaxed(&pmd->change_seq, 1, &old_seq);
+    ovs_mutex_cond_wait(&pmd->cond, &pmd->cond_mutex);
+    ovs_mutex_unlock(&pmd->cond_mutex);
 }
 
 /* Causes all pmd threads to reload its tx/rx devices.
@@ -972,27 +984,21 @@ port_try_ref(struct dp_netdev_port *port)
 }
 
 static void
-port_destroy__(struct dp_netdev_port *port)
-{
-    int n_rxq = netdev_n_rxq(port->netdev);
-    int i;
-
-    netdev_close(port->netdev);
-    netdev_restore_flags(port->sf);
-
-    for (i = 0; i < n_rxq; i++) {
-        netdev_rxq_close(port->rxq[i]);
-    }
-    free(port->rxq);
-    free(port->type);
-    free(port);
-}
-
-static void
 port_unref(struct dp_netdev_port *port)
 {
     if (port && ovs_refcount_unref_relaxed(&port->ref_cnt) == 1) {
-        ovsrcu_postpone(port_destroy__, port);
+        int n_rxq = netdev_n_rxq(port->netdev);
+        int i;
+
+        netdev_close(port->netdev);
+        netdev_restore_flags(port->sf);
+
+        for (i = 0; i < n_rxq; i++) {
+            netdev_rxq_close(port->rxq[i]);
+        }
+        free(port->rxq);
+        free(port->type);
+        free(port);
     }
 }
 
@@ -2296,6 +2302,10 @@ reload:
     emc_cache_init(&pmd->flow_cache);
     poll_cnt = pmd_load_queues(pmd, &poll_list, poll_cnt);
 
+    /* Signal here to make sure the pmd finishes
+     * reloading the updated configuration. */
+    dp_netdev_pmd_signal_cond(pmd);
+
     for (;;) {
         int i;
 
@@ -2317,7 +2327,6 @@ reload:
             }
         }
     }
-
     emc_cache_uninit(&pmd->flow_cache);
 
     if (!latch_is_set(&pmd->exit_latch)){
@@ -2328,6 +2337,8 @@ reload:
          port_unref(poll_list[i].port);
     }
 
+    dp_netdev_pmd_signal_cond(pmd);
+
     free(poll_list);
     return NULL;
 }
@@ -2362,6 +2373,14 @@ dpif_netdev_enable_upcall(struct dpif *dpif)
     dp_netdev_enable_upcall(dp);
 }
 
+void
+dp_netdev_pmd_signal_cond(struct dp_netdev_pmd_thread *pmd)
+{
+    ovs_mutex_lock(&pmd->cond_mutex);
+    xpthread_cond_signal(&pmd->cond);
+    ovs_mutex_unlock(&pmd->cond_mutex);
+}
+
 /* Returns the pointer to the dp_netdev_pmd_thread for non-pmd threads. */
 static struct dp_netdev_pmd_thread *
 dp_netdev_get_nonpmd(struct dp_netdev *dp)
@@ -2398,6 +2417,8 @@ dp_netdev_configure_pmd(struct dp_netdev_pmd_thread *pmd, struct dp_netdev *dp,
     pmd->numa_id = numa_id;
     latch_init(&pmd->exit_latch);
     atomic_init(&pmd->change_seq, PMD_INITIAL_SEQ);
+    xpthread_cond_init(&pmd->cond, NULL);
+    ovs_mutex_init(&pmd->cond_mutex);
     /* init the 'flow_cache' since there is no
      * actual thread created for NON_PMD_CORE_ID. */
     if (core_id == NON_PMD_CORE_ID) {
@@ -2424,6 +2445,8 @@ dp_netdev_del_pmd(struct dp_netdev_pmd_thread *pmd)
     }
     cmap_remove(&pmd->dp->poll_threads, &pmd->node, hash_int(pmd->core_id, 0));
     latch_destroy(&pmd->exit_latch);
+    xpthread_cond_destroy(&pmd->cond);
+    ovs_mutex_destroy(&pmd->cond_mutex);
     free(pmd);
 }
 
-- 
1.7.9.5




More information about the dev mailing list