[ovs-dev] [PATCH 05/11] ovs/dp-cls: free HW pipeline

Shachar Beiser shacharbe at mellanox.com
Wed Jul 5 12:27:12 UTC 2017


The HW pipeline is made of three entities: the dp-cls offload thread, a pool
of flow tags, and a message queue between the pmd context and the dp-cls
offload thread. This patch frees those three entities.

Signed-off-by: Shachar Beiser <shacharbe at mellanox.com>
---
 lib/dpif-netdev.c |  7 +++++--
 lib/hw-pipeline.c | 60 +++++++++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 65 insertions(+), 2 deletions(-)

diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
index ef3083b..b02edfc 100644
--- a/lib/dpif-netdev.c
+++ b/lib/dpif-netdev.c
@@ -1135,6 +1135,9 @@ dp_netdev_free(struct dp_netdev *dp)
         ovs_mutex_destroy(&dp->meter_locks[i]);
     }
 
+    if (dp->ppl_md.id == HW_OFFLOAD_PIPELINE) {
+        hw_pipeline_uninit(dp);
+    }
     free(dp->pmd_cmask);
     free(CONST_CAST(char *, dp->name));
     free(dp);
@@ -4633,12 +4636,12 @@ dp_netdev_input__(struct dp_netdev_pmd_thread *pmd,
 
     /* All the flow batches need to be reset before any call to
      * packet_batch_per_flow_execute() as it could potentially trigger
-     * recirculation. When a packet matching flow ‘j’ happens to be
+     * recirculation. When a packet matching flow 'j' happens to be
      * recirculated, the nested call to dp_netdev_input__() could potentially
      * classify the packet as matching another flow - say 'k'. It could happen
      * that in the previous call to dp_netdev_input__() that same flow 'k' had
      * already its own batches[k] still waiting to be served.  So if its
-     * ‘batch’ member is not reset, the recirculated packet would be wrongly
+     * 'batch' member is not reset, the recirculated packet would be wrongly
      * appended to batches[k] of the 1st call to dp_netdev_input__(). */
     size_t i;
     for (i = 0; i < n_batches; i++) {
diff --git a/lib/hw-pipeline.c b/lib/hw-pipeline.c
index 1720c12..24045ed 100644
--- a/lib/hw-pipeline.c
+++ b/lib/hw-pipeline.c
@@ -39,10 +39,12 @@ VLOG_DEFINE_THIS_MODULE(hw_pipeline);
 // Internal functions Flow Tags Pool
 
 uint32_t hw_pipeline_ft_pool_init(flow_tag_pool *p,uint32_t pool_size);
+int hw_pipeline_ft_pool_uninit(flow_tag_pool *p);
 // Internal functions Message Queue
 
 static int hw_pipeline_msg_queue_init(msg_queue *message_queue,
                                       unsigned core_id);
+static int hw_pipeline_msg_queue_clear(msg_queue *message_queue);
 
 void *hw_pipeline_thread(void *pdp);
 
@@ -78,6 +80,24 @@ uint32_t hw_pipeline_ft_pool_init(flow_tag_pool *p,
     return 0;
 }
 
+int hw_pipeline_ft_pool_uninit(flow_tag_pool *p)
+{
+    uint32_t ii = 0;
+    if (OVS_UNLIKELY(p == NULL || p->ft_data == NULL)) {
+        VLOG_ERR("No pool or no data allocated");
+        return -1;
+    }
+    rte_spinlock_lock(&p->lock);
+    p->head = 0;
+    p->tail = 0;
+    for (ii = 0; ii < p->pool_size; ii++) {
+        p->ft_data[ii].next = 0;
+        p->ft_data[ii].valid = false;
+    }
+    free(p->ft_data);
+    rte_spinlock_unlock(&p->lock);
+    return 0;
+}
 /*************************************************************************/
 // Msg Queue
 //  A queue that contains pairs : (flow , key )
@@ -146,6 +166,28 @@ static int hw_pipeline_msg_queue_init(msg_queue *message_queue,
     return 0;
 }
 
+static int hw_pipeline_msg_queue_clear(msg_queue *message_queue)
+{
+    int ret = 0;
+    ret = close(message_queue->readFd);
+    if (OVS_UNLIKELY(ret == -1)) {
+        VLOG_ERR("Error while closing the read file descriptor.");
+        return -1;
+    }
+    ret = close(message_queue->writeFd);
+    if (OVS_UNLIKELY(ret == -1)) {
+        VLOG_ERR("Error while closing the write file descriptor.");
+        return -1;
+    }
+
+    ret = unlink(message_queue->pipeName);
+    if (OVS_UNLIKELY(ret < 0)) {
+        VLOG_ERR("Removing the fifo failed.");
+        return -1;
+    }
+
+    return 0;
+}
 void *hw_pipeline_thread(void *pdp)
 {
     struct dp_netdev *dp= (struct dp_netdev *)pdp;
@@ -181,3 +223,21 @@ int hw_pipeline_init(struct dp_netdev *dp)
     dp->ppl_md.id = HW_OFFLOAD_PIPELINE;
     return 0;
 }
+
+int hw_pipeline_uninit(struct dp_netdev *dp)
+{
+    int ret = 0;
+    ret = hw_pipeline_ft_pool_uninit(&dp->ft_pool);
+    if (OVS_UNLIKELY(ret != 0)) {
+        VLOG_ERR("hw_pipeline_ft_pool_uninit failed");
+        return ret;
+    }
+    ret = hw_pipeline_msg_queue_clear(&dp->message_queue);
+    if (OVS_UNLIKELY(ret != 0)) {
+        VLOG_ERR("hw_pipeline_msg_queue_clear failed");
+        return ret;
+    }
+    xpthread_join(dp->thread_ofload, NULL);
+    dp->ppl_md.id = DEFAULT_SW_PIPELINE;
+    return 0;
+}
-- 
1.8.3.1


