[ovs-dev] [RFC PATCH 08/26] dpif-netdev: Rename offload thread structure
Gaetan Rivet
grive at u256.net
Sat Dec 5 14:22:03 UTC 2020
The offload management in userspace is done through a separate thread.
The naming of the structure holding the objects used for synchronization
with the dataplane is generic and nondescript.
Clarify the structure's purpose by renaming it.
Signed-off-by: Gaetan Rivet <grive at u256.net>
---
lib/dpif-netdev.c | 58 +++++++++++++++++++++++------------------------
1 file changed, 29 insertions(+), 29 deletions(-)
diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
index 4cc6492a1..e8156cd57 100644
--- a/lib/dpif-netdev.c
+++ b/lib/dpif-netdev.c
@@ -421,7 +421,7 @@ enum {
DP_NETDEV_FLOW_OFFLOAD_OP_DEL,
};
-struct dp_flow_offload_item {
+struct dp_offload_thread_item {
struct dp_netdev_pmd_thread *pmd;
struct dp_netdev_flow *flow;
int op;
@@ -432,16 +432,16 @@ struct dp_flow_offload_item {
struct ovs_list node;
};
-struct dp_flow_offload {
+struct dp_offload_thread {
struct ovs_mutex mutex;
struct ovs_list list;
uint64_t enqueued_item;
pthread_cond_t cond;
};
-static struct dp_flow_offload dp_flow_offload = {
+static struct dp_offload_thread dp_offload_thread = {
.mutex = OVS_MUTEX_INITIALIZER,
- .list = OVS_LIST_INITIALIZER(&dp_flow_offload.list),
+ .list = OVS_LIST_INITIALIZER(&dp_offload_thread.list),
.enqueued_item = 0,
};
@@ -2596,12 +2596,12 @@ mark_to_flow_find(const struct dp_netdev_pmd_thread *pmd,
return NULL;
}
-static struct dp_flow_offload_item *
+static struct dp_offload_thread_item *
dp_netdev_alloc_flow_offload(struct dp_netdev_pmd_thread *pmd,
struct dp_netdev_flow *flow,
int op)
{
- struct dp_flow_offload_item *offload;
+ struct dp_offload_thread_item *offload;
offload = xzalloc(sizeof(*offload));
offload->pmd = pmd;
@@ -2615,7 +2615,7 @@ dp_netdev_alloc_flow_offload(struct dp_netdev_pmd_thread *pmd,
}
static void
-dp_netdev_free_flow_offload(struct dp_flow_offload_item *offload)
+dp_netdev_free_flow_offload(struct dp_offload_thread_item *offload)
{
dp_netdev_pmd_unref(offload->pmd);
dp_netdev_flow_unref(offload->flow);
@@ -2625,17 +2625,17 @@ dp_netdev_free_flow_offload(struct dp_flow_offload_item *offload)
}
static void
-dp_netdev_append_flow_offload(struct dp_flow_offload_item *offload)
+dp_netdev_append_flow_offload(struct dp_offload_thread_item *offload)
{
- ovs_mutex_lock(&dp_flow_offload.mutex);
- ovs_list_push_back(&dp_flow_offload.list, &offload->node);
- dp_flow_offload.enqueued_item++;
- xpthread_cond_signal(&dp_flow_offload.cond);
- ovs_mutex_unlock(&dp_flow_offload.mutex);
+ ovs_mutex_lock(&dp_offload_thread.mutex);
+ ovs_list_push_back(&dp_offload_thread.list, &offload->node);
+ dp_offload_thread.enqueued_item++;
+ xpthread_cond_signal(&dp_offload_thread.cond);
+ ovs_mutex_unlock(&dp_offload_thread.mutex);
}
static int
-dp_netdev_flow_offload_del(struct dp_flow_offload_item *offload)
+dp_netdev_flow_offload_del(struct dp_offload_thread_item *offload)
{
return mark_to_flow_disassociate(offload->pmd, offload->flow);
}
@@ -2652,7 +2652,7 @@ dp_netdev_flow_offload_del(struct dp_flow_offload_item *offload)
* valid, thus only item 2 needed.
*/
static int
-dp_netdev_flow_offload_put(struct dp_flow_offload_item *offload)
+dp_netdev_flow_offload_put(struct dp_offload_thread_item *offload)
{
struct dp_netdev_pmd_thread *pmd = offload->pmd;
struct dp_netdev_flow *flow = offload->flow;
@@ -2732,23 +2732,23 @@ err_free:
static void *
dp_netdev_flow_offload_main(void *data OVS_UNUSED)
{
- struct dp_flow_offload_item *offload;
+ struct dp_offload_thread_item *offload;
struct ovs_list *list;
const char *op;
int ret;
for (;;) {
- ovs_mutex_lock(&dp_flow_offload.mutex);
- if (ovs_list_is_empty(&dp_flow_offload.list)) {
+ ovs_mutex_lock(&dp_offload_thread.mutex);
+ if (ovs_list_is_empty(&dp_offload_thread.list)) {
ovsrcu_quiesce_start();
- ovs_mutex_cond_wait(&dp_flow_offload.cond,
- &dp_flow_offload.mutex);
+ ovs_mutex_cond_wait(&dp_offload_thread.cond,
+ &dp_offload_thread.mutex);
ovsrcu_quiesce_end();
}
- list = ovs_list_pop_front(&dp_flow_offload.list);
- dp_flow_offload.enqueued_item--;
- offload = CONTAINER_OF(list, struct dp_flow_offload_item, node);
- ovs_mutex_unlock(&dp_flow_offload.mutex);
+ list = ovs_list_pop_front(&dp_offload_thread.list);
+ dp_offload_thread.enqueued_item--;
+ offload = CONTAINER_OF(list, struct dp_offload_thread_item, node);
+ ovs_mutex_unlock(&dp_offload_thread.mutex);
switch (offload->op) {
case DP_NETDEV_FLOW_OFFLOAD_OP_ADD:
@@ -2781,10 +2781,10 @@ static void
queue_netdev_flow_del(struct dp_netdev_pmd_thread *pmd,
struct dp_netdev_flow *flow)
{
- struct dp_flow_offload_item *offload;
+ struct dp_offload_thread_item *offload;
if (ovsthread_once_start(&offload_thread_once)) {
- xpthread_cond_init(&dp_flow_offload.cond, NULL);
+ xpthread_cond_init(&dp_offload_thread.cond, NULL);
ovs_thread_create("hw_offload", dp_netdev_flow_offload_main, NULL);
ovsthread_once_done(&offload_thread_once);
}
@@ -2799,7 +2799,7 @@ queue_netdev_flow_put(struct dp_netdev_pmd_thread *pmd,
struct dp_netdev_flow *flow, struct match *match,
const struct nlattr *actions, size_t actions_len)
{
- struct dp_flow_offload_item *offload;
+ struct dp_offload_thread_item *offload;
int op;
if (!netdev_is_flow_api_enabled()) {
@@ -2807,7 +2807,7 @@ queue_netdev_flow_put(struct dp_netdev_pmd_thread *pmd,
}
if (ovsthread_once_start(&offload_thread_once)) {
- xpthread_cond_init(&dp_flow_offload.cond, NULL);
+ xpthread_cond_init(&dp_offload_thread.cond, NULL);
ovs_thread_create("hw_offload", dp_netdev_flow_offload_main, NULL);
ovsthread_once_done(&offload_thread_once);
}
@@ -4237,7 +4237,7 @@ dpif_netdev_offload_stats_get(struct dpif *dpif,
ovs_mutex_unlock(&dp->port_mutex);
stats->counters[DP_NETDEV_HW_OFFLOADS_STATS_ENQUEUED].value =
- dp_flow_offload.enqueued_item;
+ dp_offload_thread.enqueued_item;
stats->counters[DP_NETDEV_HW_OFFLOADS_STATS_INSERTED].value = nb_offloads;
for (i = 0; i < ARRAY_SIZE(names); i++) {
--
2.29.2
More information about the dev
mailing list