[ovs-dev] [PATCH v2] datapath: Implement flow table re-hashing.
Pravin Shelar
pshelar at nicira.com
Thu Dec 8 23:10:31 UTC 2011
There is a typo in the function workqueue_barrier(): it inserts the
wrong 'work' onto workq (it queues the canceled work item rather than
the barrier's own barr.work).
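
For reference, the intended logic presumably looks like this (a minimal
sketch; the one-line fix is to queue barr.work instead of the work item
being cancelled):

static void workqueue_barrier(struct work_struct *work)
{
	bool need_barrier;
	struct wq_barrier barr;

	spin_lock_irq(&wq_lock);
	if (current_work != work)
		need_barrier = false;
	else {
		INIT_WORK(&barr.work, wq_barrier_func);
		init_completion(&barr.done);
		/* Queue the barrier itself, not the canceled work item. */
		list_add(&barr.work.entry, &workq);
		wake_up(&more_work);
		need_barrier = true;
	}
	spin_unlock_irq(&wq_lock);

	if (need_barrier)
		wait_for_completion(&barr.done);
}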
Thanks,
Pravin.
On Thu, Dec 8, 2011 at 10:48 AM, Pravin B Shelar <pshelar at nicira.com> wrote:
> Fixed according to comments from Jesse.
> v1-v2:
> - uses genl_mutex to protect the flow hash table.
> - adds a REHASH cmd to dp_datapath_genl_ops so that we can
>   rehash the flow table while holding genl_mutex.
>
> --8<--------------------------cut here-------------------------->8--
>
> The following patch introduces a timer-based event to rehash the
> flow-hash table. It makes finding collisions difficult for an attacker.
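>
> The defense combines two pieces of the diff below: bucket selection mixes
> the flow hash with a per-table random seed, and the periodic rehash
> allocates a fresh table, and with it a fresh seed. A condensed sketch of
> the bucket selection (the full version is in the flow.c hunk):
>
> static struct hlist_head *find_bucket(struct flow_table *table, u32 hash)
> {
> 	/* Mix in a seed that changes on every rehash, so bucket placement
> 	 * cannot be precomputed from the flow key alone. */
> 	hash = jhash_1word(hash, table->hash_seed);
> 	return flex_array_get(table->buckets, hash & (table->n_buckets - 1));
> }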
>
> Suggested-by: Herbert Xu <herbert at gondor.apana.org.au>
> Signed-off-by: Pravin B Shelar <pshelar at nicira.com>
> ---
> datapath/datapath.c                             |   84 +++++++++-
> datapath/flow.c                                 |   80 ++++++---
> datapath/flow.h                                 |    6 +-
> datapath/linux/Modules.mk                       |    3 +-
> datapath/linux/compat/include/linux/workqueue.h |   76 ++++++---
> datapath/linux/compat/workqueue.c               |  217 +++++++++++++++++++++++
> include/linux/openvswitch.h                     |    3 +-
> 7 files changed, 416 insertions(+), 53 deletions(-)
> create mode 100644 datapath/linux/compat/workqueue.c
>
> diff --git a/datapath/datapath.c b/datapath/datapath.c
> index c86c20b..50fd00e 100644
> --- a/datapath/datapath.c
> +++ b/datapath/datapath.c
> @@ -63,6 +63,11 @@
> #error Kernels before 2.6.18 or after 3.2 are not supported by this version of Open vSwitch.
> #endif
>
> +#define REHASH_FLOW_INTERVAL (10 * 60 * HZ)
> +
> +static void rehash_flow_table(struct work_struct *work);
> +static DECLARE_DELAYED_WORK(rehash_flow_wq, rehash_flow_table);
> +
> int (*ovs_dp_ioctl_hook)(struct net_device *dev, struct ifreq *rq, int cmd);
> EXPORT_SYMBOL(ovs_dp_ioctl_hook);
>
> @@ -1559,6 +1564,31 @@ static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
> 	return skb->len;
> }
>
> +static void __rehash_flow_table(void)
> +{
> +	struct datapath *dp;
> +
> +	list_for_each_entry(dp, &dps, list_node) {
> +		struct flow_table *old_table = genl_dereference(dp->table);
> +		struct flow_table *new_table;
> +
> +		new_table = ovs_flow_tbl_rehash(old_table);
> +
> +		if (!IS_ERR(new_table)) {
> +			rcu_assign_pointer(dp->table, new_table);
> +			ovs_flow_tbl_deferred_destroy(old_table);
> +		}
> +	}
> +}
> +
> +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
> +static int ovs_dp_cmd_rehash(struct sk_buff *skb, struct genl_info *info)
> +{
> +	__rehash_flow_table();
> +	return 0;
> +}
> +#endif
> +
> static struct genl_ops dp_datapath_genl_ops[] = {
> 	{ .cmd = OVS_DP_CMD_NEW,
> 	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
> @@ -1581,6 +1611,13 @@ static struct genl_ops dp_datapath_genl_ops[] = {
> 	  .policy = datapath_policy,
> 	  .doit = ovs_dp_cmd_set,
> 	},
> +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
> +	{ .cmd = OVS_DP_CMD_REHASH,
> +	  .flags = 0,
> +	  .policy = datapath_policy,
> +	  .doit = ovs_dp_cmd_rehash,
> +	},
> +#endif
> };
>
> static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
> @@ -2039,6 +2076,40 @@ error:
> 	return err;
> }
>
> +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
> +static void rehash_flow_table(struct work_struct *work)
> +{
> +	struct nlmsghdr *nlh;
> +	struct ovs_header *hdr;
> +	struct sk_buff *skb;
> +
> +	skb = genlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
> +	if (!skb)
> +		goto out;
> +
> +	hdr = genlmsg_put(skb, 0, 0, &dp_datapath_genl_family,
> +			  0, OVS_DP_CMD_REHASH);
> +	if (!hdr) {
> +		kfree_skb(skb);
> +		return;
> +	}
> +
> +	nlh = nlmsg_hdr(skb);
> +	nlh->nlmsg_flags |= NLM_F_REQUEST;
> +	genlmsg_unicast(&init_net, skb, 0);
> +out:
> +	schedule_delayed_work(&rehash_flow_wq, REHASH_FLOW_INTERVAL);
> +}
> +#else
> +static void rehash_flow_table(struct work_struct *work)
> +{
> +	genl_lock();
> +	__rehash_flow_table();
> +	genl_unlock();
> +	schedule_delayed_work(&rehash_flow_wq, REHASH_FLOW_INTERVAL);
> +}
> +#endif
> +
> static int __init dp_init(void)
> {
> 	struct sk_buff *dummy_skb;
> @@ -2049,10 +2120,14 @@
> 	pr_info("Open vSwitch switching datapath %s, built "__DATE__" "__TIME__"\n",
> 		VERSION BUILDNR);
>
> -	err = ovs_tnl_init();
> +	err = ovs_workqueues_init();
> 	if (err)
> 		goto error;
>
> +	err = ovs_tnl_init();
> +	if (err)
> +		goto error_wq;
> +
> 	err = ovs_flow_init();
> 	if (err)
> 		goto error_tnl_exit;
> @@ -2069,8 +2144,9 @@
> 	if (err < 0)
> 		goto error_unreg_notifier;
>
> -	return 0;
> +	schedule_delayed_work(&rehash_flow_wq, REHASH_FLOW_INTERVAL);
>
> +	return 0;
> error_unreg_notifier:
> 	unregister_netdevice_notifier(&ovs_dp_device_notifier);
> error_vport_exit:
> @@ -2079,6 +2155,8 @@ error_flow_exit:
> 	ovs_flow_exit();
> error_tnl_exit:
> 	ovs_tnl_exit();
> +error_wq:
> +	ovs_workqueues_exit();
> error:
> 	return err;
> }
> @@ -2086,6 +2164,8 @@ error:
> static void dp_cleanup(void)
> {
> 	rcu_barrier();
> +	cancel_delayed_work_sync(&rehash_flow_wq);
> +	ovs_workqueues_exit();
> 	dp_unregister_genl(ARRAY_SIZE(dp_genl_families));
> 	unregister_netdevice_notifier(&ovs_dp_device_notifier);
> 	ovs_vport_exit();
> diff --git a/datapath/flow.c b/datapath/flow.c
> index 78dea3a..bb9533a 100644
> --- a/datapath/flow.c
> +++ b/datapath/flow.c
> @@ -47,7 +47,6 @@
> #include "vlan.h"
>
> static struct kmem_cache *flow_cache;
> -static unsigned int hash_seed __read_mostly;
>
> static int check_header(struct sk_buff *skb, int len)
> {
> @@ -238,6 +237,7 @@ struct sw_flow *ovs_flow_alloc(void)
>
> static struct hlist_head *find_bucket(struct flow_table *table, u32 hash)
> {
> +	hash = jhash_1word(hash, table->hash_seed);
> 	return flex_array_get(table->buckets,
> 			(hash & (table->n_buckets - 1)));
> }
> @@ -285,6 +285,9 @@ struct flow_table *ovs_flow_tbl_alloc(int new_size)
> 	}
> 	table->n_buckets = new_size;
> 	table->count = 0;
> +	table->node_ver = 0;
> +	table->keep_flows = false;
> +	get_random_bytes(&table->hash_seed, sizeof(u32));
>
> 	return table;
> }
> @@ -302,17 +305,22 @@ void ovs_flow_tbl_destroy(struct flow_table *table)
> 	if (!table)
> 		return;
>
> +	if (table->keep_flows)
> +		goto skip_flows;
> +
> 	for (i = 0; i < table->n_buckets; i++) {
> 		struct sw_flow *flow;
> 		struct hlist_head *head = flex_array_get(table->buckets, i);
> 		struct hlist_node *node, *n;
> +		int ver = table->node_ver;
>
> -		hlist_for_each_entry_safe(flow, node, n, head, hash_node) {
> -			hlist_del_init_rcu(&flow->hash_node);
> +		hlist_for_each_entry_safe(flow, node, n, head, hash_node[ver]) {
> +			hlist_del_rcu(&flow->hash_node[ver]);
> 			flow_free(flow);
> 		}
> 	}
>
> +skip_flows:
> 	free_buckets(table->buckets);
> 	kfree(table);
> }
> @@ -337,12 +345,14 @@ struct sw_flow *ovs_flow_tbl_next(struct flow_table *table, u32 *bucket, u32 *la
> 	struct sw_flow *flow;
> 	struct hlist_head *head;
> 	struct hlist_node *n;
> +	int ver;
> 	int i;
>
> +	ver = table->node_ver;
> 	while (*bucket < table->n_buckets) {
> 		i = 0;
> 		head = flex_array_get(table->buckets, *bucket);
> -		hlist_for_each_entry_rcu(flow, n, head, hash_node) {
> +		hlist_for_each_entry_rcu(flow, n, head, hash_node[ver]) {
> 			if (i < *last) {
> 				i++;
> 				continue;
> @@ -357,32 +367,52 @@ struct sw_flow *ovs_flow_tbl_next(struct flow_table *table, u32 *bucket, u32 *la
> 	return NULL;
> }
>
> -struct flow_table *ovs_flow_tbl_expand(struct flow_table *table)
> +static void flow_table_copy_flows(struct flow_table *old, struct flow_table *new)
> {
> -	struct flow_table *new_table;
> -	int n_buckets = table->n_buckets * 2;
> +	int old_ver;
> 	int i;
>
> -	new_table = ovs_flow_tbl_alloc(n_buckets);
> -	if (!new_table)
> -		return ERR_PTR(-ENOMEM);
> +	old_ver = old->node_ver;
> +	new->node_ver = !old_ver;
>
> -	for (i = 0; i < table->n_buckets; i++) {
> +	/* Insert in new table. */
> +	for (i = 0; i < old->n_buckets; i++) {
> 		struct sw_flow *flow;
> 		struct hlist_head *head;
> -		struct hlist_node *n, *pos;
> +		struct hlist_node *n;
>
> -		head = flex_array_get(table->buckets, i);
> +		head = flex_array_get(old->buckets, i);
>
> -		hlist_for_each_entry_safe(flow, n, pos, head, hash_node) {
> -			hlist_del_init_rcu(&flow->hash_node);
> -			ovs_flow_tbl_insert(new_table, flow);
> -		}
> +		hlist_for_each_entry(flow, n, head, hash_node[old_ver])
> +			ovs_flow_tbl_insert(new, flow);
> 	}
> +	old->keep_flows = true;
> +}
> +
> +static struct flow_table *__flow_tbl_rehash(struct flow_table *table,
> +					    int n_buckets)
> +{
> +	struct flow_table *new_table;
> +
> +	new_table = ovs_flow_tbl_alloc(n_buckets);
> +	if (!new_table)
> +		return ERR_PTR(-ENOMEM);
> +
> +	flow_table_copy_flows(table, new_table);
>
> 	return new_table;
> }
>
> +struct flow_table *ovs_flow_tbl_rehash(struct flow_table *table)
> +{
> +	return __flow_tbl_rehash(table, table->n_buckets);
> +}
> +
> +struct flow_table *ovs_flow_tbl_expand(struct flow_table *table)
> +{
> +	return __flow_tbl_rehash(table, table->n_buckets * 2);
> +}
> +
> /* RCU callback used by ovs_flow_deferred_free. */
> static void rcu_free_flow_callback(struct rcu_head *rcu)
> {
> @@ -761,7 +791,7 @@ out:
>
> u32 ovs_flow_hash(const struct sw_flow_key *key, int key_len)
> {
> -	return jhash2((u32 *)key, DIV_ROUND_UP(key_len, sizeof(u32)), hash_seed);
> +	return jhash2((u32 *)key, DIV_ROUND_UP(key_len, sizeof(u32)), 0);
> }
>
> struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *table,
> @@ -775,7 +805,7 @@ struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *table,
> 	hash = ovs_flow_hash(key, key_len);
>
> 	head = find_bucket(table, hash);
> -	hlist_for_each_entry_rcu(flow, n, head, hash_node) {
> +	hlist_for_each_entry_rcu(flow, n, head, hash_node[table->node_ver]) {
>
> 		if (flow->hash == hash &&
> 		    !memcmp(&flow->key, key, key_len)) {
> @@ -790,17 +820,15 @@ void ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow)
> 	struct hlist_head *head;
>
> 	head = find_bucket(table, flow->hash);
> -	hlist_add_head_rcu(&flow->hash_node, head);
> +	hlist_add_head_rcu(&flow->hash_node[table->node_ver], head);
> 	table->count++;
> }
>
> void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
> {
> -	if (!hlist_unhashed(&flow->hash_node)) {
> -		hlist_del_init_rcu(&flow->hash_node);
> -		table->count--;
> -		BUG_ON(table->count < 0);
> -	}
> +	hlist_del_rcu(&flow->hash_node[table->node_ver]);
> +	table->count--;
> +	BUG_ON(table->count < 0);
> }
>
> /* The size of the argument for each %OVS_KEY_ATTR_* Netlink attribute. */
> @@ -1345,8 +1373,6 @@ int ovs_flow_init(void)
> 	if (flow_cache == NULL)
> 		return -ENOMEM;
>
> -	get_random_bytes(&hash_seed, sizeof(hash_seed));
> -
> 	return 0;
> }
>
> diff --git a/datapath/flow.h b/datapath/flow.h
> index 36e738d..1a91d68 100644
> --- a/datapath/flow.h
> +++ b/datapath/flow.h
> @@ -96,7 +96,7 @@ struct sw_flow_key {
>
> struct sw_flow {
> 	struct rcu_head rcu;
> -	struct hlist_node hash_node;
> +	struct hlist_node hash_node[2];
> 	u32 hash;
>
> 	struct sw_flow_key key;
> @@ -174,6 +174,9 @@ struct flow_table {
> 	struct flex_array *buckets;
> 	unsigned int count, n_buckets;
> 	struct rcu_head rcu;
> +	int node_ver;
> +	u32 hash_seed;
> +	bool keep_flows;
> };
>
> static inline int ovs_flow_tbl_count(struct flow_table *table)
> @@ -192,6 +195,7 @@ void ovs_flow_tbl_destroy(struct flow_table *table);
> void ovs_flow_tbl_deferred_destroy(struct flow_table *table);
> struct flow_table *ovs_flow_tbl_alloc(int new_size);
> struct flow_table *ovs_flow_tbl_expand(struct flow_table *table);
> +struct flow_table *ovs_flow_tbl_rehash(struct flow_table *table);
> void ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow);
> void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow);
> u32 ovs_flow_hash(const struct sw_flow_key *key, int key_len);
> diff --git a/datapath/linux/Modules.mk b/datapath/linux/Modules.mk
> index fdd952e..b3d8078 100644
> --- a/datapath/linux/Modules.mk
> +++ b/datapath/linux/Modules.mk
> @@ -9,7 +9,8 @@ openvswitch_sources += \
> 	linux/compat/netdevice.c \
> 	linux/compat/reciprocal_div.c \
> 	linux/compat/skbuff-openvswitch.c \
> -	linux/compat/time.c
> +	linux/compat/time.c \
> +	linux/compat/workqueue.c
> openvswitch_headers += \
> 	linux/compat/include/linux/compiler.h \
> 	linux/compat/include/linux/compiler-gcc.h \
> diff --git a/datapath/linux/compat/include/linux/workqueue.h b/datapath/linux/compat/include/linux/workqueue.h
> index 01c6345..2754f60 100644
> --- a/datapath/linux/compat/include/linux/workqueue.h
> +++ b/datapath/linux/compat/include/linux/workqueue.h
> @@ -1,41 +1,75 @@
> #ifndef __LINUX_WORKQUEUE_WRAPPER_H
> #define __LINUX_WORKQUEUE_WRAPPER_H 1
>
> +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
> #include_next <linux/workqueue.h>
> +static inline int __init ovs_workqueues_init(void) { return 0; }
> +static inline void ovs_workqueues_exit(void) {}
> +
> +#else
> +#include <linux/timer.h>
> +
> +int __init ovs_workqueues_init(void);
> +void ovs_workqueues_exit(void);
>
> -#include <linux/version.h>
> -#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
>
> /* Older kernels have an implementation of work queues with some very bad
> * characteristics when trying to cancel work (potential deadlocks, use after
> - * free, etc. Here we directly use timers instead for delayed work. It's not
> - * optimal but it is better than the alternative. Note that work queues
> - * normally run in process context but this will cause them to operate in
> - * softirq context.
> + * free, etc.). Therefore we implement a simple OVS-specific work queue
> + * using a single worker thread. The work-queue API is kept similar for
> + * compatibility.
> */
>
> -#include <linux/timer.h>
> +struct workqueue_struct;
>
> -#undef DECLARE_DELAYED_WORK
> -#define DECLARE_DELAYED_WORK(n, f) \
> -	struct timer_list n = TIMER_INITIALIZER((void (*)(unsigned long))f, 0, 0)
> +struct work_struct;
> +typedef void (*work_func_t)(struct work_struct *work);
>
> -#define schedule_delayed_work rpl_schedule_delayed_work
> -static inline int schedule_delayed_work(struct timer_list *timer, unsigned long delay)
> -{
> -	if (timer_pending(timer))
> -		return 0;
> +#define work_data_bits(work) ((unsigned long *)(&(work)->data))
> +
> +struct work_struct {
> +#define WORK_STRUCT_PENDING 0	/* T if work item pending execution */
> +	atomic_long_t data;
> +	struct list_head entry;
> +	work_func_t func;
> +};
> +
> +#define WORK_DATA_INIT() ATOMIC_LONG_INIT(0)
> +
> +#define work_clear_pending(work) \
> +	clear_bit(WORK_STRUCT_PENDING, work_data_bits(work))
>
> -	mod_timer(timer, jiffies + delay);
> -	return 1;
> +struct delayed_work {
> +	struct work_struct work;
> +	struct timer_list timer;
> +};
> +
> +#define __WORK_INITIALIZER(n, f) { \
> +	.data = WORK_DATA_INIT(), \
> +	.entry = { &(n).entry, &(n).entry }, \
> +	.func = (f), \
> }
>
> -#define cancel_delayed_work_sync rpl_cancel_delayed_work_sync
> -static inline int cancel_delayed_work_sync(struct timer_list *timer)
> -{
> -	return del_timer_sync(timer);
> +#define __DELAYED_WORK_INITIALIZER(n, f) { \
> +	.work = __WORK_INITIALIZER((n).work, (f)), \
> +	.timer = TIMER_INITIALIZER(NULL, 0, 0), \
> }
>
> +#define DECLARE_DELAYED_WORK(n, f) \
> +	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f)
> +
> +#define schedule_delayed_work rpl_schedule_delayed_work
> +int schedule_delayed_work(struct delayed_work *dwork, unsigned long delay);
> +
> +#define cancel_delayed_work_sync rpl_cancel_delayed_work_sync
> +int cancel_delayed_work_sync(struct delayed_work *dwork);
> +
> +#define INIT_WORK(_work, _func) \
> +	do { \
> +		(_work)->data = (atomic_long_t) WORK_DATA_INIT(); \
> +		INIT_LIST_HEAD(&(_work)->entry); \
> +		(_work)->func = (_func); \
> +	} while (0)
> +
> #endif /* kernel version < 2.6.23 */
>
> #endif
> diff --git a/datapath/linux/compat/workqueue.c b/datapath/linux/compat/workqueue.c
> new file mode 100644
> index 0000000..f74a08f
> --- /dev/null
> +++ b/datapath/linux/compat/workqueue.c
> @@ -0,0 +1,217 @@
> +/*
> + * Derived from kernel/workqueue.c.
> + *
> + * This is the generic async execution mechanism. Work items are
> + * executed in process context.
> + *
> + */
> +
> +#include <linux/kernel.h>
> +#include <linux/sched.h>
> +#include <linux/init.h>
> +#include <linux/signal.h>
> +#include <linux/completion.h>
> +#include <linux/workqueue.h>
> +#include <linux/slab.h>
> +#include <linux/cpu.h>
> +#include <linux/notifier.h>
> +#include <linux/kthread.h>
> +#include <linux/hardirq.h>
> +#include <linux/mempolicy.h>
> +#include <linux/kallsyms.h>
> +#include <linux/debug_locks.h>
> +#include <linux/lockdep.h>
> +#include <linux/idr.h>
> +
> +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
> +
> +static spinlock_t wq_lock;
> +static struct list_head workq;
> +static wait_queue_head_t more_work;
> +static struct task_struct *workq_thread;
> +static struct work_struct *current_work;
> +
> +static void queue_work(struct work_struct *work)
> +{
> +	unsigned long flags;
> +
> +	spin_lock_irqsave(&wq_lock, flags);
> +	list_add_tail(&work->entry, &workq);
> +	wake_up(&more_work);
> +	spin_unlock_irqrestore(&wq_lock, flags);
> +}
> +
> +static void _delayed_work_timer_fn(unsigned long __data)
> +{
> +	struct delayed_work *dwork = (struct delayed_work *)__data;
> +	queue_work(&dwork->work);
> +}
> +
> +static void __queue_delayed_work(struct delayed_work *dwork,
> +				 unsigned long delay)
> +{
> +	struct timer_list *timer = &dwork->timer;
> +	struct work_struct *work = &dwork->work;
> +
> +	BUG_ON(timer_pending(timer));
> +	BUG_ON(!list_empty(&work->entry));
> +
> +	timer->expires = jiffies + delay;
> +	timer->data = (unsigned long)dwork;
> +	timer->function = _delayed_work_timer_fn;
> +
> +	add_timer(timer);
> +}
> +
> +int schedule_delayed_work(struct delayed_work *dwork, unsigned long delay)
> +{
> +	if (test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(&dwork->work)))
> +		return 0;
> +
> +	if (delay == 0)
> +		queue_work(&dwork->work);
> +	else
> +		__queue_delayed_work(dwork, delay);
> +
> +	return 1;
> +}
> +
> +struct wq_barrier {
> +	struct work_struct work;
> +	struct completion done;
> +};
> +
> +static void wq_barrier_func(struct work_struct *work)
> +{
> +	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
> +	complete(&barr->done);
> +}
> +
> +static void workqueue_barrier(struct work_struct *work)
> +{
> +	bool need_barrier;
> +	struct wq_barrier barr;
> +
> +	spin_lock_irq(&wq_lock);
> +	if (current_work != work)
> +		need_barrier = false;
> +	else {
> +		INIT_WORK(&barr.work, wq_barrier_func);
> +		init_completion(&barr.done);
> +		list_add(&work->entry, &workq);
> +		wake_up(&more_work);
> +		need_barrier = true;
> +	}
> +	spin_unlock_irq(&wq_lock);
> +
> +	if (need_barrier)
> +		wait_for_completion(&barr.done);
> +}
> +
> +static int try_to_grab_pending(struct work_struct *work)
> +{
> +	int ret;
> +
> +	BUG_ON(in_interrupt());
> +
> +	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work)))
> +		return 0;
> +
> +	spin_lock_irq(&wq_lock);
> +	if (!list_empty(&work->entry)) {
> +		list_del_init(&work->entry);
> +		ret = 0;
> +	} else
> +		/* Already executed, retry. */
> +		ret = -1;
> +	spin_unlock_irq(&wq_lock);
> +
> +	return ret;
> +}
> +
> +static int __cancel_work_timer(struct work_struct *work,
> +			       struct timer_list *timer)
> +{
> +	int ret;
> +
> +	for (;;) {
> +		ret = (timer && likely(del_timer(timer)));
> +		if (ret) /* Was active timer, return true. */
> +			break;
> +
> +		/* Inactive timer case */
> +		ret = try_to_grab_pending(work);
> +		if (!ret)
> +			break;
> +	}
> +	workqueue_barrier(work);
> +	work_clear_pending(work);
> +	return ret;
> +}
> +
> +int cancel_delayed_work_sync(struct delayed_work *dwork)
> +{
> +	return __cancel_work_timer(&dwork->work, &dwork->timer);
> +}
> +
> +static void run_workqueue(void)
> +{
> +	spin_lock_irq(&wq_lock);
> +	while (!list_empty(&workq)) {
> +		struct work_struct *work = list_entry(workq.next,
> +				struct work_struct, entry);
> +
> +		work_func_t f = work->func;
> +		list_del_init(workq.next);
> +		current_work = work;
> +		spin_unlock_irq(&wq_lock);
> +
> +		work_clear_pending(work);
> +		f(work);
> +
> +		BUG_ON(in_interrupt());
> +		spin_lock_irq(&wq_lock);
> +		current_work = NULL;
> +	}
> +	spin_unlock_irq(&wq_lock);
> +}
> +
> +static int worker_thread(void *dummy)
> +{
> +	DEFINE_WAIT(wait);
> +
> +	for (;;) {
> +		prepare_to_wait(&more_work, &wait, TASK_INTERRUPTIBLE);
> +		if (!kthread_should_stop() && list_empty(&workq))
> +			schedule();
> +		finish_wait(&more_work, &wait);
> +
> +		if (kthread_should_stop())
> +			break;
> +
> +		run_workqueue();
> +	}
> +
> +	return 0;
> +}
> +
> +int __init ovs_workqueues_init(void)
> +{
> +	spin_lock_init(&wq_lock);
> +	INIT_LIST_HEAD(&workq);
> +	init_waitqueue_head(&more_work);
> +
> +	workq_thread = kthread_create(worker_thread, NULL, "ovs_workq");
> +	if (IS_ERR(workq_thread))
> +		return PTR_ERR(workq_thread);
> +
> +	wake_up_process(workq_thread);
> +	return 0;
> +}
> +
> +void ovs_workqueues_exit(void)
> +{
> +	BUG_ON(!list_empty(&workq));
> +	kthread_stop(workq_thread);
> +}
> +#endif
> diff --git a/include/linux/openvswitch.h b/include/linux/openvswitch.h
> index 0578b5f..21f1060 100644
> --- a/include/linux/openvswitch.h
> +++ b/include/linux/openvswitch.h
> @@ -66,7 +66,8 @@ enum ovs_datapath_cmd {
> 	OVS_DP_CMD_NEW,
> 	OVS_DP_CMD_DEL,
> 	OVS_DP_CMD_GET,
> -	OVS_DP_CMD_SET
> +	OVS_DP_CMD_SET,
> +	OVS_DP_CMD_REHASH,
> };
>
> /**
> --
> 1.7.1
>