[ovs-dev] [PATCH RFC 2/4] datapath: Move mega-flow list out of rehashing struct.
Pravin B Shelar
pshelar at nicira.com
Fri Sep 20 22:27:07 UTC 2013
The ovs-flow rehash does not touch the mega-flow list. The following patch
moves it to struct datapath. This avoids one extra indirection for
accessing the mega-flow list head on every packet receive.
Signed-off-by: Pravin B Shelar <pshelar at nicira.com>
---
datapath/datapath.c | 81 +++++---------------
datapath/datapath.h | 6 +-
datapath/flow_table.c | 205 +++++++++++++++++++++++++++++++------------------
datapath/flow_table.h | 28 +++----
4 files changed, 163 insertions(+), 157 deletions(-)
diff --git a/datapath/datapath.c b/datapath/datapath.c
index 13002b0..ff572ac 100644
--- a/datapath/datapath.c
+++ b/datapath/datapath.c
@@ -60,8 +60,6 @@
#include "vport-internal_dev.h"
#include "vport-netdev.h"
-#define REHASH_FLOW_INTERVAL (10 * 60 * HZ)
-
int ovs_net_id __read_mostly;
static void ovs_notify(struct sk_buff *skb, struct genl_info *info,
@@ -164,7 +162,7 @@ static void destroy_dp_rcu(struct rcu_head *rcu)
{
struct datapath *dp = container_of(rcu, struct datapath, rcu);
- ovs_flow_tbl_destroy((__force struct flow_table *)dp->table, false);
+ ovs_flow_tbl_destroy(&dp->table, false);
free_percpu(dp->stats_percpu);
release_net(ovs_dp_get_net(dp));
kfree(dp->ports);
@@ -236,7 +234,7 @@ void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb)
}
/* Look up flow. */
- flow = ovs_flow_lookup(rcu_dereference(dp->table), &key);
+ flow = ovs_flow_lookup(&dp->table, &key);
if (unlikely(!flow)) {
struct dp_upcall_info upcall;
@@ -455,23 +453,6 @@ out:
return err;
}
-/* Called with ovs_mutex. */
-static int flush_flows(struct datapath *dp)
-{
- struct flow_table *old_table;
- struct flow_table *new_table;
-
- old_table = ovsl_dereference(dp->table);
- new_table = ovs_flow_tbl_alloc(TBL_MIN_BUCKETS);
- if (!new_table)
- return -ENOMEM;
-
- rcu_assign_pointer(dp->table, new_table);
-
- ovs_flow_tbl_destroy(old_table, true);
- return 0;
-}
-
static void clear_stats(struct sw_flow *flow)
{
flow->used = 0;
@@ -586,11 +567,9 @@ static struct genl_ops dp_packet_genl_ops[] = {
static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats)
{
- struct flow_table *table;
int i;
- table = rcu_dereference_check(dp->table, lockdep_ovsl_is_held());
- stats->n_flows = ovs_flow_tbl_count(table);
+ stats->n_flows = ovs_flow_tbl_count(&dp->table);
stats->n_hit = stats->n_missed = stats->n_lost = 0;
for_each_possible_cpu(i) {
@@ -776,7 +755,6 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
struct sw_flow_mask mask;
struct sk_buff *reply;
struct datapath *dp;
- struct flow_table *table;
struct sw_flow_actions *acts = NULL;
struct sw_flow_match match;
int error;
@@ -817,12 +795,9 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
if (!dp)
goto err_unlock_ovs;
- table = ovsl_dereference(dp->table);
-
/* Check if this is a duplicate flow */
- flow = ovs_flow_lookup(table, &key);
+ flow = ovs_flow_lookup(&dp->table, &key);
if (!flow) {
- struct flow_table *new_table = NULL;
struct sw_flow_mask *mask_p;
/* Bail out if we're not allowed to create a new flow. */
@@ -830,19 +805,6 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
if (info->genlhdr->cmd == OVS_FLOW_CMD_SET)
goto err_unlock_ovs;
- /* Expand table, if necessary, to make room. */
- if (ovs_flow_tbl_need_to_expand(table))
- new_table = ovs_flow_tbl_expand(table);
- else if (time_after(jiffies, dp->last_rehash + REHASH_FLOW_INTERVAL))
- new_table = ovs_flow_tbl_rehash(table);
-
- if (new_table && !IS_ERR(new_table)) {
- rcu_assign_pointer(dp->table, new_table);
- ovs_flow_tbl_destroy(table, true);
- table = ovsl_dereference(dp->table);
- dp->last_rehash = jiffies;
- }
-
/* Allocate flow. */
flow = ovs_flow_alloc();
if (IS_ERR(flow)) {
@@ -855,7 +817,7 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
flow->unmasked_key = key;
/* Make sure mask is unique in the system */
- mask_p = ovs_sw_flow_mask_find(table, &mask);
+ mask_p = ovs_sw_flow_mask_find(&dp->table, &mask);
if (!mask_p) {
/* Allocate a new mask if none exsits. */
mask_p = ovs_sw_flow_mask_alloc();
@@ -863,7 +825,7 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
goto err_flow_free;
mask_p->key = mask.key;
mask_p->range = mask.range;
- ovs_sw_flow_mask_insert(table, mask_p);
+ ovs_sw_flow_mask_insert(&dp->table, mask_p);
}
ovs_sw_flow_mask_add_ref(mask_p);
@@ -871,7 +833,7 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
rcu_assign_pointer(flow->sf_acts, acts);
/* Put flow in bucket. */
- ovs_flow_insert(table, flow);
+ ovs_flow_insert(&dp->table, flow);
reply = ovs_flow_cmd_build_info(flow, dp, info->snd_portid,
info->snd_seq, OVS_FLOW_CMD_NEW);
@@ -939,7 +901,6 @@ static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
struct sk_buff *reply;
struct sw_flow *flow;
struct datapath *dp;
- struct flow_table *table;
struct sw_flow_match match;
int err;
@@ -960,8 +921,7 @@ static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
goto unlock;
}
- table = ovsl_dereference(dp->table);
- flow = ovs_flow_lookup_unmasked_key(table, &match);
+ flow = ovs_flow_lookup_unmasked_key(&dp->table, &match);
if (!flow) {
err = -ENOENT;
goto unlock;
@@ -989,7 +949,6 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
struct sk_buff *reply;
struct sw_flow *flow;
struct datapath *dp;
- struct flow_table *table;
struct sw_flow_match match;
int err;
@@ -1001,7 +960,7 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
}
if (!a[OVS_FLOW_ATTR_KEY]) {
- err = flush_flows(dp);
+ err = ovs_flush_flows(&dp->table);
goto unlock;
}
@@ -1010,10 +969,9 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
if (err)
goto unlock;
- table = ovsl_dereference(dp->table);
- flow = ovs_flow_lookup_unmasked_key(table, &match);
- if (!flow) {
- err = -ENOENT;
+ flow = ovs_flow_lookup_unmasked_key(&dp->table, &match);
+ if (!flow) {
+ err = -ENOENT;
goto unlock;
}
@@ -1023,7 +981,7 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
goto unlock;
}
- ovs_flow_remove(table, flow);
+ ovs_flow_remove(&dp->table, flow);
err = ovs_flow_cmd_fill_info(flow, dp, reply, info->snd_portid,
info->snd_seq, 0, OVS_FLOW_CMD_DEL);
@@ -1042,8 +1000,8 @@ unlock:
static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
+ struct hash_table *htable;
struct datapath *dp;
- struct flow_table *table;
rcu_read_lock();
dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
@@ -1051,15 +1009,15 @@ static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
rcu_read_unlock();
return -ENODEV;
}
+ htable = rcu_dereference(dp->table.htable);
- table = rcu_dereference(dp->table);
for (;;) {
struct sw_flow *flow;
u32 bucket, obj;
bucket = cb->args[0];
obj = cb->args[1];
- flow = ovs_flow_dump_next(table, &bucket, &obj);
+ flow = ovs_flow_dump_next(htable, &bucket, &obj);
if (!flow)
break;
@@ -1223,9 +1181,8 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
ovs_dp_set_net(dp, hold_net(sock_net(skb->sk)));
/* Allocate table. */
- err = -ENOMEM;
- rcu_assign_pointer(dp->table, ovs_flow_tbl_alloc(TBL_MIN_BUCKETS));
- if (!dp->table)
+ err = ovs_flow_table_init(&dp->table);
+ if (err)
goto err_free_dp;
dp->stats_percpu = alloc_percpu(struct dp_stats_percpu);
@@ -1282,7 +1239,7 @@ err_destroy_ports_array:
err_destroy_percpu:
free_percpu(dp->stats_percpu);
err_destroy_table:
- ovs_flow_tbl_destroy(ovsl_dereference(dp->table), false);
+ ovs_flow_tbl_destroy(&dp->table, false);
err_free_dp:
release_net(ovs_dp_get_net(dp));
kfree(dp);
diff --git a/datapath/datapath.h b/datapath/datapath.h
index 403ea00..64920de 100644
--- a/datapath/datapath.h
+++ b/datapath/datapath.h
@@ -60,12 +60,11 @@ struct dp_stats_percpu {
* struct datapath - datapath for flow-based packet switching
* @rcu: RCU callback head for deferred destruction.
* @list_node: Element in global 'dps' list.
- * @table: Current flow table. Protected by ovs_mutex and RCU.
+ * @table: flow table.
* @ports: Hash table for ports. %OVSP_LOCAL port always exists. Protected by
* ovs_mutex and RCU.
* @stats_percpu: Per-CPU datapath statistics.
* @net: Reference to net namespace.
- * @last_rehash: Timestamp of last rehash.
*
* Context: See the comment on locking at the top of datapath.c for additional
* locking information.
@@ -75,7 +74,7 @@ struct datapath {
struct list_head list_node;
/* Flow table. */
- struct flow_table __rcu *table;
+ struct flow_table table;
/* Switch ports. */
struct hlist_head *ports;
@@ -87,7 +86,6 @@ struct datapath {
/* Network namespace ref. */
struct net *net;
#endif
- unsigned long last_rehash;
};
/**
diff --git a/datapath/flow_table.c b/datapath/flow_table.c
index cfabc44..f7655fa 100644
--- a/datapath/flow_table.c
+++ b/datapath/flow_table.c
@@ -44,8 +44,12 @@
#include <net/ipv6.h>
#include <net/ndisc.h>
+#include "datapath.h"
#include "vlan.h"
+#define TBL_MIN_BUCKETS 1024
+#define REHASH_INTERVAL (10 * 60 * HZ)
+
static struct kmem_cache *flow_cache;
void ovs_match_init(struct sw_flow_match *match,
@@ -136,6 +140,13 @@ struct sw_flow *ovs_flow_alloc(void)
return flow;
}
+int ovs_flow_tbl_count(struct flow_table *table)
+{
+ struct hash_table *htable = rcu_dereference_check(table->htable, lockdep_ovsl_is_held());
+
+ return htable->count;
+}
+
static struct flex_array *alloc_buckets(unsigned int n_buckets)
{
struct flex_array *buckets;
@@ -190,7 +201,7 @@ static void free_buckets(struct flex_array *buckets)
flex_array_free(buckets);
}
-static void __flow_tbl_destroy(struct flow_table *table)
+static void __flow_tbl_destroy(struct hash_table *table)
{
int i;
@@ -209,17 +220,14 @@ static void __flow_tbl_destroy(struct flow_table *table)
}
}
- BUG_ON(!list_empty(table->mask_list));
- kfree(table->mask_list);
-
skip_flows:
free_buckets(table->buckets);
kfree(table);
}
-static struct flow_table *__flow_tbl_alloc(int new_size)
+static struct hash_table *__flow_tbl_alloc(int new_size)
{
- struct flow_table *table = kmalloc(sizeof(*table), GFP_KERNEL);
+ struct hash_table *table = kmalloc(sizeof(*table), GFP_KERNEL);
if (!table)
return NULL;
@@ -235,37 +243,33 @@ static struct flow_table *__flow_tbl_alloc(int new_size)
table->node_ver = 0;
table->keep_flows = false;
get_random_bytes(&table->hash_seed, sizeof(u32));
- table->mask_list = NULL;
return table;
}
-struct flow_table *ovs_flow_tbl_alloc(int new_size)
+int ovs_flow_table_init(struct flow_table *table)
{
- struct flow_table *table = __flow_tbl_alloc(new_size);
+ struct hash_table *htable;
- if (!table)
- return NULL;
+ htable = __flow_tbl_alloc(TBL_MIN_BUCKETS);
- table->mask_list = kmalloc(sizeof(struct list_head), GFP_KERNEL);
- if (!table->mask_list) {
- table->keep_flows = true;
- __flow_tbl_destroy(table);
- return NULL;
- }
- INIT_LIST_HEAD(table->mask_list);
+ if (!htable)
+ return -ENOMEM;
- return table;
+ rcu_assign_pointer(table->htable, htable);
+ INIT_LIST_HEAD(&table->mask_list);
+ table->last_rehash = jiffies;
+ return 0;
}
static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
{
- struct flow_table *table = container_of(rcu, struct flow_table, rcu);
+ struct hash_table *table = container_of(rcu, struct hash_table, rcu);
__flow_tbl_destroy(table);
}
-void ovs_flow_tbl_destroy(struct flow_table *table, bool deferred)
+static void __ovs_flow_tbl_destroy(struct hash_table *table, bool deferred)
{
if (!table)
return;
@@ -276,7 +280,14 @@ void ovs_flow_tbl_destroy(struct flow_table *table, bool deferred)
__flow_tbl_destroy(table);
}
-struct sw_flow *ovs_flow_dump_next(struct flow_table *table,
+void ovs_flow_tbl_destroy(struct flow_table *table, bool deferred)
+{
+ struct hash_table *htable = ovsl_dereference(table->htable);
+
+ __ovs_flow_tbl_destroy(htable, deferred);
+}
+
+struct sw_flow *ovs_flow_dump_next(struct hash_table *table,
u32 *bucket, u32 *last)
{
struct sw_flow *flow;
@@ -303,14 +314,14 @@ struct sw_flow *ovs_flow_dump_next(struct flow_table *table,
return NULL;
}
-static struct hlist_head *find_bucket(struct flow_table *table, u32 hash)
+static struct hlist_head *find_bucket(struct hash_table *table, u32 hash)
{
hash = jhash_1word(hash, table->hash_seed);
return flex_array_get(table->buckets,
(hash & (table->n_buckets - 1)));
}
-static void __tbl_insert(struct flow_table *table, struct sw_flow *flow)
+static void __tbl_insert(struct hash_table *table, struct sw_flow *flow)
{
struct hlist_head *head;
@@ -320,8 +331,8 @@ static void __tbl_insert(struct flow_table *table, struct sw_flow *flow)
table->count++;
}
-static void flow_table_copy_flows(struct flow_table *old,
- struct flow_table *new)
+static void flow_table_copy_flows(struct hash_table *old,
+ struct hash_table *new)
{
int old_ver;
int i;
@@ -340,32 +351,37 @@ static void flow_table_copy_flows(struct flow_table *old,
__tbl_insert(new, flow);
}
- new->mask_list = old->mask_list;
old->keep_flows = true;
}
-static struct flow_table *__flow_tbl_rehash(struct flow_table *table,
+static struct hash_table *__flow_tbl_rehash(struct hash_table *table,
int n_buckets)
{
- struct flow_table *new_table;
+ struct hash_table *new_htable;
- new_table = __flow_tbl_alloc(n_buckets);
- if (!new_table)
+ new_htable = __flow_tbl_alloc(n_buckets);
+ if (!new_htable)
return ERR_PTR(-ENOMEM);
- flow_table_copy_flows(table, new_table);
+ flow_table_copy_flows(table, new_htable);
- return new_table;
+ return new_htable;
}
-struct flow_table *ovs_flow_tbl_rehash(struct flow_table *table)
+int ovs_flush_flows(struct flow_table *flow_table)
{
- return __flow_tbl_rehash(table, table->n_buckets);
-}
+ struct hash_table *old_table;
+ struct hash_table *new_htable;
-struct flow_table *ovs_flow_tbl_expand(struct flow_table *table)
-{
- return __flow_tbl_rehash(table, table->n_buckets * 2);
+ old_table = ovsl_dereference(flow_table->htable);
+ new_htable = __flow_tbl_alloc(TBL_MIN_BUCKETS);
+ if (!new_htable)
+ return -ENOMEM;
+
+ rcu_assign_pointer(flow_table->htable, new_htable);
+
+ __ovs_flow_tbl_destroy(old_table, true);
+ return 0;
}
static u32 flow_hash(const struct sw_flow_key *key, int key_start,
@@ -418,31 +434,7 @@ static bool __flow_cmp_unmasked_key(const struct sw_flow *flow,
return __cmp_key(&flow->unmasked_key, key, key_start, key_end);
}
-bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
- const struct sw_flow_key *key, int key_end)
-{
- int key_start;
- key_start = flow_key_start(key);
-
- return __flow_cmp_unmasked_key(flow, key, key_start, key_end);
-
-}
-
-struct sw_flow *ovs_flow_lookup_unmasked_key(struct flow_table *table,
- struct sw_flow_match *match)
-{
- struct sw_flow_key *unmasked = match->key;
- int key_end = match->range.end;
- struct sw_flow *flow;
-
- flow = ovs_flow_lookup(table, unmasked);
- if (flow && (!ovs_flow_cmp_unmasked_key(flow, unmasked, key_end)))
- flow = NULL;
-
- return flow;
-}
-
-static struct sw_flow *masked_flow_lookup(struct flow_table *table,
+static struct sw_flow *masked_flow_lookup(struct hash_table *table,
const struct sw_flow_key *unmasked,
struct sw_flow_mask *mask)
{
@@ -465,14 +457,15 @@ static struct sw_flow *masked_flow_lookup(struct flow_table *table,
return NULL;
}
-struct sw_flow *ovs_flow_lookup(struct flow_table *tbl,
- const struct sw_flow_key *key)
+static struct sw_flow *__flow_lookup(struct list_head *mask_list,
+ struct hash_table *htbl,
+ const struct sw_flow_key *key)
{
struct sw_flow *flow = NULL;
struct sw_flow_mask *mask;
- list_for_each_entry_rcu(mask, tbl->mask_list, list) {
- flow = masked_flow_lookup(tbl, key, mask);
+ list_for_each_entry_rcu(mask, mask_list, list) {
+ flow = masked_flow_lookup(htbl, key, mask);
if (flow) /* Found */
break;
}
@@ -480,18 +473,82 @@ struct sw_flow *ovs_flow_lookup(struct flow_table *tbl,
return flow;
}
+struct sw_flow *ovs_flow_lookup(struct flow_table *tbl,
+ const struct sw_flow_key *key)
+{
+ struct hash_table *htbl = rcu_dereference(tbl->htable);
+
+ return __flow_lookup(&tbl->mask_list, htbl, key);
+}
+
+bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
+ const struct sw_flow_key *key,
+ int key_end)
+{
+ int key_start;
+ key_start = flow_key_start(key);
+
+ return __flow_cmp_unmasked_key(flow, key, key_start, key_end);
+
+}
+
+struct sw_flow *ovs_flow_lookup_unmasked_key(struct flow_table *table,
+ struct sw_flow_match *match)
+{
+ struct hash_table *htable = ovsl_dereference(table->htable);
+ struct sw_flow_key *unmasked = match->key;
+ int key_end = match->range.end;
+ struct sw_flow *flow;
+
+ flow = __flow_lookup(&table->mask_list, htable, unmasked);
+ if (flow && (!ovs_flow_cmp_unmasked_key(flow, unmasked, key_end)))
+ flow = NULL;
+
+ return flow;
+}
+
+static struct hash_table *flow_tbl_rehash(struct hash_table *table)
+{
+ return __flow_tbl_rehash(table, table->n_buckets);
+}
+
+static struct hash_table *flow_tbl_expand(struct hash_table *table)
+{
+ return __flow_tbl_rehash(table, table->n_buckets * 2);
+}
+
void ovs_flow_insert(struct flow_table *table, struct sw_flow *flow)
{
+ struct hash_table *htable = NULL;
+ struct hash_table *new_htable = NULL;
+
+ htable = ovsl_dereference(table->htable);
+
+ /* Expand table, if necessary, to make room. */
+ if (htable->count > htable->n_buckets)
+ new_htable = flow_tbl_expand(htable);
+ else if (time_after(jiffies, table->last_rehash + REHASH_INTERVAL))
+ new_htable = flow_tbl_rehash(htable);
+
+ if (new_htable && !IS_ERR(new_htable)) {
+ rcu_assign_pointer(table->htable, new_htable);
+ __ovs_flow_tbl_destroy(htable, true);
+ htable = ovsl_dereference(table->htable);
+ table->last_rehash = jiffies;
+ }
+
flow->hash = flow_hash(&flow->key, flow->mask->range.start,
flow->mask->range.end);
- __tbl_insert(table, flow);
+ __tbl_insert(htable, flow);
}
void ovs_flow_remove(struct flow_table *table, struct sw_flow *flow)
{
- BUG_ON(table->count == 0);
- hlist_del_rcu(&flow->hash_node[table->node_ver]);
- table->count--;
+ struct hash_table *htbl = ovsl_dereference(table->htable);
+
+ BUG_ON(htbl->count == 0);
+ hlist_del_rcu(&flow->hash_node[htbl->node_ver]);
+ htbl->count--;
}
struct sw_flow_mask *ovs_sw_flow_mask_alloc(void)
@@ -550,7 +607,7 @@ struct sw_flow_mask *ovs_sw_flow_mask_find(const struct flow_table *tbl,
{
struct list_head *ml;
- list_for_each(ml, tbl->mask_list) {
+ list_for_each(ml, &tbl->mask_list) {
struct sw_flow_mask *m;
m = container_of(ml, struct sw_flow_mask, list);
if (mask_equal(mask, m))
@@ -567,7 +624,7 @@ struct sw_flow_mask *ovs_sw_flow_mask_find(const struct flow_table *tbl,
*/
void ovs_sw_flow_mask_insert(struct flow_table *tbl, struct sw_flow_mask *mask)
{
- list_add_rcu(&mask->list, tbl->mask_list);
+ list_add_rcu(&mask->list, &tbl->mask_list);
}
/* Initializes the flow module.
diff --git a/datapath/flow_table.h b/datapath/flow_table.h
index 619ead6..6c3355e 100644
--- a/datapath/flow_table.h
+++ b/datapath/flow_table.h
@@ -36,18 +36,21 @@
#include "flow.h"
-#define TBL_MIN_BUCKETS 1024
-
-struct flow_table {
+struct hash_table {
struct flex_array *buckets;
unsigned int count, n_buckets;
struct rcu_head rcu;
- struct list_head *mask_list;
int node_ver;
u32 hash_seed;
bool keep_flows;
};
+struct flow_table {
+ struct hash_table __rcu *htable;
+ struct list_head mask_list;
+ unsigned long last_rehash;
+};
+
void ovs_match_init(struct sw_flow_match *match,
struct sw_flow_key *key, struct sw_flow_mask *mask);
@@ -61,15 +64,7 @@ void ovs_flow_free(struct sw_flow *, bool deferred);
void ovs_flow_used(struct sw_flow *, struct sk_buff *);
u64 ovs_flow_used_time(unsigned long flow_jiffies);
-static inline int ovs_flow_tbl_count(struct flow_table *table)
-{
- return table->count;
-}
-
-static inline int ovs_flow_tbl_need_to_expand(struct flow_table *table)
-{
- return (table->count > table->n_buckets);
-}
+int ovs_flow_tbl_count(struct flow_table *table);
struct sw_flow *ovs_flow_lookup(struct flow_table *,
const struct sw_flow_key *);
@@ -77,14 +72,12 @@ struct sw_flow *ovs_flow_lookup_unmasked_key(struct flow_table *table,
struct sw_flow_match *match);
void ovs_flow_tbl_destroy(struct flow_table *table, bool deferred);
-struct flow_table *ovs_flow_tbl_alloc(int new_size);
-struct flow_table *ovs_flow_tbl_expand(struct flow_table *table);
-struct flow_table *ovs_flow_tbl_rehash(struct flow_table *table);
+int ovs_flow_table_init(struct flow_table *);
void ovs_flow_insert(struct flow_table *table, struct sw_flow *flow);
void ovs_flow_remove(struct flow_table *table, struct sw_flow *flow);
-struct sw_flow *ovs_flow_dump_next(struct flow_table *table, u32 *bucket, u32 *idx);
+struct sw_flow *ovs_flow_dump_next(struct hash_table *table, u32 *bucket, u32 *idx);
bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
const struct sw_flow_key *key, int key_end);
@@ -97,4 +90,5 @@ struct sw_flow_mask *ovs_sw_flow_mask_find(const struct flow_table *,
void ovs_flow_key_mask(struct sw_flow_key *dst, const struct sw_flow_key *src,
const struct sw_flow_mask *mask);
+int ovs_flush_flows(struct flow_table *flow_table);
#endif /* flow_table.h */
--
1.7.1
More information about the dev
mailing list