#include <net/ndisc.h>
#include "vlan.h"
+#include "flow_netlink.h"
+
+#define TBL_MIN_BUCKETS 1024
+#define MASK_ARRAY_SIZE_MIN 16
+#define REHASH_INTERVAL (10 * 60 * HZ)
+
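+/* Per-CPU mask cache: MC_HASH_ENTRIES entries per CPU.  A lookup consumes
+ * MC_HASH_SHIFT bits of the skb hash per probe, so a miss tries up to
+ * MC_HASH_SEGS cache slots before falling back to a full mask walk.
+ */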
+#define MC_HASH_SHIFT 8
+#define MC_HASH_ENTRIES (1u << MC_HASH_SHIFT)
+#define MC_HASH_SEGS ((sizeof(uint32_t) * 8) / MC_HASH_SHIFT)
static struct kmem_cache *flow_cache;
+struct kmem_cache *flow_stats_cache __read_mostly;
static u16 range_n_bytes(const struct sw_flow_key_range *range)
{
}
void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
- const struct sw_flow_mask *mask)
+ bool full, const struct sw_flow_mask *mask)
{
- const long *m = (long *)((u8 *)&mask->key + mask->range.start);
- const long *s = (long *)((u8 *)src + mask->range.start);
- long *d = (long *)((u8 *)dst + mask->range.start);
+ int start = full ? 0 : mask->range.start;
+ int len = full ? sizeof *dst : range_n_bytes(&mask->range);
+ const long *m = (const long *)((const u8 *)&mask->key + start);
+ const long *s = (const long *)((const u8 *)src + start);
+ long *d = (long *)((u8 *)dst + start);
int i;
- /* The memory outside of the 'mask->range' are not set since
- * further operations on 'dst' only uses contents within
- * 'mask->range'.
+	/* If 'full' is true then all of 'dst' is fully initialized. Otherwise,
+	 * the memory outside of 'mask->range' is left uninitialized. This can
+	 * be used as an optimization when further operations on 'dst' only
+	 * use contents within 'mask->range'.
*/
- for (i = 0; i < range_n_bytes(&mask->range); i += sizeof(long))
+ for (i = 0; i < len; i += sizeof(long))
*d++ = *s++ & *m++;
}
struct sw_flow *ovs_flow_alloc(void)
{
struct sw_flow *flow;
+ struct flow_stats *stats;
+ int node;
flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
if (!flow)
return ERR_PTR(-ENOMEM);
- spin_lock_init(&flow->lock);
flow->sf_acts = NULL;
flow->mask = NULL;
+ flow->id.ufid_len = 0;
+ flow->id.unmasked_key = NULL;
+ flow->stats_last_writer = NUMA_NO_NODE;
+
+ /* Initialize the default stat node. */
+ stats = kmem_cache_alloc_node(flow_stats_cache,
+ GFP_KERNEL | __GFP_ZERO, 0);
+ if (!stats)
+ goto err;
+
+ spin_lock_init(&stats->lock);
+
+ RCU_INIT_POINTER(flow->stats[0], stats);
+
+ for_each_node(node)
+ if (node != 0)
+ RCU_INIT_POINTER(flow->stats[node], NULL);
return flow;
+err:
+ kmem_cache_free(flow_cache, flow);
+ return ERR_PTR(-ENOMEM);
+}
+
+int ovs_flow_tbl_count(const struct flow_table *table)
+{
+ return table->count;
}
static struct flex_array *alloc_buckets(unsigned int n_buckets)
static void flow_free(struct sw_flow *flow)
{
- kfree((struct sf_flow_acts __force *)flow->sf_acts);
+ int node;
+
+ if (ovs_identifier_is_key(&flow->id))
+ kfree(flow->id.unmasked_key);
+ if (flow->sf_acts)
+ ovs_nla_free_flow_actions((struct sw_flow_actions __force *)flow->sf_acts);
+ for_each_node(node)
+ if (flow->stats[node])
+ kmem_cache_free(flow_stats_cache,
+ rcu_dereference_raw(flow->stats[node]));
kmem_cache_free(flow_cache, flow);
}
flow_free(flow);
}
+static void rcu_free_sw_flow_mask_cb(struct rcu_head *rcu)
+{
+ struct sw_flow_mask *mask = container_of(rcu, struct sw_flow_mask, rcu);
+
+ kfree(mask);
+}
+
void ovs_flow_free(struct sw_flow *flow, bool deferred)
{
if (!flow)
return;
- ovs_sw_flow_mask_del_ref(flow->mask, deferred);
-
if (deferred)
call_rcu(&flow->rcu, rcu_free_flow_callback);
else
flex_array_free(buckets);
}
-static void __flow_tbl_destroy(struct flow_table *table)
+
+static void __table_instance_destroy(struct table_instance *ti)
{
- int i;
+ free_buckets(ti->buckets);
+ kfree(ti);
+}
- if (table->keep_flows)
- goto skip_flows;
+static struct table_instance *table_instance_alloc(int new_size)
+{
+ struct table_instance *ti = kmalloc(sizeof(*ti), GFP_KERNEL);
- for (i = 0; i < table->n_buckets; i++) {
- struct sw_flow *flow;
- struct hlist_head *head = flex_array_get(table->buckets, i);
- struct hlist_node *n;
- int ver = table->node_ver;
+ if (!ti)
+ return NULL;
- hlist_for_each_entry_safe(flow, n, head, hash_node[ver]) {
- hlist_del(&flow->hash_node[ver]);
- ovs_flow_free(flow, false);
- }
+ ti->buckets = alloc_buckets(new_size);
+
+ if (!ti->buckets) {
+ kfree(ti);
+ return NULL;
}
+ ti->n_buckets = new_size;
+ ti->node_ver = 0;
+ ti->keep_flows = false;
+ get_random_bytes(&ti->hash_seed, sizeof(u32));
- BUG_ON(!list_empty(table->mask_list));
- kfree(table->mask_list);
+ return ti;
+}
-skip_flows:
- free_buckets(table->buckets);
- kfree(table);
+static void mask_array_rcu_cb(struct rcu_head *rcu)
+{
+ struct mask_array *ma = container_of(rcu, struct mask_array, rcu);
+
+ kfree(ma);
}
-static struct flow_table *__flow_tbl_alloc(int new_size)
+static struct mask_array *tbl_mask_array_alloc(int size)
{
- struct flow_table *table = kmalloc(sizeof(*table), GFP_KERNEL);
+ struct mask_array *new;
- if (!table)
+ size = max(MASK_ARRAY_SIZE_MIN, size);
+ new = kzalloc(sizeof(struct mask_array) +
+ sizeof(struct sw_flow_mask *) * size, GFP_KERNEL);
+ if (!new)
return NULL;
- table->buckets = alloc_buckets(new_size);
+ new->count = 0;
+ new->max = size;
- if (!table->buckets) {
- kfree(table);
- return NULL;
+ return new;
+}
+
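+/* Replace the mask array with one sized for at least 'size' entries, copying
+ * the existing mask pointers to the front of the new array.  The old array
+ * is freed after an RCU grace period.  Callers must hold ovs_mutex.
+ */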
+static int tbl_mask_array_realloc(struct flow_table *tbl, int size)
+{
+ struct mask_array *old;
+ struct mask_array *new;
+
+ new = tbl_mask_array_alloc(size);
+ if (!new)
+ return -ENOMEM;
+
+ old = ovsl_dereference(tbl->mask_array);
+ if (old) {
+ int i, count = 0;
+
+ for (i = 0; i < old->max; i++) {
+ if (ovsl_dereference(old->masks[i]))
+ new->masks[count++] = old->masks[i];
+ }
+
+ new->count = count;
}
- table->n_buckets = new_size;
- table->count = 0;
- table->node_ver = 0;
- table->keep_flows = false;
- get_random_bytes(&table->hash_seed, sizeof(u32));
- table->mask_list = NULL;
+ rcu_assign_pointer(tbl->mask_array, new);
+
+ if (old)
+ call_rcu(&old->rcu, mask_array_rcu_cb);
- return table;
+ return 0;
}
-struct flow_table *ovs_flow_tbl_alloc(int new_size)
+int ovs_flow_tbl_init(struct flow_table *table)
{
- struct flow_table *table = __flow_tbl_alloc(new_size);
+ struct table_instance *ti, *ufid_ti;
+ struct mask_array *ma;
- if (!table)
- return NULL;
+ table->mask_cache = __alloc_percpu(sizeof(struct mask_cache_entry) *
+ MC_HASH_ENTRIES, __alignof__(struct mask_cache_entry));
+ if (!table->mask_cache)
+ return -ENOMEM;
- table->mask_list = kmalloc(sizeof(struct list_head), GFP_KERNEL);
- if (!table->mask_list) {
- table->keep_flows = true;
- __flow_tbl_destroy(table);
- return NULL;
- }
- INIT_LIST_HEAD(table->mask_list);
+ ma = tbl_mask_array_alloc(MASK_ARRAY_SIZE_MIN);
+ if (!ma)
+ goto free_mask_cache;
- return table;
+ ti = table_instance_alloc(TBL_MIN_BUCKETS);
+ if (!ti)
+ goto free_mask_array;
+
+ ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
+ if (!ufid_ti)
+ goto free_ti;
+
+ rcu_assign_pointer(table->ti, ti);
+ rcu_assign_pointer(table->ufid_ti, ufid_ti);
+ rcu_assign_pointer(table->mask_array, ma);
+ table->last_rehash = jiffies;
+ table->count = 0;
+ table->ufid_count = 0;
+ return 0;
+
+free_ti:
+ __table_instance_destroy(ti);
+free_mask_array:
+ kfree(ma);
+free_mask_cache:
+ free_percpu(table->mask_cache);
+ return -ENOMEM;
}
static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
{
- struct flow_table *table = container_of(rcu, struct flow_table, rcu);
+ struct table_instance *ti = container_of(rcu, struct table_instance, rcu);
- __flow_tbl_destroy(table);
+ __table_instance_destroy(ti);
}
-void ovs_flow_tbl_destroy(struct flow_table *table, bool deferred)
+static void table_instance_destroy(struct table_instance *ti,
+ struct table_instance *ufid_ti,
+ bool deferred)
{
- if (!table)
+ int i;
+
+ if (!ti)
return;
- if (deferred)
- call_rcu(&table->rcu, flow_tbl_destroy_rcu_cb);
- else
- __flow_tbl_destroy(table);
+ BUG_ON(!ufid_ti);
+ if (ti->keep_flows)
+ goto skip_flows;
+
+ for (i = 0; i < ti->n_buckets; i++) {
+ struct sw_flow *flow;
+ struct hlist_head *head = flex_array_get(ti->buckets, i);
+ struct hlist_node *n;
+ int ver = ti->node_ver;
+ int ufid_ver = ufid_ti->node_ver;
+
+ hlist_for_each_entry_safe(flow, n, head, flow_table.node[ver]) {
+ hlist_del_rcu(&flow->flow_table.node[ver]);
+ if (ovs_identifier_is_ufid(&flow->id))
+ hlist_del_rcu(&flow->ufid_table.node[ufid_ver]);
+ ovs_flow_free(flow, deferred);
+ }
+ }
+
+skip_flows:
+ if (deferred) {
+ call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
+ call_rcu(&ufid_ti->rcu, flow_tbl_destroy_rcu_cb);
+ } else {
+ __table_instance_destroy(ti);
+ __table_instance_destroy(ufid_ti);
+ }
}
-struct sw_flow *ovs_flow_tbl_dump_next(struct flow_table *table,
+/* No need for locking; this function is called from an RCU callback or the
+ * error path.
+ */
+void ovs_flow_tbl_destroy(struct flow_table *table)
+{
+ struct table_instance *ti = rcu_dereference_raw(table->ti);
+ struct table_instance *ufid_ti = rcu_dereference_raw(table->ufid_ti);
+
+ free_percpu(table->mask_cache);
+ kfree(rcu_dereference_raw(table->mask_array));
+ table_instance_destroy(ti, ufid_ti, false);
+}
+
+struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
u32 *bucket, u32 *last)
{
struct sw_flow *flow;
int ver;
int i;
- ver = table->node_ver;
- while (*bucket < table->n_buckets) {
+ ver = ti->node_ver;
+ while (*bucket < ti->n_buckets) {
i = 0;
- head = flex_array_get(table->buckets, *bucket);
- hlist_for_each_entry_rcu(flow, head, hash_node[ver]) {
+ head = flex_array_get(ti->buckets, *bucket);
+ hlist_for_each_entry_rcu(flow, head, flow_table.node[ver]) {
if (i < *last) {
i++;
continue;
return NULL;
}
-static struct hlist_head *find_bucket(struct flow_table *table, u32 hash)
+static struct hlist_head *find_bucket(struct table_instance *ti, u32 hash)
{
- hash = jhash_1word(hash, table->hash_seed);
- return flex_array_get(table->buckets,
- (hash & (table->n_buckets - 1)));
+ hash = jhash_1word(hash, ti->hash_seed);
+ return flex_array_get(ti->buckets,
+ (hash & (ti->n_buckets - 1)));
}
-static void __tbl_insert(struct flow_table *table, struct sw_flow *flow)
+static void table_instance_insert(struct table_instance *ti,
+ struct sw_flow *flow)
{
struct hlist_head *head;
- head = find_bucket(table, flow->hash);
- hlist_add_head_rcu(&flow->hash_node[table->node_ver], head);
+ head = find_bucket(ti, flow->flow_table.hash);
+ hlist_add_head_rcu(&flow->flow_table.node[ti->node_ver], head);
+}
- table->count++;
+static void ufid_table_instance_insert(struct table_instance *ti,
+ struct sw_flow *flow)
+{
+ struct hlist_head *head;
+
+ head = find_bucket(ti, flow->ufid_table.hash);
+ hlist_add_head_rcu(&flow->ufid_table.node[ti->node_ver], head);
}
-static void flow_table_copy_flows(struct flow_table *old,
- struct flow_table *new)
+static void flow_table_copy_flows(struct table_instance *old,
+ struct table_instance *new, bool ufid)
{
int old_ver;
int i;
head = flex_array_get(old->buckets, i);
- hlist_for_each_entry(flow, head, hash_node[old_ver])
- __tbl_insert(new, flow);
+ if (ufid)
+ hlist_for_each_entry(flow, head,
+ ufid_table.node[old_ver])
+ ufid_table_instance_insert(new, flow);
+ else
+ hlist_for_each_entry(flow, head,
+ flow_table.node[old_ver])
+ table_instance_insert(new, flow);
}
- new->mask_list = old->mask_list;
old->keep_flows = true;
}
-static struct flow_table *__flow_tbl_rehash(struct flow_table *table,
- int n_buckets)
+static struct table_instance *table_instance_rehash(struct table_instance *ti,
+ int n_buckets, bool ufid)
{
- struct flow_table *new_table;
+ struct table_instance *new_ti;
- new_table = __flow_tbl_alloc(n_buckets);
- if (!new_table)
- return ERR_PTR(-ENOMEM);
+ new_ti = table_instance_alloc(n_buckets);
+ if (!new_ti)
+ return NULL;
- flow_table_copy_flows(table, new_table);
+ flow_table_copy_flows(ti, new_ti, ufid);
- return new_table;
+ return new_ti;
}
-struct flow_table *ovs_flow_tbl_rehash(struct flow_table *table)
+int ovs_flow_tbl_flush(struct flow_table *flow_table)
{
- return __flow_tbl_rehash(table, table->n_buckets);
-}
+ struct table_instance *old_ti, *new_ti;
+ struct table_instance *old_ufid_ti, *new_ufid_ti;
-struct flow_table *ovs_flow_tbl_expand(struct flow_table *table)
-{
- return __flow_tbl_rehash(table, table->n_buckets * 2);
+ new_ti = table_instance_alloc(TBL_MIN_BUCKETS);
+ if (!new_ti)
+ return -ENOMEM;
+ new_ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
+ if (!new_ufid_ti)
+ goto err_free_ti;
+
+ old_ti = ovsl_dereference(flow_table->ti);
+ old_ufid_ti = ovsl_dereference(flow_table->ufid_ti);
+
+ rcu_assign_pointer(flow_table->ti, new_ti);
+ rcu_assign_pointer(flow_table->ufid_ti, new_ufid_ti);
+ flow_table->last_rehash = jiffies;
+ flow_table->count = 0;
+ flow_table->ufid_count = 0;
+
+ table_instance_destroy(old_ti, old_ufid_ti, true);
+ return 0;
+
+err_free_ti:
+ __table_instance_destroy(new_ti);
+ return -ENOMEM;
}
-static u32 flow_hash(const struct sw_flow_key *key, int key_start,
- int key_end)
+static u32 flow_hash(const struct sw_flow_key *key,
+ const struct sw_flow_key_range *range)
{
- u32 *hash_key = (u32 *)((u8 *)key + key_start);
+ int key_start = range->start;
+ int key_end = range->end;
+ const u32 *hash_key = (const u32 *)((const u8 *)key + key_start);
int hash_u32s = (key_end - key_start) >> 2;
/* Make sure number of hash bytes are multiple of u32. */
static int flow_key_start(const struct sw_flow_key *key)
{
- if (key->tun_key.ipv4_dst)
+ if (key->tun_key.u.ipv4.dst)
return 0;
else
return rounddown(offsetof(struct sw_flow_key, phy),
const struct sw_flow_key *key2,
int key_start, int key_end)
{
- const long *cp1 = (long *)((u8 *)key1 + key_start);
- const long *cp2 = (long *)((u8 *)key2 + key_start);
+ const long *cp1 = (const long *)((const u8 *)key1 + key_start);
+ const long *cp2 = (const long *)((const u8 *)key2 + key_start);
long diffs = 0;
int i;
static bool flow_cmp_masked_key(const struct sw_flow *flow,
const struct sw_flow_key *key,
- int key_start, int key_end)
+ const struct sw_flow_key_range *range)
{
- return cmp_key(&flow->key, key, key_start, key_end);
+ return cmp_key(&flow->key, key, range->start, range->end);
}
-bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
- struct sw_flow_match *match)
+static bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
+ const struct sw_flow_match *match)
{
struct sw_flow_key *key = match->key;
int key_start = flow_key_start(key);
int key_end = match->range.end;
- return cmp_key(&flow->unmasked_key, key, key_start, key_end);
+ BUG_ON(ovs_identifier_is_ufid(&flow->id));
+ return cmp_key(flow->id.unmasked_key, key, key_start, key_end);
}
-static struct sw_flow *masked_flow_lookup(struct flow_table *table,
+static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
const struct sw_flow_key *unmasked,
- struct sw_flow_mask *mask)
+ const struct sw_flow_mask *mask,
+ u32 *n_mask_hit)
{
struct sw_flow *flow;
struct hlist_head *head;
- int key_start = mask->range.start;
- int key_end = mask->range.end;
u32 hash;
struct sw_flow_key masked_key;
- ovs_flow_mask_key(&masked_key, unmasked, mask);
- hash = flow_hash(&masked_key, key_start, key_end);
- head = find_bucket(table, hash);
- hlist_for_each_entry_rcu(flow, head, hash_node[table->node_ver]) {
- if (flow->mask == mask &&
- flow_cmp_masked_key(flow, &masked_key,
- key_start, key_end))
+ ovs_flow_mask_key(&masked_key, unmasked, false, mask);
+ hash = flow_hash(&masked_key, &mask->range);
+ head = find_bucket(ti, hash);
+ (*n_mask_hit)++;
+ hlist_for_each_entry_rcu(flow, head, flow_table.node[ti->node_ver]) {
+ if (flow->mask == mask && flow->flow_table.hash == hash &&
+ flow_cmp_masked_key(flow, &masked_key, &mask->range))
return flow;
}
return NULL;
}
-struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
- const struct sw_flow_key *key)
+/* Flow lookup does a full lookup on the flow table. It starts with the
+ * mask at the index passed in via '*index'.
+ */
+static struct sw_flow *flow_lookup(struct flow_table *tbl,
+ struct table_instance *ti,
+ const struct mask_array *ma,
+ const struct sw_flow_key *key,
+ u32 *n_mask_hit,
+ u32 *index)
{
- struct sw_flow *flow = NULL;
struct sw_flow_mask *mask;
+ struct sw_flow *flow;
+ int i;
+
+ if (*index < ma->max) {
+ mask = rcu_dereference_ovsl(ma->masks[*index]);
+ if (mask) {
+ flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
+ if (flow)
+ return flow;
+ }
+ }
+
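+	/* The hinted index did not match; scan every mask and record the
+	 * index that did match so the caller can cache it.
+	 */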
+ for (i = 0; i < ma->max; i++) {
+ if (i == *index)
+ continue;
+
+ mask = rcu_dereference_ovsl(ma->masks[i]);
+ if (!mask)
+ continue;
- list_for_each_entry_rcu(mask, tbl->mask_list, list) {
- flow = masked_flow_lookup(tbl, key, mask);
- if (flow) /* Found */
- break;
+ flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
+ if (flow) { /* Found */
+ *index = i;
+ return flow;
+ }
+ }
+
+ return NULL;
+}
+
+/*
+ * mask_cache maps a flow to a probable mask. The cache is not tightly
+ * coupled to the mask list, so updates to the mask list can leave stale
+ * entries in the mask cache.
+ * The cache is per-CPU and is divided into MC_HASH_SEGS segments.
+ * In case of a hash collision the entry is hashed into the next segment.
+ */
+struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
+ const struct sw_flow_key *key,
+ u32 skb_hash,
+ u32 *n_mask_hit)
+{
+ struct mask_array *ma = rcu_dereference(tbl->mask_array);
+ struct table_instance *ti = rcu_dereference(tbl->ti);
+ struct mask_cache_entry *entries, *ce;
+ struct sw_flow *flow;
+ u32 hash;
+ int seg;
+
+ *n_mask_hit = 0;
+ if (unlikely(!skb_hash)) {
+ u32 mask_index = 0;
+
+ return flow_lookup(tbl, ti, ma, key, n_mask_hit, &mask_index);
}
+	/* Pre- and post-recirculation flows usually have the same skb_hash
+	 * value. To avoid hash collisions, rehash the 'skb_hash' with
+	 * 'recirc_id'. */
+ if (key->recirc_id)
+ skb_hash = jhash_1word(skb_hash, key->recirc_id);
+
+ ce = NULL;
+ hash = skb_hash;
+ entries = this_cpu_ptr(tbl->mask_cache);
+
+ /* Find the cache entry 'ce' to operate on. */
+ for (seg = 0; seg < MC_HASH_SEGS; seg++) {
+ int index = hash & (MC_HASH_ENTRIES - 1);
+ struct mask_cache_entry *e;
+
+ e = &entries[index];
+ if (e->skb_hash == skb_hash) {
+ flow = flow_lookup(tbl, ti, ma, key, n_mask_hit,
+ &e->mask_index);
+ if (!flow)
+ e->skb_hash = 0;
+ return flow;
+ }
+
+ if (!ce || e->skb_hash < ce->skb_hash)
+ ce = e; /* A better replacement cache candidate. */
+
+ hash >>= MC_HASH_SHIFT;
+ }
+
+ /* Cache miss, do full lookup. */
+ flow = flow_lookup(tbl, ti, ma, key, n_mask_hit, &ce->mask_index);
+ if (flow)
+ ce->skb_hash = skb_hash;
+
return flow;
}
-void ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow)
+struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
+ const struct sw_flow_key *key)
{
- flow->hash = flow_hash(&flow->key, flow->mask->range.start,
- flow->mask->range.end);
- __tbl_insert(table, flow);
+ struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
+ struct mask_array *ma = rcu_dereference_ovsl(tbl->mask_array);
+ u32 __always_unused n_mask_hit;
+ u32 index = 0;
+
+ return flow_lookup(tbl, ti, ma, key, &n_mask_hit, &index);
}
-void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
+struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
+ const struct sw_flow_match *match)
{
- BUG_ON(table->count == 0);
- hlist_del_rcu(&flow->hash_node[table->node_ver]);
- table->count--;
+ struct mask_array *ma = ovsl_dereference(tbl->mask_array);
+ int i;
+
+ /* Always called under ovs-mutex. */
+ for (i = 0; i < ma->max; i++) {
+ struct table_instance *ti = ovsl_dereference(tbl->ti);
+ u32 __always_unused n_mask_hit;
+ struct sw_flow_mask *mask;
+ struct sw_flow *flow;
+
+ mask = ovsl_dereference(ma->masks[i]);
+ if (!mask)
+ continue;
+ flow = masked_flow_lookup(ti, match->key, mask, &n_mask_hit);
+ if (flow && ovs_identifier_is_key(&flow->id) &&
+ ovs_flow_cmp_unmasked_key(flow, match))
+ return flow;
+ }
+ return NULL;
}
-struct sw_flow_mask *ovs_sw_flow_mask_alloc(void)
+static u32 ufid_hash(const struct sw_flow_id *sfid)
{
- struct sw_flow_mask *mask;
+ return jhash(sfid->ufid, sfid->ufid_len, 0);
+}
- mask = kmalloc(sizeof(*mask), GFP_KERNEL);
- if (mask)
- mask->ref_count = 0;
+static bool ovs_flow_cmp_ufid(const struct sw_flow *flow,
+ const struct sw_flow_id *sfid)
+{
+ if (flow->id.ufid_len != sfid->ufid_len)
+ return false;
- return mask;
+ return !memcmp(flow->id.ufid, sfid->ufid, sfid->ufid_len);
}
-void ovs_sw_flow_mask_add_ref(struct sw_flow_mask *mask)
+bool ovs_flow_cmp(const struct sw_flow *flow, const struct sw_flow_match *match)
{
- mask->ref_count++;
+ if (ovs_identifier_is_ufid(&flow->id))
+ return flow_cmp_masked_key(flow, match->key, &match->range);
+
+ return ovs_flow_cmp_unmasked_key(flow, match);
}
-static void rcu_free_sw_flow_mask_cb(struct rcu_head *rcu)
+struct sw_flow *ovs_flow_tbl_lookup_ufid(struct flow_table *tbl,
+ const struct sw_flow_id *ufid)
{
- struct sw_flow_mask *mask = container_of(rcu, struct sw_flow_mask, rcu);
+ struct table_instance *ti = rcu_dereference_ovsl(tbl->ufid_ti);
+ struct sw_flow *flow;
+ struct hlist_head *head;
+ u32 hash;
- kfree(mask);
+ hash = ufid_hash(ufid);
+ head = find_bucket(ti, hash);
+ hlist_for_each_entry_rcu(flow, head, ufid_table.node[ti->node_ver]) {
+ if (flow->ufid_table.hash == hash &&
+ ovs_flow_cmp_ufid(flow, ufid))
+ return flow;
+ }
+ return NULL;
}
-void ovs_sw_flow_mask_del_ref(struct sw_flow_mask *mask, bool deferred)
+int ovs_flow_tbl_num_masks(const struct flow_table *table)
{
- if (!mask)
- return;
+ struct mask_array *ma;
- BUG_ON(!mask->ref_count);
- mask->ref_count--;
+ ma = rcu_dereference_ovsl(table->mask_array);
+ return ma->count;
+}
+
+static struct table_instance *table_instance_expand(struct table_instance *ti,
+ bool ufid)
+{
+ return table_instance_rehash(ti, ti->n_buckets * 2, ufid);
+}
+
+static void tbl_mask_array_delete_mask(struct mask_array *ma,
+ struct sw_flow_mask *mask)
+{
+ int i;
- if (!mask->ref_count) {
- list_del_rcu(&mask->list);
- if (deferred)
+	/* Remove the deleted mask pointer from the array. */
+ for (i = 0; i < ma->max; i++) {
+ if (mask == ovsl_dereference(ma->masks[i])) {
+ RCU_INIT_POINTER(ma->masks[i], NULL);
+ ma->count--;
call_rcu(&mask->rcu, rcu_free_sw_flow_mask_cb);
- else
- kfree(mask);
+ return;
+ }
+ }
+ BUG();
+}
+
+/* Remove 'mask' from the mask list, if it is not needed any more. */
+static void flow_mask_remove(struct flow_table *tbl, struct sw_flow_mask *mask)
+{
+ if (mask) {
+		/* The ovs lock is required to protect the mask refcount
+		 * and the mask list.
+ */
+ ASSERT_OVSL();
+ BUG_ON(!mask->ref_count);
+ mask->ref_count--;
+
+ if (!mask->ref_count) {
+ struct mask_array *ma;
+
+ ma = ovsl_dereference(tbl->mask_array);
+ tbl_mask_array_delete_mask(ma, mask);
+
+ /* Shrink the mask array if necessary. */
+ if (ma->max >= (MASK_ARRAY_SIZE_MIN * 2) &&
+ ma->count <= (ma->max / 3))
+ tbl_mask_array_realloc(tbl, ma->max / 2);
+ }
+ }
+}
+
+/* Must be called with OVS mutex held. */
+void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
+{
+ struct table_instance *ti = ovsl_dereference(table->ti);
+ struct table_instance *ufid_ti = ovsl_dereference(table->ufid_ti);
+
+ BUG_ON(table->count == 0);
+ hlist_del_rcu(&flow->flow_table.node[ti->node_ver]);
+ table->count--;
+ if (ovs_identifier_is_ufid(&flow->id)) {
+ hlist_del_rcu(&flow->ufid_table.node[ufid_ti->node_ver]);
+ table->ufid_count--;
}
+
+ /* RCU delete the mask. 'flow->mask' is not NULLed, as it should be
+ * accessible as long as the RCU read lock is held.
+ */
+ flow_mask_remove(table, flow->mask);
+}
+
+static struct sw_flow_mask *mask_alloc(void)
+{
+ struct sw_flow_mask *mask;
+
+ mask = kmalloc(sizeof(*mask), GFP_KERNEL);
+ if (mask)
+ mask->ref_count = 1;
+
+ return mask;
}
static bool mask_equal(const struct sw_flow_mask *a,
const struct sw_flow_mask *b)
{
- u8 *a_ = (u8 *)&a->key + a->range.start;
- u8 *b_ = (u8 *)&b->key + b->range.start;
+ const u8 *a_ = (const u8 *)&a->key + a->range.start;
+ const u8 *b_ = (const u8 *)&b->key + b->range.start;
return (a->range.end == b->range.end)
&& (a->range.start == b->range.start)
&& (memcmp(a_, b_, range_n_bytes(&a->range)) == 0);
}
-struct sw_flow_mask *ovs_sw_flow_mask_find(const struct flow_table *tbl,
+static struct sw_flow_mask *flow_mask_find(const struct flow_table *tbl,
const struct sw_flow_mask *mask)
{
- struct list_head *ml;
+ struct mask_array *ma;
+ int i;
- list_for_each(ml, tbl->mask_list) {
- struct sw_flow_mask *m;
- m = container_of(ml, struct sw_flow_mask, list);
- if (mask_equal(mask, m))
- return m;
+ ma = ovsl_dereference(tbl->mask_array);
+ for (i = 0; i < ma->max; i++) {
+ struct sw_flow_mask *t;
+
+ t = ovsl_dereference(ma->masks[i]);
+ if (t && mask_equal(mask, t))
+ return t;
}
return NULL;
}
-/**
- * add a new mask into the mask list.
- * The caller needs to make sure that 'mask' is not the same
- * as any masks that are already on the list.
- */
-void ovs_sw_flow_mask_insert(struct flow_table *tbl, struct sw_flow_mask *mask)
+/* Add 'mask' into the mask list, if it is not already there. */
+static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
+ const struct sw_flow_mask *new)
{
- list_add_rcu(&mask->list, tbl->mask_list);
+ struct sw_flow_mask *mask;
+
+ mask = flow_mask_find(tbl, new);
+ if (!mask) {
+ struct mask_array *ma;
+ int i;
+
+		/* Allocate a new mask if none exists. */
+ mask = mask_alloc();
+ if (!mask)
+ return -ENOMEM;
+
+ mask->key = new->key;
+ mask->range = new->range;
+
+ /* Add mask to mask-list. */
+ ma = ovsl_dereference(tbl->mask_array);
+ if (ma->count >= ma->max) {
+ int err;
+
+ err = tbl_mask_array_realloc(tbl, ma->max +
+ MASK_ARRAY_SIZE_MIN);
+ if (err) {
+ kfree(mask);
+ return err;
+ }
+ ma = ovsl_dereference(tbl->mask_array);
+ }
+
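+		/* Insert the new mask into the first free slot.  At this
+		 * point ma->count < ma->max, so a free slot must exist.
+		 */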
+ for (i = 0; i < ma->max; i++) {
+ struct sw_flow_mask *t;
+
+ t = ovsl_dereference(ma->masks[i]);
+ if (!t) {
+ rcu_assign_pointer(ma->masks[i], mask);
+ ma->count++;
+ break;
+ }
+ }
+
+ } else {
+ BUG_ON(!mask->ref_count);
+ mask->ref_count++;
+ }
+
+ flow->mask = mask;
+ return 0;
+}
+
+/* Must be called with OVS mutex held. */
+static void flow_key_insert(struct flow_table *table, struct sw_flow *flow)
+{
+ struct table_instance *new_ti = NULL;
+ struct table_instance *ti;
+
+ flow->flow_table.hash = flow_hash(&flow->key, &flow->mask->range);
+ ti = ovsl_dereference(table->ti);
+ table_instance_insert(ti, flow);
+ table->count++;
+
+	/* Expand the table, if necessary, to make room.  Otherwise rehash
+	 * periodically so that a fresh hash_seed redistributes the buckets.
+	 */
+ if (table->count > ti->n_buckets)
+ new_ti = table_instance_expand(ti, false);
+ else if (time_after(jiffies, table->last_rehash + REHASH_INTERVAL))
+ new_ti = table_instance_rehash(ti, ti->n_buckets, false);
+
+ if (new_ti) {
+ rcu_assign_pointer(table->ti, new_ti);
+ call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
+ table->last_rehash = jiffies;
+ }
+}
+
+/* Must be called with OVS mutex held. */
+static void flow_ufid_insert(struct flow_table *table, struct sw_flow *flow)
+{
+ struct table_instance *ti;
+
+ flow->ufid_table.hash = ufid_hash(&flow->id);
+ ti = ovsl_dereference(table->ufid_ti);
+ ufid_table_instance_insert(ti, flow);
+ table->ufid_count++;
+
+ /* Expand table, if necessary, to make room. */
+ if (table->ufid_count > ti->n_buckets) {
+ struct table_instance *new_ti;
+
+ new_ti = table_instance_expand(ti, true);
+ if (new_ti) {
+ rcu_assign_pointer(table->ufid_ti, new_ti);
+ call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
+ }
+ }
+}
+
+/* Must be called with OVS mutex held. */
+int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
+ const struct sw_flow_mask *mask)
+{
+ int err;
+
+ err = flow_mask_insert(table, flow, mask);
+ if (err)
+ return err;
+ flow_key_insert(table, flow);
+ if (ovs_identifier_is_ufid(&flow->id))
+ flow_ufid_insert(table, flow);
+
+ return 0;
}
/* Initializes the flow module.
- * Returns zero if successful or a negative error code. */
+ * Returns zero if successful or a negative error code.
+ */
int ovs_flow_init(void)
{
BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long));
BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));
- flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow), 0,
- 0, NULL);
+ flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
+ + (nr_node_ids
+ * sizeof(struct flow_stats *)),
+ 0, 0, NULL);
if (flow_cache == NULL)
return -ENOMEM;
+ flow_stats_cache
+ = kmem_cache_create("sw_flow_stats", sizeof(struct flow_stats),
+ 0, SLAB_HWCACHE_ALIGN, NULL);
+ if (flow_stats_cache == NULL) {
+ kmem_cache_destroy(flow_cache);
+ flow_cache = NULL;
+ return -ENOMEM;
+ }
+
return 0;
}
/* Uninitializes the flow module. */
void ovs_flow_exit(void)
{
+ kmem_cache_destroy(flow_stats_cache);
kmem_cache_destroy(flow_cache);
}