/*
 * Copyright (c) 2007-2013 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */
#include "flow.h"
#include "datapath.h"
#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/rcupdate.h>
#include <linux/if_arp.h>
#include <linux/ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <net/ndisc.h>
#define TBL_MIN_BUCKETS 1024
#define MASK_ARRAY_SIZE_MIN 16
#define REHASH_INTERVAL (10 * 60 * HZ)

#define MC_HASH_SHIFT 8
#define MC_HASH_ENTRIES (1u << MC_HASH_SHIFT)
#define MC_HASH_SEGS ((sizeof(uint32_t) * 8) / MC_HASH_SHIFT)
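/* The mask cache is a per-CPU array of MC_HASH_ENTRIES entries. A packet's
 * 32-bit skb hash is consumed MC_HASH_SHIFT bits at a time, so a lookup can
 * probe up to MC_HASH_SEGS different cache slots before falling back to a
 * full scan of the mask list.
 */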
static struct kmem_cache *flow_cache;
struct kmem_cache *flow_stats_cache __read_mostly;
static u16 range_n_bytes(const struct sw_flow_key_range *range)
        return range->end - range->start;
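/* Copy 'src' into 'dst' while applying 'mask', one long word at a time.
 * The key layout and the mask range are long-aligned; ovs_flow_init()
 * checks these alignment assumptions with BUILD_BUG_ON().
 */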
void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
                       bool full, const struct sw_flow_mask *mask)
        int start = full ? 0 : mask->range.start;
        int len = full ? sizeof *dst : range_n_bytes(&mask->range);
        const long *m = (const long *)((const u8 *)&mask->key + start);
        const long *s = (const long *)((const u8 *)src + start);
        long *d = (long *)((u8 *)dst + start);

        /* If 'full' is true then all of 'dst' is fully initialized. Otherwise,
         * if 'full' is false the memory outside of the 'mask->range' is left
         * uninitialized. This can be used as an optimization when further
         * operations on 'dst' only use contents within 'mask->range'.
         */
        for (i = 0; i < len; i += sizeof(long))
                *d++ = *s++ & *m++;
struct sw_flow *ovs_flow_alloc(void)
        struct flow_stats *stats;

        flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
        if (!flow)
                return ERR_PTR(-ENOMEM);

        flow->id.ufid_len = 0;
        flow->id.unmasked_key = NULL;
        flow->stats_last_writer = NUMA_NO_NODE;

        /* Initialize the default stat node. */
        stats = kmem_cache_alloc_node(flow_stats_cache,
                                      GFP_KERNEL | __GFP_ZERO, 0);

        spin_lock_init(&stats->lock);

        RCU_INIT_POINTER(flow->stats[0], stats);

        RCU_INIT_POINTER(flow->stats[node], NULL);

        kmem_cache_free(flow_cache, flow);
        return ERR_PTR(-ENOMEM);
int ovs_flow_tbl_count(const struct flow_table *table)
static struct flex_array *alloc_buckets(unsigned int n_buckets)
        struct flex_array *buckets;

        buckets = flex_array_alloc(sizeof(struct hlist_head),
                                   n_buckets, GFP_KERNEL);

        err = flex_array_prealloc(buckets, 0, n_buckets, GFP_KERNEL);

        flex_array_free(buckets);

        for (i = 0; i < n_buckets; i++)
                INIT_HLIST_HEAD((struct hlist_head *)
                                flex_array_get(buckets, i));
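/* Free a flow and everything it owns: the unmasked key (when the flow is
 * identified by key rather than UFID), its actions, and any per-node
 * statistics, then the flow itself.
 */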
static void flow_free(struct sw_flow *flow)
        if (ovs_identifier_is_key(&flow->id))
                kfree(flow->id.unmasked_key);
        kfree(rcu_dereference_raw(flow->sf_acts));
        if (flow->stats[node])
                kmem_cache_free(flow_stats_cache,
                                rcu_dereference_raw(flow->stats[node]));
        kmem_cache_free(flow_cache, flow);
static void rcu_free_flow_callback(struct rcu_head *rcu)
        struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);

static void rcu_free_sw_flow_mask_cb(struct rcu_head *rcu)
        struct sw_flow_mask *mask = container_of(rcu, struct sw_flow_mask, rcu);

void ovs_flow_free(struct sw_flow *flow, bool deferred)
        if (deferred)
                call_rcu(&flow->rcu, rcu_free_flow_callback);
static void free_buckets(struct flex_array *buckets)
        flex_array_free(buckets);

static void __table_instance_destroy(struct table_instance *ti)
        free_buckets(ti->buckets);

static struct table_instance *table_instance_alloc(int new_size)
        struct table_instance *ti = kmalloc(sizeof(*ti), GFP_KERNEL);

        ti->buckets = alloc_buckets(new_size);

        ti->n_buckets = new_size;
        ti->keep_flows = false;
        get_random_bytes(&ti->hash_seed, sizeof(u32));
static void mask_array_rcu_cb(struct rcu_head *rcu)
        struct mask_array *ma = container_of(rcu, struct mask_array, rcu);

static struct mask_array *tbl_mask_array_alloc(int size)
        struct mask_array *new;

        size = max(MASK_ARRAY_SIZE_MIN, size);
        new = kzalloc(sizeof(struct mask_array) +
                      sizeof(struct sw_flow_mask *) * size, GFP_KERNEL);
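/* Resize the mask array to 'size' entries: allocate a new array, copy over
 * the non-NULL mask pointers from the old one, publish it with
 * rcu_assign_pointer() and free the old array after a grace period.
 */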
static int tbl_mask_array_realloc(struct flow_table *tbl, int size)
        struct mask_array *old;
        struct mask_array *new;

        new = tbl_mask_array_alloc(size);

        old = ovsl_dereference(tbl->mask_array);

        for (i = 0; i < old->max; i++) {
                if (ovsl_dereference(old->masks[i]))
                        new->masks[count++] = old->masks[i];

        rcu_assign_pointer(tbl->mask_array, new);

        call_rcu(&old->rcu, mask_array_rcu_cb);
int ovs_flow_tbl_init(struct flow_table *table)
        struct table_instance *ti, *ufid_ti;
        struct mask_array *ma;

        table->mask_cache = __alloc_percpu(sizeof(struct mask_cache_entry) *
                                           MC_HASH_ENTRIES, __alignof__(struct mask_cache_entry));
        if (!table->mask_cache)

        ma = tbl_mask_array_alloc(MASK_ARRAY_SIZE_MIN);
                goto free_mask_cache;

        ti = table_instance_alloc(TBL_MIN_BUCKETS);
                goto free_mask_array;

        ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);

        rcu_assign_pointer(table->ti, ti);
        rcu_assign_pointer(table->ufid_ti, ufid_ti);
        rcu_assign_pointer(table->mask_array, ma);
        table->last_rehash = jiffies;
        table->ufid_count = 0;

        __table_instance_destroy(ti);

        free_percpu(table->mask_cache);
static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
        struct table_instance *ti = container_of(rcu, struct table_instance, rcu);

        __table_instance_destroy(ti);

static void table_instance_destroy(struct table_instance *ti,
                                   struct table_instance *ufid_ti,
                                   bool deferred)
        for (i = 0; i < ti->n_buckets; i++) {
                struct sw_flow *flow;
                struct hlist_head *head = flex_array_get(ti->buckets, i);
                struct hlist_node *n;
                int ver = ti->node_ver;
                int ufid_ver = ufid_ti->node_ver;

                hlist_for_each_entry_safe(flow, n, head, flow_table.node[ver]) {
                        hlist_del_rcu(&flow->flow_table.node[ver]);
                        if (ovs_identifier_is_ufid(&flow->id))
                                hlist_del_rcu(&flow->ufid_table.node[ufid_ver]);
                        ovs_flow_free(flow, deferred);

        if (deferred) {
                call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
                call_rcu(&ufid_ti->rcu, flow_tbl_destroy_rcu_cb);
        } else {
                __table_instance_destroy(ti);
                __table_instance_destroy(ufid_ti);
        }
/* No need for locking; this function is called from an RCU callback or
 * from the error path.
 */
void ovs_flow_tbl_destroy(struct flow_table *table)
        struct table_instance *ti = rcu_dereference_raw(table->ti);
        struct table_instance *ufid_ti = rcu_dereference_raw(table->ufid_ti);

        free_percpu(table->mask_cache);
        kfree(rcu_dereference_raw(table->mask_array));
        table_instance_destroy(ti, ufid_ti, false);
struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
                                       u32 *bucket, u32 *last)
        struct sw_flow *flow;
        struct hlist_head *head;

        while (*bucket < ti->n_buckets) {
                head = flex_array_get(ti->buckets, *bucket);
                hlist_for_each_entry_rcu(flow, head, flow_table.node[ver]) {
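/* Pick the bucket for 'hash': rehash with the per-instance random seed so
 * flows spread differently in each table instance, then mask down to the
 * (power-of-two) number of buckets.
 */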
static struct hlist_head *find_bucket(struct table_instance *ti, u32 hash)
        hash = jhash_1word(hash, ti->hash_seed);
        return flex_array_get(ti->buckets,
                              (hash & (ti->n_buckets - 1)));

static void table_instance_insert(struct table_instance *ti,
                                  struct sw_flow *flow)
        struct hlist_head *head;

        head = find_bucket(ti, flow->flow_table.hash);
        hlist_add_head_rcu(&flow->flow_table.node[ti->node_ver], head);

static void ufid_table_instance_insert(struct table_instance *ti,
                                       struct sw_flow *flow)
        struct hlist_head *head;

        head = find_bucket(ti, flow->ufid_table.hash);
        hlist_add_head_rcu(&flow->ufid_table.node[ti->node_ver], head);
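/* Re-insert every flow from 'old' into 'new'. The node version is flipped so
 * the links used by 'new' do not disturb readers still walking 'old';
 * 'old' is then marked keep_flows so destroying it leaves the flows alone.
 */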
static void flow_table_copy_flows(struct table_instance *old,
                                  struct table_instance *new, bool ufid)
        old_ver = old->node_ver;
        new->node_ver = !old_ver;

        /* Insert in new table. */
        for (i = 0; i < old->n_buckets; i++) {
                struct sw_flow *flow;
                struct hlist_head *head;

                head = flex_array_get(old->buckets, i);

                if (ufid)
                        hlist_for_each_entry(flow, head,
                                             ufid_table.node[old_ver])
                                ufid_table_instance_insert(new, flow);
                else
                        hlist_for_each_entry(flow, head,
                                             flow_table.node[old_ver])
                                table_instance_insert(new, flow);

        old->keep_flows = true;
static struct table_instance *table_instance_rehash(struct table_instance *ti,
                                                    int n_buckets, bool ufid)
        struct table_instance *new_ti;

        new_ti = table_instance_alloc(n_buckets);

        flow_table_copy_flows(ti, new_ti, ufid);
int ovs_flow_tbl_flush(struct flow_table *flow_table)
        struct table_instance *old_ti, *new_ti;
        struct table_instance *old_ufid_ti, *new_ufid_ti;

        new_ti = table_instance_alloc(TBL_MIN_BUCKETS);

        new_ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);

        old_ti = ovsl_dereference(flow_table->ti);
        old_ufid_ti = ovsl_dereference(flow_table->ufid_ti);

        rcu_assign_pointer(flow_table->ti, new_ti);
        rcu_assign_pointer(flow_table->ufid_ti, new_ufid_ti);
        flow_table->last_rehash = jiffies;
        flow_table->count = 0;
        flow_table->ufid_count = 0;

        table_instance_destroy(old_ti, old_ufid_ti, true);

        __table_instance_destroy(new_ti);
static u32 flow_hash(const struct sw_flow_key *key,
                     const struct sw_flow_key_range *range)
        int key_start = range->start;
        int key_end = range->end;
        const u32 *hash_key = (const u32 *)((const u8 *)key + key_start);
        int hash_u32s = (key_end - key_start) >> 2;

        /* Make sure the number of hash bytes is a multiple of u32. */
        BUILD_BUG_ON(sizeof(long) % sizeof(u32));

        return jhash2(hash_key, hash_u32s, 0);
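/* Offset of the first byte that takes part in key comparison: zero when the
 * key carries a tunnel destination, otherwise the 'phy' fields onwards,
 * rounded down to a long boundary.
 */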
static int flow_key_start(const struct sw_flow_key *key)
        if (key->tun_key.ipv4_dst)
                return 0;

        return rounddown(offsetof(struct sw_flow_key, phy),
                         sizeof(long));
static bool cmp_key(const struct sw_flow_key *key1,
                    const struct sw_flow_key *key2,
                    int key_start, int key_end)
        const long *cp1 = (const long *)((const u8 *)key1 + key_start);
        const long *cp2 = (const long *)((const u8 *)key2 + key_start);

        for (i = key_start; i < key_end; i += sizeof(long))
                diffs |= *cp1++ ^ *cp2++;

static bool flow_cmp_masked_key(const struct sw_flow *flow,
                                const struct sw_flow_key *key,
                                const struct sw_flow_key_range *range)
        return cmp_key(&flow->key, key, range->start, range->end);
static bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
                                      const struct sw_flow_match *match)
        struct sw_flow_key *key = match->key;
        int key_start = flow_key_start(key);
        int key_end = match->range.end;

        BUG_ON(ovs_identifier_is_ufid(&flow->id));
        return cmp_key(flow->id.unmasked_key, key, key_start, key_end);
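/* Look up 'unmasked' under a single 'mask': mask the key, hash only the
 * masked range, and scan the matching bucket for a flow with the same mask,
 * hash and masked key.
 */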
static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
                                          const struct sw_flow_key *unmasked,
                                          const struct sw_flow_mask *mask,
                                          u32 *n_mask_hit)
        struct sw_flow *flow;
        struct hlist_head *head;
        struct sw_flow_key masked_key;

        ovs_flow_mask_key(&masked_key, unmasked, false, mask);
        hash = flow_hash(&masked_key, &mask->range);
        head = find_bucket(ti, hash);

        hlist_for_each_entry_rcu(flow, head, flow_table.node[ti->node_ver]) {
                if (flow->mask == mask && flow->flow_table.hash == hash &&
                    flow_cmp_masked_key(flow, &masked_key, &mask->range))
/* Flow lookup does a full lookup on the flow table. It starts with the mask
 * from the index passed in '*index'.
 */
static struct sw_flow *flow_lookup(struct flow_table *tbl,
                                   struct table_instance *ti,
                                   const struct mask_array *ma,
                                   const struct sw_flow_key *key,
                                   u32 *n_mask_hit,
                                   u32 *index)
        struct sw_flow_mask *mask;
        struct sw_flow *flow;

        if (*index < ma->max) {
                mask = rcu_dereference_ovsl(ma->masks[*index]);
                flow = masked_flow_lookup(ti, key, mask, n_mask_hit);

        for (i = 0; i < ma->max; i++) {

                mask = rcu_dereference_ovsl(ma->masks[i]);

                flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
                if (flow) { /* Found */
/* mask_cache maps a flow to a probable mask. This cache is not tightly
 * coupled to the mask list: updates to the mask list can leave stale
 * entries in the mask cache.
 *
 * The cache is per-CPU and is divided into MC_HASH_SEGS segments. In case
 * of a hash collision the entry is hashed into the next segment.
 */
struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
                                          const struct sw_flow_key *key,
                                          u32 skb_hash,
                                          u32 *n_mask_hit)
        struct mask_array *ma = rcu_dereference(tbl->mask_array);
        struct table_instance *ti = rcu_dereference(tbl->ti);
        struct mask_cache_entry *entries, *ce;
        struct sw_flow *flow;

        if (unlikely(!skb_hash)) {
                return flow_lookup(tbl, ti, ma, key, n_mask_hit, &mask_index);

        /* Pre- and post-recirculation flows usually have the same skb_hash
         * value. To avoid hash collisions, rehash the 'skb_hash' with
         * 'recirc_id'.
         */
        skb_hash = jhash_1word(skb_hash, key->recirc_id);

        entries = this_cpu_ptr(tbl->mask_cache);

        /* Find the cache entry 'ce' to operate on. */
        for (seg = 0; seg < MC_HASH_SEGS; seg++) {
                int index = hash & (MC_HASH_ENTRIES - 1);
                struct mask_cache_entry *e;

                if (e->skb_hash == skb_hash) {
                        flow = flow_lookup(tbl, ti, ma, key, n_mask_hit,
                                           &e->mask_index);

                if (!ce || e->skb_hash < ce->skb_hash)
                        ce = e; /* A better replacement cache candidate. */

                hash >>= MC_HASH_SHIFT;

        /* Cache miss, do full lookup. */
        flow = flow_lookup(tbl, ti, ma, key, n_mask_hit, &ce->mask_index);

        ce->skb_hash = skb_hash;
struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
                                    const struct sw_flow_key *key)
        struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
        struct mask_array *ma = rcu_dereference_ovsl(tbl->mask_array);
        u32 __always_unused n_mask_hit;

        return flow_lookup(tbl, ti, ma, key, &n_mask_hit, &index);
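/* Exact (unmasked) lookup, used on the slow path under ovs_mutex: try every
 * mask in turn and accept only a flow that is identified by key and whose
 * full unmasked key matches.
 */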
struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
                                          const struct sw_flow_match *match)
        struct mask_array *ma = ovsl_dereference(tbl->mask_array);

        /* Always called under ovs-mutex. */
        for (i = 0; i < ma->max; i++) {
                struct table_instance *ti = ovsl_dereference(tbl->ti);
                u32 __always_unused n_mask_hit;
                struct sw_flow_mask *mask;
                struct sw_flow *flow;

                mask = ovsl_dereference(ma->masks[i]);

                flow = masked_flow_lookup(ti, match->key, mask, &n_mask_hit);
                if (flow && ovs_identifier_is_key(&flow->id) &&
                    ovs_flow_cmp_unmasked_key(flow, match))
                        return flow;
static u32 ufid_hash(const struct sw_flow_id *sfid)
        return jhash(sfid->ufid, sfid->ufid_len, 0);

static bool ovs_flow_cmp_ufid(const struct sw_flow *flow,
                              const struct sw_flow_id *sfid)
        if (flow->id.ufid_len != sfid->ufid_len)
                return false;

        return !memcmp(flow->id.ufid, sfid->ufid, sfid->ufid_len);

bool ovs_flow_cmp(const struct sw_flow *flow, const struct sw_flow_match *match)
        if (ovs_identifier_is_ufid(&flow->id))
                return flow_cmp_masked_key(flow, match->key, &match->range);

        return ovs_flow_cmp_unmasked_key(flow, match);
struct sw_flow *ovs_flow_tbl_lookup_ufid(struct flow_table *tbl,
                                         const struct sw_flow_id *ufid)
        struct table_instance *ti = rcu_dereference_ovsl(tbl->ufid_ti);
        struct sw_flow *flow;
        struct hlist_head *head;

        hash = ufid_hash(ufid);
        head = find_bucket(ti, hash);
        hlist_for_each_entry_rcu(flow, head, ufid_table.node[ti->node_ver]) {
                if (flow->ufid_table.hash == hash &&
                    ovs_flow_cmp_ufid(flow, ufid))
                        return flow;
int ovs_flow_tbl_num_masks(const struct flow_table *table)
        struct mask_array *ma;

        ma = rcu_dereference_ovsl(table->mask_array);

static struct table_instance *table_instance_expand(struct table_instance *ti,
                                                    bool ufid)
        return table_instance_rehash(ti, ti->n_buckets * 2, ufid);
static void tbl_mask_array_delete_mask(struct mask_array *ma,
                                       struct sw_flow_mask *mask)
        /* Remove the deleted mask pointers from the array */
        for (i = 0; i < ma->max; i++) {
                if (mask == ovsl_dereference(ma->masks[i])) {
                        RCU_INIT_POINTER(ma->masks[i], NULL);
                        call_rcu(&mask->rcu, rcu_free_sw_flow_mask_cb);
/* Remove 'mask' from the mask list, if it is not needed any more. */
static void flow_mask_remove(struct flow_table *tbl, struct sw_flow_mask *mask)
        /* ovs-lock is required to protect mask-refcount and
         * the mask list.
         */
        BUG_ON(!mask->ref_count);

        if (!mask->ref_count) {
                struct mask_array *ma;

                ma = ovsl_dereference(tbl->mask_array);
                tbl_mask_array_delete_mask(ma, mask);

                /* Shrink the mask array if necessary. */
                if (ma->max >= (MASK_ARRAY_SIZE_MIN * 2) &&
                    ma->count <= (ma->max / 3))
                        tbl_mask_array_realloc(tbl, ma->max / 2);
/* Must be called with OVS mutex held. */
void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
        struct table_instance *ti = ovsl_dereference(table->ti);
        struct table_instance *ufid_ti = ovsl_dereference(table->ufid_ti);

        BUG_ON(table->count == 0);
        hlist_del_rcu(&flow->flow_table.node[ti->node_ver]);

        if (ovs_identifier_is_ufid(&flow->id)) {
                hlist_del_rcu(&flow->ufid_table.node[ufid_ti->node_ver]);

        /* RCU delete the mask. 'flow->mask' is not NULLed, as it should be
         * accessible as long as the RCU read lock is held.
         */
        flow_mask_remove(table, flow->mask);
static struct sw_flow_mask *mask_alloc(void)
        struct sw_flow_mask *mask;

        mask = kmalloc(sizeof(*mask), GFP_KERNEL);

static bool mask_equal(const struct sw_flow_mask *a,
                       const struct sw_flow_mask *b)
        const u8 *a_ = (const u8 *)&a->key + a->range.start;
        const u8 *b_ = (const u8 *)&b->key + b->range.start;

        return (a->range.end == b->range.end)
                && (a->range.start == b->range.start)
                && (memcmp(a_, b_, range_n_bytes(&a->range)) == 0);
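/* Return the existing mask that is equal to 'mask', or NULL if the table
 * does not contain one yet.
 */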
static struct sw_flow_mask *flow_mask_find(const struct flow_table *tbl,
                                           const struct sw_flow_mask *mask)
        struct mask_array *ma;

        ma = ovsl_dereference(tbl->mask_array);
        for (i = 0; i < ma->max; i++) {
                struct sw_flow_mask *t;

                t = ovsl_dereference(ma->masks[i]);
                if (t && mask_equal(mask, t))
                        return t;
/* Add 'mask' into the mask list, if it is not already there. */
static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
                            const struct sw_flow_mask *new)
        struct sw_flow_mask *mask;

        mask = flow_mask_find(tbl, new);
                struct mask_array *ma;

                /* Allocate a new mask if none exists. */

                mask->key = new->key;
                mask->range = new->range;

                /* Add mask to mask-list. */
                ma = ovsl_dereference(tbl->mask_array);
                if (ma->count >= ma->max) {
                        err = tbl_mask_array_realloc(tbl, ma->max +
                                                     MASK_ARRAY_SIZE_MIN);

                        ma = ovsl_dereference(tbl->mask_array);

                for (i = 0; i < ma->max; i++) {
                        struct sw_flow_mask *t;

                        t = ovsl_dereference(ma->masks[i]);
                                rcu_assign_pointer(ma->masks[i], mask);

                BUG_ON(!mask->ref_count);
/* Must be called with OVS mutex held. */
static void flow_key_insert(struct flow_table *table, struct sw_flow *flow)
        struct table_instance *new_ti = NULL;
        struct table_instance *ti;

        flow->flow_table.hash = flow_hash(&flow->key, &flow->mask->range);
        ti = ovsl_dereference(table->ti);
        table_instance_insert(ti, flow);

        /* Expand table, if necessary, to make room. */
        if (table->count > ti->n_buckets)
                new_ti = table_instance_expand(ti, false);
        else if (time_after(jiffies, table->last_rehash + REHASH_INTERVAL))
                new_ti = table_instance_rehash(ti, ti->n_buckets, false);

        if (new_ti) {
                rcu_assign_pointer(table->ti, new_ti);
                call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
                table->last_rehash = jiffies;
        }
/* Must be called with OVS mutex held. */
static void flow_ufid_insert(struct flow_table *table, struct sw_flow *flow)
        struct table_instance *ti;

        flow->ufid_table.hash = ufid_hash(&flow->id);
        ti = ovsl_dereference(table->ufid_ti);
        ufid_table_instance_insert(ti, flow);

        /* Expand table, if necessary, to make room. */
        if (table->ufid_count > ti->n_buckets) {
                struct table_instance *new_ti;

                new_ti = table_instance_expand(ti, true);
                if (new_ti) {
                        rcu_assign_pointer(table->ufid_ti, new_ti);
                        call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
                }
        }
/* Must be called with OVS mutex held. */
int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
                        const struct sw_flow_mask *mask)
        err = flow_mask_insert(table, flow, mask);

        flow_key_insert(table, flow);
        if (ovs_identifier_is_ufid(&flow->id))
                flow_ufid_insert(table, flow);
/* Initializes the flow module.
 * Returns zero if successful or a negative error code.
 */
int ovs_flow_init(void)
        BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long));
        BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));

        flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
                                       + (nr_node_ids
                                          * sizeof(struct flow_stats *)),
                                       0, 0, NULL);
        if (flow_cache == NULL)
                return -ENOMEM;

        flow_stats_cache
                = kmem_cache_create("sw_flow_stats", sizeof(struct flow_stats),
                                    0, SLAB_HWCACHE_ALIGN, NULL);
        if (flow_stats_cache == NULL) {
                kmem_cache_destroy(flow_cache);
/* Uninitializes the flow module. */
void ovs_flow_exit(void)
        kmem_cache_destroy(flow_stats_cache);
        kmem_cache_destroy(flow_cache);