/* flow.c: Generic flow cache.
 *
 * Copyright (C) 2003 Alexey N. Kuznetsov (kuznet@ms2.inr.ac.ru)
 * Copyright (C) 2003 David S. Miller (davem@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/interrupt.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/completion.h>
#include <linux/percpu.h>
#include <linux/bitops.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/mutex.h>
#include <net/flow.h>
#include <net/flow_dissector.h>
#include <linux/atomic.h>
#include <linux/security.h>
#include <net/net_namespace.h>

struct flow_cache_entry {
	union {
		struct hlist_node	hlist;
		struct list_head	gc_list;
	} u;
	struct net			*net;
	u16				family;
	u8				dir;
	u32				genid;
	struct flowi			key;
	struct flow_cache_object	*object;
};

struct flow_flush_info {
	struct flow_cache		*cache;
	atomic_t			cpuleft;
	struct completion		completion;
};

static struct kmem_cache *flow_cachep __read_mostly;

#define flow_cache_hash_size(cache)	(1 << (cache)->hash_shift)
#define FLOW_HASH_RND_PERIOD		(10 * 60 * HZ)

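/* Timer callback: ask every CPU to pick a fresh jhash secret on its next
 * lookup, then re-arm the timer for the next period.
 */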
static void flow_cache_new_hashrnd(unsigned long arg)
{
	struct flow_cache *fc = (void *) arg;
	int i;

	for_each_possible_cpu(i)
		per_cpu_ptr(fc->percpu, i)->hash_rnd_recalc = 1;

	fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
	add_timer(&fc->rnd_timer);
}

static int flow_entry_valid(struct flow_cache_entry *fle,
				struct netns_xfrm *xfrm)
{
	if (atomic_read(&xfrm->flow_cache_genid) != fle->genid)
		return 0;
	if (fle->object && !fle->object->ops->check(fle->object))
		return 0;
	return 1;
}

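/* Release an entry: drop its cached object (if any) via the object's own
 * delete operation and return the entry to the slab cache.
 */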
static void flow_entry_kill(struct flow_cache_entry *fle,
				struct netns_xfrm *xfrm)
{
	if (fle->object)
		fle->object->ops->delete(fle->object);
	kmem_cache_free(flow_cachep, fle);
}

static void flow_cache_gc_task(struct work_struct *work)
{
	struct list_head gc_list;
	struct flow_cache_entry *fce, *n;
	struct netns_xfrm *xfrm = container_of(work, struct netns_xfrm,
						flow_cache_gc_work);

	INIT_LIST_HEAD(&gc_list);
	spin_lock_bh(&xfrm->flow_cache_gc_lock);
	list_splice_tail_init(&xfrm->flow_cache_gc_list, &gc_list);
	spin_unlock_bh(&xfrm->flow_cache_gc_lock);

	list_for_each_entry_safe(fce, n, &gc_list, u.gc_list)
		flow_entry_kill(fce, xfrm);
}

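/* Hand a batch of unlinked entries to the per-netns GC list and kick the
 * work item, so they are freed later by flow_cache_gc_task().
 */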
static void flow_cache_queue_garbage(struct flow_cache_percpu *fcp,
				     int deleted, struct list_head *gc_list,
				     struct netns_xfrm *xfrm)
{
	if (deleted) {
		fcp->hash_count -= deleted;
		spin_lock_bh(&xfrm->flow_cache_gc_lock);
		list_splice_tail(gc_list, &xfrm->flow_cache_gc_list);
		spin_unlock_bh(&xfrm->flow_cache_gc_lock);
		schedule_work(&xfrm->flow_cache_gc_work);
	}
}

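/* Walk one CPU's hash table and keep at most shrink_to still-valid entries
 * per chain; everything else is unlinked and queued for garbage collection.
 */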
static void __flow_cache_shrink(struct flow_cache *fc,
				struct flow_cache_percpu *fcp,
				int shrink_to)
{
	struct flow_cache_entry *fle;
	struct hlist_node *tmp;
	LIST_HEAD(gc_list);
	int i, deleted = 0;
	struct netns_xfrm *xfrm = container_of(fc, struct netns_xfrm,
						flow_cache_global);

	for (i = 0; i < flow_cache_hash_size(fc); i++) {
		int saved = 0;

		hlist_for_each_entry_safe(fle, tmp,
					  &fcp->hash_table[i], u.hlist) {
			if (saved < shrink_to &&
			    flow_entry_valid(fle, xfrm)) {
				saved++;
			} else {
				deleted++;
				hlist_del(&fle->u.hlist);
				list_add_tail(&fle->u.gc_list, &gc_list);
			}
		}
	}

	flow_cache_queue_garbage(fcp, deleted, &gc_list, xfrm);
}

static void flow_cache_shrink(struct flow_cache *fc,
			      struct flow_cache_percpu *fcp)
{
	int shrink_to = fc->low_watermark / flow_cache_hash_size(fc);

	__flow_cache_shrink(fc, fcp, shrink_to);
}

static void flow_new_hash_rnd(struct flow_cache *fc,
			      struct flow_cache_percpu *fcp)
{
	get_random_bytes(&fcp->hash_rnd, sizeof(u32));
	fcp->hash_rnd_recalc = 0;
	__flow_cache_shrink(fc, fcp, 0);
}

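/* Hash a flow key with the per-CPU jhash seed and fold the result into the
 * table size, which is always a power of two.
 */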
static u32 flow_hash_code(struct flow_cache *fc,
			  struct flow_cache_percpu *fcp,
			  const struct flowi *key,
			  size_t keysize)
{
	const u32 *k = (const u32 *) key;
	const u32 length = keysize * sizeof(flow_compare_t) / sizeof(u32);

	return jhash2(k, length, fcp->hash_rnd)
		& (flow_cache_hash_size(fc) - 1);
}

/* I hear what you're saying, use memcmp.  But memcmp cannot make
 * important assumptions that we can here, such as alignment.
 */
static int flow_key_compare(const struct flowi *key1, const struct flowi *key2,
			    size_t keysize)
{
	const flow_compare_t *k1, *k1_lim, *k2;

	k1 = (const flow_compare_t *) key1;
	k1_lim = k1 + keysize;

	k2 = (const flow_compare_t *) key2;

	do {
		if (*k1++ != *k2++)
			return 1;
	} while (k1 < k1_lim);

	return 0;
}

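/* Look up @key in the current CPU's hash table.  On a miss, or when the
 * cached entry is stale, the caller-supplied resolver is invoked and its
 * result is cached before being returned.
 */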
struct flow_cache_object *
flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
		  flow_resolve_t resolver, void *ctx)
{
	struct flow_cache *fc = &net->xfrm.flow_cache_global;
	struct flow_cache_percpu *fcp;
	struct flow_cache_entry *fle, *tfle;
	struct flow_cache_object *flo;
	size_t keysize;
	unsigned int hash;

	local_bh_disable();
	fcp = this_cpu_ptr(fc->percpu);

	fle = NULL;
	flo = NULL;

	keysize = flow_key_size(family);
	if (!keysize)
		goto nocache;

	/* Packet really early in init?  Making flow_cache_init a
	 * pre-smp initcall would solve this.  --RR */
	if (!fcp->hash_table)
		goto nocache;

	if (fcp->hash_rnd_recalc)
		flow_new_hash_rnd(fc, fcp);

	hash = flow_hash_code(fc, fcp, key, keysize);
	hlist_for_each_entry(tfle, &fcp->hash_table[hash], u.hlist) {
		if (tfle->net == net &&
		    tfle->family == family &&
		    tfle->dir == dir &&
		    flow_key_compare(key, &tfle->key, keysize) == 0) {
			fle = tfle;
			break;
		}
	}

	if (unlikely(!fle)) {
		if (fcp->hash_count > fc->high_watermark)
			flow_cache_shrink(fc, fcp);

		fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC);
		if (fle) {
			fle->net = net;
			fle->family = family;
			fle->dir = dir;
			memcpy(&fle->key, key, keysize * sizeof(flow_compare_t));
			fle->object = NULL;
			hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
			fcp->hash_count++;
		}
	} else if (likely(fle->genid == atomic_read(&net->xfrm.flow_cache_genid))) {
		flo = fle->object;
		if (!flo)
			goto ret_object;
		flo = flo->ops->get(flo);
		if (flo)
			goto ret_object;
	} else if (fle->object) {
		flo = fle->object;
		flo->ops->delete(flo);
		fle->object = NULL;
	}

nocache:
	flo = NULL;
	if (fle) {
		flo = fle->object;
		fle->object = NULL;
	}
	flo = resolver(net, key, family, dir, flo, ctx);
	if (fle) {
		fle->genid = atomic_read(&net->xfrm.flow_cache_genid);
		if (!IS_ERR(flo))
			fle->object = flo;
		else
			fle->genid--;
	} else {
		if (!IS_ERR_OR_NULL(flo))
			flo->ops->delete(flo);
	}
ret_object:
	local_bh_enable();
	return flo;
}
EXPORT_SYMBOL(flow_cache_lookup);

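/* Tasklet run on each CPU being flushed: drop every entry that is no longer
 * valid for the current generation, then signal completion if this was the
 * last CPU to finish.
 */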
static void flow_cache_flush_tasklet(unsigned long data)
{
	struct flow_flush_info *info = (void *)data;
	struct flow_cache *fc = info->cache;
	struct flow_cache_percpu *fcp;
	struct flow_cache_entry *fle;
	struct hlist_node *tmp;
	LIST_HEAD(gc_list);
	int i, deleted = 0;
	struct netns_xfrm *xfrm = container_of(fc, struct netns_xfrm,
						flow_cache_global);

	fcp = this_cpu_ptr(fc->percpu);
	for (i = 0; i < flow_cache_hash_size(fc); i++) {
		hlist_for_each_entry_safe(fle, tmp,
					  &fcp->hash_table[i], u.hlist) {
			if (flow_entry_valid(fle, xfrm))
				continue;

			deleted++;
			hlist_del(&fle->u.hlist);
			list_add_tail(&fle->u.gc_list, &gc_list);
		}
	}

	flow_cache_queue_garbage(fcp, deleted, &gc_list, xfrm);

	if (atomic_dec_and_test(&info->cpuleft))
		complete(&info->completion);
}

/*
 * Return whether a cpu needs flushing.  Conservatively, we assume
 * the presence of any entries means the core may require flushing,
 * since the flow_cache_ops.check() function may assume it's running
 * on the same core as the per-cpu cache component.
 */
static int flow_cache_percpu_empty(struct flow_cache *fc, int cpu)
{
	struct flow_cache_percpu *fcp;
	int i;

	fcp = per_cpu_ptr(fc->percpu, cpu);
	for (i = 0; i < flow_cache_hash_size(fc); i++)
		if (!hlist_empty(&fcp->hash_table[i]))
			return 0;
	return 1;
}

static void flow_cache_flush_per_cpu(void *data)
{
	struct flow_flush_info *info = data;
	struct tasklet_struct *tasklet;

	tasklet = &this_cpu_ptr(info->cache->percpu)->flush_tasklet;
	tasklet->data = (unsigned long)info;
	tasklet_schedule(tasklet);
}

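/* Flush the whole cache for a namespace: schedule the flush tasklet on every
 * online CPU that still holds entries and wait until all of them report back.
 */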
void flow_cache_flush(struct net *net)
{
	struct flow_flush_info info;
	cpumask_var_t mask;
	int i, self;

	/* Track which cpus need flushing to avoid disturbing all cores. */
	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return;
	cpumask_clear(mask);

	/* Don't want cpus going down or up during this. */
	get_online_cpus();
	mutex_lock(&net->xfrm.flow_flush_sem);
	info.cache = &net->xfrm.flow_cache_global;
	for_each_online_cpu(i)
		if (!flow_cache_percpu_empty(info.cache, i))
			cpumask_set_cpu(i, mask);
	atomic_set(&info.cpuleft, cpumask_weight(mask));
	if (atomic_read(&info.cpuleft) == 0)
		goto done;

	init_completion(&info.completion);

	local_bh_disable();
	self = cpumask_test_and_clear_cpu(smp_processor_id(), mask);
	on_each_cpu_mask(mask, flow_cache_flush_per_cpu, &info, 0);
	if (self)
		flow_cache_flush_tasklet((unsigned long)&info);
	local_bh_enable();

	wait_for_completion(&info.completion);

done:
	mutex_unlock(&net->xfrm.flow_flush_sem);
	put_online_cpus();
	free_cpumask_var(mask);
}

static void flow_cache_flush_task(struct work_struct *work)
{
	struct netns_xfrm *xfrm = container_of(work, struct netns_xfrm,
						flow_cache_flush_work);
	struct net *net = container_of(xfrm, struct net, xfrm);

	flow_cache_flush(net);
}

void flow_cache_flush_deferred(struct net *net)
{
	schedule_work(&net->xfrm.flow_cache_flush_work);
}

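/* Allocate and initialise a CPU's hash table the first time that CPU is
 * prepared; later calls for the same CPU are no-ops.
 */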
static int flow_cache_cpu_prepare(struct flow_cache *fc, int cpu)
{
	struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);
	size_t sz = sizeof(struct hlist_head) * flow_cache_hash_size(fc);

	if (!fcp->hash_table) {
		fcp->hash_table = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
		if (!fcp->hash_table) {
			pr_err("NET: failed to allocate flow cache sz %zu\n", sz);
			return -ENOMEM;
		}
		fcp->hash_rnd_recalc = 1;
		fcp->hash_count = 0;
		tasklet_init(&fcp->flush_tasklet, flow_cache_flush_tasklet, 0);
	}
	return 0;
}

static int flow_cache_cpu(struct notifier_block *nfb,
			  unsigned long action,
			  void *hcpu)
{
	struct flow_cache *fc = container_of(nfb, struct flow_cache,
						hotcpu_notifier);
	int res, cpu = (unsigned long) hcpu;
	struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		res = flow_cache_cpu_prepare(fc, cpu);
		if (res)
			return notifier_from_errno(res);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		__flow_cache_shrink(fc, fcp, 0);
		break;
	}
	return NOTIFY_OK;
}

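/* Per-namespace initialisation: set up the (global) slab cache, GC and flush
 * plumbing, per-CPU hash tables, the CPU hotplug notifier and the periodic
 * hash rekeying timer.
 */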
int flow_cache_init(struct net *net)
{
	int i;
	struct flow_cache *fc = &net->xfrm.flow_cache_global;

	if (!flow_cachep)
		flow_cachep = kmem_cache_create("flow_cache",
						sizeof(struct flow_cache_entry),
						0, SLAB_PANIC, NULL);
	spin_lock_init(&net->xfrm.flow_cache_gc_lock);
	INIT_LIST_HEAD(&net->xfrm.flow_cache_gc_list);
	INIT_WORK(&net->xfrm.flow_cache_gc_work, flow_cache_gc_task);
	INIT_WORK(&net->xfrm.flow_cache_flush_work, flow_cache_flush_task);
	mutex_init(&net->xfrm.flow_flush_sem);

	fc->hash_shift = 10;
	fc->low_watermark = 2 * flow_cache_hash_size(fc);
	fc->high_watermark = 4 * flow_cache_hash_size(fc);

	fc->percpu = alloc_percpu(struct flow_cache_percpu);
	if (!fc->percpu)
		return -ENOMEM;

	cpu_notifier_register_begin();

	for_each_online_cpu(i) {
		if (flow_cache_cpu_prepare(fc, i))
			goto err;
	}
	fc->hotcpu_notifier = (struct notifier_block){
		.notifier_call = flow_cache_cpu,
	};
	__register_hotcpu_notifier(&fc->hotcpu_notifier);

	cpu_notifier_register_done();

	setup_timer(&fc->rnd_timer, flow_cache_new_hashrnd,
		    (unsigned long) fc);
	fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
	add_timer(&fc->rnd_timer);

	return 0;

err:
	for_each_possible_cpu(i) {
		struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, i);

		kfree(fcp->hash_table);
		fcp->hash_table = NULL;
	}

	cpu_notifier_register_done();

	free_percpu(fc->percpu);
	fc->percpu = NULL;

	return -ENOMEM;
}
EXPORT_SYMBOL(flow_cache_init);

void flow_cache_fini(struct net *net)
{
	int i;
	struct flow_cache *fc = &net->xfrm.flow_cache_global;

	del_timer_sync(&fc->rnd_timer);
	unregister_hotcpu_notifier(&fc->hotcpu_notifier);

	for_each_possible_cpu(i) {
		struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, i);

		kfree(fcp->hash_table);
		fcp->hash_table = NULL;
	}

	free_percpu(fc->percpu);
	fc->percpu = NULL;
}
EXPORT_SYMBOL(flow_cache_fini);

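/* Convert a flowi6 into generic flow_keys and hash it with the flow
 * dissector; the flowi4 variant below does the same for IPv4 keys.
 */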
__u32 __get_hash_from_flowi6(struct flowi6 *fl6, struct flow_keys *keys)
{
	memset(keys, 0, sizeof(*keys));

	memcpy(&keys->addrs.v6addrs.src, &fl6->saddr,
	       sizeof(keys->addrs.v6addrs.src));
	memcpy(&keys->addrs.v6addrs.dst, &fl6->daddr,
	       sizeof(keys->addrs.v6addrs.dst));
	keys->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
	keys->ports.src = fl6->fl6_sport;
	keys->ports.dst = fl6->fl6_dport;
	keys->keyid.keyid = fl6->fl6_gre_key;
	keys->tags.flow_label = (__force u32)fl6->flowlabel;
	keys->basic.ip_proto = fl6->flowi6_proto;

	return flow_hash_from_keys(keys);
}
EXPORT_SYMBOL(__get_hash_from_flowi6);

__u32 __get_hash_from_flowi4(struct flowi4 *fl4, struct flow_keys *keys)
{
	memset(keys, 0, sizeof(*keys));

	keys->addrs.v4addrs.src = fl4->saddr;
	keys->addrs.v4addrs.dst = fl4->daddr;
	keys->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
	keys->ports.src = fl4->fl4_sport;
	keys->ports.dst = fl4->fl4_dport;
	keys->keyid.keyid = fl4->fl4_gre_key;
	keys->basic.ip_proto = fl4->flowi4_proto;

	return flow_hash_from_keys(keys);
}
EXPORT_SYMBOL(__get_hash_from_flowi4);