 *	Kazunori MIYAZAWA @USAGI
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *		Kazunori MIYAZAWA @USAGI
 *		Split up af-specific portion
 *	Derek Atkins <derek@ihtfp.com>		Add the post_input processor
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/audit.h>
#ifdef CONFIG_XFRM_STATISTICS
#include <net/snmp.h>
#endif

#include "xfrm_hash.h"
#define XFRM_QUEUE_TMO_MIN ((unsigned)(HZ/10))
#define XFRM_QUEUE_TMO_MAX ((unsigned)(60*HZ))
#define XFRM_MAX_QUEUE_LEN 100
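
/* A note on the queue knobs above (inferred from xfrm_policy_queue_process()
 * and xdst_queue_output() below): packets parked on a policy's hold_queue are
 * re-processed with an exponential backoff, starting at XFRM_QUEUE_TMO_MIN,
 * doubling on each pass, and capped at XFRM_QUEUE_TMO_MAX; at most
 * XFRM_MAX_QUEUE_LEN packets are held at once.
 */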
DEFINE_MUTEX(xfrm_cfg_mutex);
EXPORT_SYMBOL(xfrm_cfg_mutex);

static DEFINE_SPINLOCK(xfrm_policy_sk_bundle_lock);
static struct dst_entry *xfrm_policy_sk_bundles;
static DEFINE_RWLOCK(xfrm_policy_lock);

static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock);
static struct xfrm_policy_afinfo __rcu *xfrm_policy_afinfo[NPROTO]
						__read_mostly;

static struct kmem_cache *xfrm_dst_cache __read_mostly;

static void xfrm_init_pmtu(struct dst_entry *dst);
static int stale_bundle(struct dst_entry *dst);
static int xfrm_bundle_ok(struct xfrm_dst *xdst);
static void xfrm_policy_queue_process(unsigned long arg);

static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
						int dir);
__xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
	const struct flowi4 *fl4 = &fl->u.ip4;

	return  addr4_match(fl4->daddr, sel->daddr.a4, sel->prefixlen_d) &&
		addr4_match(fl4->saddr, sel->saddr.a4, sel->prefixlen_s) &&
		!((xfrm_flowi_dport(fl, &fl4->uli) ^ sel->dport) & sel->dport_mask) &&
		!((xfrm_flowi_sport(fl, &fl4->uli) ^ sel->sport) & sel->sport_mask) &&
		(fl4->flowi4_proto == sel->proto || !sel->proto) &&
		(fl4->flowi4_oif == sel->ifindex || !sel->ifindex);

__xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
	const struct flowi6 *fl6 = &fl->u.ip6;

	return  addr_match(&fl6->daddr, &sel->daddr, sel->prefixlen_d) &&
		addr_match(&fl6->saddr, &sel->saddr, sel->prefixlen_s) &&
		!((xfrm_flowi_dport(fl, &fl6->uli) ^ sel->dport) & sel->dport_mask) &&
		!((xfrm_flowi_sport(fl, &fl6->uli) ^ sel->sport) & sel->sport_mask) &&
		(fl6->flowi6_proto == sel->proto || !sel->proto) &&
		(fl6->flowi6_oif == sel->ifindex || !sel->ifindex);

bool xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl,
			 unsigned short family)
		return __xfrm4_selector_match(sel, fl);
		return __xfrm6_selector_match(sel, fl);
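
/* How the port test above works, as a worked example (not from the original
 * source): with sel->dport = htons(80) and sel->dport_mask = htons(0xffff),
 * "!((dport ^ sel->dport) & sel->dport_mask)" is true only for dport == 80;
 * with dport_mask = 0 the XOR result is masked away entirely, so any port
 * matches. The same pattern applies to the source port, and prefixlen_d/_s
 * play the analogous role for the address comparisons.
 */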
static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
	struct xfrm_policy_afinfo *afinfo;

	if (unlikely(family >= NPROTO))
	afinfo = rcu_dereference(xfrm_policy_afinfo[family]);
	if (unlikely(!afinfo))

static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo)

static inline struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos,
						  const xfrm_address_t *saddr,
						  const xfrm_address_t *daddr,
	struct xfrm_policy_afinfo *afinfo;
	struct dst_entry *dst;

	afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return ERR_PTR(-EAFNOSUPPORT);

	dst = afinfo->dst_lookup(net, tos, saddr, daddr);

	xfrm_policy_put_afinfo(afinfo);

static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x, int tos,
						xfrm_address_t *prev_saddr,
						xfrm_address_t *prev_daddr,
	struct net *net = xs_net(x);
	xfrm_address_t *saddr = &x->props.saddr;
	xfrm_address_t *daddr = &x->id.daddr;
	struct dst_entry *dst;

	if (x->type->flags & XFRM_TYPE_LOCAL_COADDR) {
	if (x->type->flags & XFRM_TYPE_REMOTE_COADDR) {

	dst = __xfrm_dst_lookup(net, tos, saddr, daddr, family);

	if (prev_saddr != saddr)
		memcpy(prev_saddr, saddr, sizeof(*prev_saddr));
	if (prev_daddr != daddr)
		memcpy(prev_daddr, daddr, sizeof(*prev_daddr));

static inline unsigned long make_jiffies(long secs)
	if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
		return MAX_SCHEDULE_TIMEOUT-1;
static void xfrm_policy_timer(unsigned long data)
	struct xfrm_policy *xp = (struct xfrm_policy *)data;
	unsigned long now = get_seconds();
	long next = LONG_MAX;

	read_lock(&xp->lock);

	if (unlikely(xp->walk.dead))

	dir = xfrm_policy_id2dir(xp->index);

	if (xp->lft.hard_add_expires_seconds) {
		long tmo = xp->lft.hard_add_expires_seconds +
			xp->curlft.add_time - now;
	if (xp->lft.hard_use_expires_seconds) {
		long tmo = xp->lft.hard_use_expires_seconds +
			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
	if (xp->lft.soft_add_expires_seconds) {
		long tmo = xp->lft.soft_add_expires_seconds +
			xp->curlft.add_time - now;
			tmo = XFRM_KM_TIMEOUT;
	if (xp->lft.soft_use_expires_seconds) {
		long tmo = xp->lft.soft_use_expires_seconds +
			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
			tmo = XFRM_KM_TIMEOUT;

		km_policy_expired(xp, dir, 0, 0);
	if (next != LONG_MAX &&
	    !mod_timer(&xp->timer, jiffies + make_jiffies(next)))

	read_unlock(&xp->lock);

	read_unlock(&xp->lock);
	if (!xfrm_policy_delete(xp, dir))
		km_policy_expired(xp, dir, 1, 0);
static struct flow_cache_object *xfrm_policy_flo_get(struct flow_cache_object *flo)
	struct xfrm_policy *pol = container_of(flo, struct xfrm_policy, flo);

	if (unlikely(pol->walk.dead))

static int xfrm_policy_flo_check(struct flow_cache_object *flo)
	struct xfrm_policy *pol = container_of(flo, struct xfrm_policy, flo);

	return !pol->walk.dead;

static void xfrm_policy_flo_delete(struct flow_cache_object *flo)
	xfrm_pol_put(container_of(flo, struct xfrm_policy, flo));

static const struct flow_cache_ops xfrm_policy_fc_ops = {
	.get = xfrm_policy_flo_get,
	.check = xfrm_policy_flo_check,
	.delete = xfrm_policy_flo_delete,
/* Allocate xfrm_policy. Not used here, it is supposed to be used by pfkeyv2
 * layer. */
struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
	struct xfrm_policy *policy;

	policy = kzalloc(sizeof(struct xfrm_policy), gfp);

		write_pnet(&policy->xp_net, net);
		INIT_LIST_HEAD(&policy->walk.all);
		INIT_HLIST_NODE(&policy->bydst);
		INIT_HLIST_NODE(&policy->byidx);
		rwlock_init(&policy->lock);
		atomic_set(&policy->refcnt, 1);
		skb_queue_head_init(&policy->polq.hold_queue);
		setup_timer(&policy->timer, xfrm_policy_timer,
				(unsigned long)policy);
		setup_timer(&policy->polq.hold_timer, xfrm_policy_queue_process,
				(unsigned long)policy);
		policy->flo.ops = &xfrm_policy_fc_ops;
EXPORT_SYMBOL(xfrm_policy_alloc);
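
/* A minimal lifecycle sketch (illustrative, not from this file): a caller such
 * as the pfkeyv2 or netlink configuration layer allocates a policy, fills in
 * its selector and lifetimes, and hands it to xfrm_policy_insert(); teardown
 * goes through xfrm_policy_delete()/xfrm_policy_kill(), and the final
 * xfrm_pol_put() ends up in xfrm_policy_destroy() once walk.dead is set.
 *
 *	struct xfrm_policy *p = xfrm_policy_alloc(net, GFP_KERNEL);
 *	if (p) {
 *		p->selector = my_sel;		// hypothetical, caller-provided
 *		p->family = AF_INET;
 *		p->action = XFRM_POLICY_ALLOW;
 *		err = xfrm_policy_insert(XFRM_POLICY_OUT, p, 0);
 *	}
 */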
/* Destroy xfrm_policy: descendant resources must be released by this point. */

void xfrm_policy_destroy(struct xfrm_policy *policy)
	BUG_ON(!policy->walk.dead);

	if (del_timer(&policy->timer))

	security_xfrm_policy_free(policy->security);
EXPORT_SYMBOL(xfrm_policy_destroy);

static void xfrm_queue_purge(struct sk_buff_head *list)
	while ((skb = skb_dequeue(list)) != NULL)
/* Rule must be locked. Release descendant resources, announce
 * entry dead. The rule must already be unlinked from all lists.
 */
static void xfrm_policy_kill(struct xfrm_policy *policy)
	policy->walk.dead = 1;

	atomic_inc(&policy->genid);

	del_timer(&policy->polq.hold_timer);
	xfrm_queue_purge(&policy->polq.hold_queue);

	if (del_timer(&policy->timer))
		xfrm_pol_put(policy);

	xfrm_pol_put(policy);
static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;

static inline unsigned int idx_hash(struct net *net, u32 index)
	return __idx_hash(index, net->xfrm.policy_idx_hmask);

static struct hlist_head *policy_hash_bysel(struct net *net,
					    const struct xfrm_selector *sel,
					    unsigned short family, int dir)
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int hash = __sel_hash(sel, family, hmask);

	return (hash == hmask + 1 ?
		&net->xfrm.policy_inexact[dir] :
		net->xfrm.policy_bydst[dir].table + hash);

static struct hlist_head *policy_hash_direct(struct net *net,
					     const xfrm_address_t *daddr,
					     const xfrm_address_t *saddr,
					     unsigned short family, int dir)
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int hash = __addr_hash(daddr, saddr, family, hmask);

	return net->xfrm.policy_bydst[dir].table + hash;

static void xfrm_dst_hash_transfer(struct hlist_head *list,
				   struct hlist_head *ndsttable,
				   unsigned int nhashmask)
	struct hlist_node *tmp, *entry0 = NULL;
	struct xfrm_policy *pol;

	hlist_for_each_entry_safe(pol, tmp, list, bydst) {
		h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
				pol->family, nhashmask);
			hlist_del(&pol->bydst);
			hlist_add_head(&pol->bydst, ndsttable+h);
			hlist_del(&pol->bydst);
			hlist_add_after(entry0, &pol->bydst);
		entry0 = &pol->bydst;
	if (!hlist_empty(list)) {

static void xfrm_idx_hash_transfer(struct hlist_head *list,
				   struct hlist_head *nidxtable,
				   unsigned int nhashmask)
	struct hlist_node *tmp;
	struct xfrm_policy *pol;

	hlist_for_each_entry_safe(pol, tmp, list, byidx) {
		h = __idx_hash(pol->index, nhashmask);
		hlist_add_head(&pol->byidx, nidxtable+h);

static unsigned long xfrm_new_hash_mask(unsigned int old_hmask)
	return ((old_hmask + 1) << 1) - 1;
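
/* Worked example (not in the original source): growing a table doubles the
 * bucket count while keeping a power-of-two mask. For old_hmask = 15 (16
 * buckets), ((15 + 1) << 1) - 1 = 31, i.e. 32 buckets; repeated growth yields
 * 16 -> 32 -> 64 -> ... buckets, bounded by xfrm_policy_hashmax above.
 */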
static void xfrm_bydst_resize(struct net *net, int dir)
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
	struct hlist_head *odst = net->xfrm.policy_bydst[dir].table;
	struct hlist_head *ndst = xfrm_hash_alloc(nsize);

	write_lock_bh(&xfrm_policy_lock);

	for (i = hmask; i >= 0; i--)
		xfrm_dst_hash_transfer(odst + i, ndst, nhashmask);

	net->xfrm.policy_bydst[dir].table = ndst;
	net->xfrm.policy_bydst[dir].hmask = nhashmask;

	write_unlock_bh(&xfrm_policy_lock);

	xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head));

static void xfrm_byidx_resize(struct net *net, int total)
	unsigned int hmask = net->xfrm.policy_idx_hmask;
	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
	struct hlist_head *oidx = net->xfrm.policy_byidx;
	struct hlist_head *nidx = xfrm_hash_alloc(nsize);

	write_lock_bh(&xfrm_policy_lock);

	for (i = hmask; i >= 0; i--)
		xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask);

	net->xfrm.policy_byidx = nidx;
	net->xfrm.policy_idx_hmask = nhashmask;

	write_unlock_bh(&xfrm_policy_lock);

	xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head));

static inline int xfrm_bydst_should_resize(struct net *net, int dir, int *total)
	unsigned int cnt = net->xfrm.policy_count[dir];
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;

	if ((hmask + 1) < xfrm_policy_hashmax &&

static inline int xfrm_byidx_should_resize(struct net *net, int total)
	unsigned int hmask = net->xfrm.policy_idx_hmask;

	if ((hmask + 1) < xfrm_policy_hashmax &&

void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si)
	read_lock_bh(&xfrm_policy_lock);
	si->incnt = net->xfrm.policy_count[XFRM_POLICY_IN];
	si->outcnt = net->xfrm.policy_count[XFRM_POLICY_OUT];
	si->fwdcnt = net->xfrm.policy_count[XFRM_POLICY_FWD];
	si->inscnt = net->xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX];
	si->outscnt = net->xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX];
	si->fwdscnt = net->xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
	si->spdhcnt = net->xfrm.policy_idx_hmask;
	si->spdhmcnt = xfrm_policy_hashmax;
	read_unlock_bh(&xfrm_policy_lock);
EXPORT_SYMBOL(xfrm_spd_getinfo);

static DEFINE_MUTEX(hash_resize_mutex);

static void xfrm_hash_resize(struct work_struct *work)
	struct net *net = container_of(work, struct net, xfrm.policy_hash_work);

	mutex_lock(&hash_resize_mutex);

	for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
		if (xfrm_bydst_should_resize(net, dir, &total))
			xfrm_bydst_resize(net, dir);
	if (xfrm_byidx_should_resize(net, total))
		xfrm_byidx_resize(net, total);

	mutex_unlock(&hash_resize_mutex);
/* Generate a new index... KAME seems to generate them ordered by cost
 * of absolute unpredictability of rule ordering. That will not do here. */
static u32 xfrm_gen_index(struct net *net, int dir)
	static u32 idx_generator;
		struct hlist_head *list;
		struct xfrm_policy *p;

		idx = (idx_generator | dir);

		list = net->xfrm.policy_byidx + idx_hash(net, idx);

		hlist_for_each_entry(p, list, byidx) {
			if (p->index == idx) {
static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s2)
{
	u32 *p1 = (u32 *) s1;
	u32 *p2 = (u32 *) s2;
	int len = sizeof(struct xfrm_selector) / sizeof(u32);
	int i;

	for (i = 0; i < len; i++) {
		if (p1[i] != p2[i])
			return 1;
	}

	return 0;
}
static void xfrm_policy_requeue(struct xfrm_policy *old,
				struct xfrm_policy *new)
	struct xfrm_policy_queue *pq = &old->polq;
	struct sk_buff_head list;

	__skb_queue_head_init(&list);

	spin_lock_bh(&pq->hold_queue.lock);
	skb_queue_splice_init(&pq->hold_queue, &list);
	del_timer(&pq->hold_timer);
	spin_unlock_bh(&pq->hold_queue.lock);

	if (skb_queue_empty(&list))

	spin_lock_bh(&pq->hold_queue.lock);
	skb_queue_splice(&list, &pq->hold_queue);
	pq->timeout = XFRM_QUEUE_TMO_MIN;
	mod_timer(&pq->hold_timer, jiffies);
	spin_unlock_bh(&pq->hold_queue.lock);
static bool xfrm_policy_mark_match(struct xfrm_policy *policy,
				   struct xfrm_policy *pol)
{
	u32 mark = policy->mark.v & policy->mark.m;

	if (policy->mark.v == pol->mark.v && policy->mark.m == pol->mark.m)
		return true;

	if ((mark & pol->mark.m) == pol->mark.v &&
	    policy->priority == pol->priority)
		return true;

	return false;
}
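
/* Worked example for the mark test above (illustrative): with pol->mark.v =
 * 0x1 and pol->mark.m = 0xf, an incoming policy whose masked mark
 * (policy->mark.v & policy->mark.m) equals 0x1 in the low nibble is treated
 * as a match, provided the priorities are equal; an exact (v, m) pair match
 * short-circuits first.
 */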
int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
	struct net *net = xp_net(policy);
	struct xfrm_policy *pol;
	struct xfrm_policy *delpol;
	struct hlist_head *chain;
	struct hlist_node *newpos;

	write_lock_bh(&xfrm_policy_lock);
	chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);

	hlist_for_each_entry(pol, chain, bydst) {
		if (pol->type == policy->type &&
		    !selector_cmp(&pol->selector, &policy->selector) &&
		    xfrm_policy_mark_match(policy, pol) &&
		    xfrm_sec_ctx_match(pol->security, policy->security) &&
				write_unlock_bh(&xfrm_policy_lock);
			if (policy->priority > pol->priority)
		} else if (policy->priority >= pol->priority) {
			newpos = &pol->bydst;

		hlist_add_after(newpos, &policy->bydst);
		hlist_add_head(&policy->bydst, chain);
	xfrm_pol_hold(policy);
	net->xfrm.policy_count[dir]++;
	atomic_inc(&flow_cache_genid);
		xfrm_policy_requeue(delpol, policy);
		__xfrm_policy_unlink(delpol, dir);
	policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
	hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index));
	policy->curlft.add_time = get_seconds();
	policy->curlft.use_time = 0;
	if (!mod_timer(&policy->timer, jiffies + HZ))
		xfrm_pol_hold(policy);
	list_add(&policy->walk.all, &net->xfrm.policy_all);
	write_unlock_bh(&xfrm_policy_lock);

		xfrm_policy_kill(delpol);
	else if (xfrm_bydst_should_resize(net, dir, NULL))
		schedule_work(&net->xfrm.policy_hash_work);
EXPORT_SYMBOL(xfrm_policy_insert);
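
/* Ordering note with a small example (inferred from the insertion loop
 * above): a chain is kept sorted by ascending ->priority, and an entry with
 * identical type, selector, mark and security context replaces the old one
 * (delpol) in place. Inserting priorities 10, 30, then 20 yields chain order
 * 10, 20, 30; a duplicate insert with excl set is refused instead of
 * replacing.
 */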
struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u8 type,
					  int dir, struct xfrm_selector *sel,
					  struct xfrm_sec_ctx *ctx, int delete,
	struct xfrm_policy *pol, *ret;
	struct hlist_head *chain;

	write_lock_bh(&xfrm_policy_lock);
	chain = policy_hash_bysel(net, sel, sel->family, dir);

	hlist_for_each_entry(pol, chain, bydst) {
		if (pol->type == type &&
		    (mark & pol->mark.m) == pol->mark.v &&
		    !selector_cmp(sel, &pol->selector) &&
		    xfrm_sec_ctx_match(ctx, pol->security)) {
				*err = security_xfrm_policy_delete(
					write_unlock_bh(&xfrm_policy_lock);
				__xfrm_policy_unlink(pol, dir);
	write_unlock_bh(&xfrm_policy_lock);

		xfrm_policy_kill(ret);
EXPORT_SYMBOL(xfrm_policy_bysel_ctx);

struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8 type,
				     int dir, u32 id, int delete, int *err)
	struct xfrm_policy *pol, *ret;
	struct hlist_head *chain;

	if (xfrm_policy_id2dir(id) != dir)

	write_lock_bh(&xfrm_policy_lock);
	chain = net->xfrm.policy_byidx + idx_hash(net, id);

	hlist_for_each_entry(pol, chain, byidx) {
		if (pol->type == type && pol->index == id &&
		    (mark & pol->mark.m) == pol->mark.v) {
				*err = security_xfrm_policy_delete(
					write_unlock_bh(&xfrm_policy_lock);
				__xfrm_policy_unlink(pol, dir);
	write_unlock_bh(&xfrm_policy_lock);

		xfrm_policy_kill(ret);
EXPORT_SYMBOL(xfrm_policy_byid);
#ifdef CONFIG_SECURITY_NETWORK_XFRM
xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audit_info)
	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		struct xfrm_policy *pol;

		hlist_for_each_entry(pol,
				     &net->xfrm.policy_inexact[dir], bydst) {
			if (pol->type != type)
			err = security_xfrm_policy_delete(pol->security);
				xfrm_audit_policy_delete(pol, 0,
							 audit_info->loginuid,
							 audit_info->sessionid,
		for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
			hlist_for_each_entry(pol,
					     net->xfrm.policy_bydst[dir].table + i,
				if (pol->type != type)
				err = security_xfrm_policy_delete(
					xfrm_audit_policy_delete(pol, 0,
								 audit_info->loginuid,
								 audit_info->sessionid,
xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audit_info)

int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
	int dir, err = 0, cnt = 0;

	write_lock_bh(&xfrm_policy_lock);

	err = xfrm_policy_flush_secctx_check(net, type, audit_info);

	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		struct xfrm_policy *pol;

		hlist_for_each_entry(pol,
				     &net->xfrm.policy_inexact[dir], bydst) {
			if (pol->type != type)
			__xfrm_policy_unlink(pol, dir);
			write_unlock_bh(&xfrm_policy_lock);

			xfrm_audit_policy_delete(pol, 1, audit_info->loginuid,
						 audit_info->sessionid,
			xfrm_policy_kill(pol);

			write_lock_bh(&xfrm_policy_lock);
		for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
			hlist_for_each_entry(pol,
					     net->xfrm.policy_bydst[dir].table + i,
				if (pol->type != type)
				__xfrm_policy_unlink(pol, dir);
				write_unlock_bh(&xfrm_policy_lock);

				xfrm_audit_policy_delete(pol, 1,
							 audit_info->loginuid,
							 audit_info->sessionid,
				xfrm_policy_kill(pol);

				write_lock_bh(&xfrm_policy_lock);

	write_unlock_bh(&xfrm_policy_lock);
EXPORT_SYMBOL(xfrm_policy_flush);
int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
		     int (*func)(struct xfrm_policy *, int, int, void*),
	struct xfrm_policy *pol;
	struct xfrm_policy_walk_entry *x;

	if (walk->type >= XFRM_POLICY_TYPE_MAX &&
	    walk->type != XFRM_POLICY_TYPE_ANY)

	if (list_empty(&walk->walk.all) && walk->seq != 0)

	write_lock_bh(&xfrm_policy_lock);
	if (list_empty(&walk->walk.all))
		x = list_first_entry(&net->xfrm.policy_all, struct xfrm_policy_walk_entry, all);
		x = list_entry(&walk->walk.all, struct xfrm_policy_walk_entry, all);
	list_for_each_entry_from(x, &net->xfrm.policy_all, all) {
		pol = container_of(x, struct xfrm_policy, walk);
		if (walk->type != XFRM_POLICY_TYPE_ANY &&
		    walk->type != pol->type)
		error = func(pol, xfrm_policy_id2dir(pol->index),
			list_move_tail(&walk->walk.all, &x->all);
	if (walk->seq == 0) {
	list_del_init(&walk->walk.all);
	write_unlock_bh(&xfrm_policy_lock);
EXPORT_SYMBOL(xfrm_policy_walk);

void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type)
	INIT_LIST_HEAD(&walk->walk.all);
EXPORT_SYMBOL(xfrm_policy_walk_init);

void xfrm_policy_walk_done(struct xfrm_policy_walk *walk)
	if (list_empty(&walk->walk.all))

	write_lock_bh(&xfrm_policy_lock);
	list_del(&walk->walk.all);
	write_unlock_bh(&xfrm_policy_lock);
EXPORT_SYMBOL(xfrm_policy_walk_done);
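
/* Usage sketch for the walk API above (illustrative; the callback follows the
 * func pointer type taken by xfrm_policy_walk):
 *
 *	static int dump_one(struct xfrm_policy *pol, int dir, int seq,
 *			    void *data)	// hypothetical callback
 *	{
 *		...
 *		return 0;	// nonzero stops the walk
 *	}
 *
 *	struct xfrm_policy_walk walk;
 *	xfrm_policy_walk_init(&walk, XFRM_POLICY_TYPE_ANY);
 *	err = xfrm_policy_walk(net, &walk, dump_one, data);
 *	xfrm_policy_walk_done(&walk);
 *
 * The walk entry keeps its place in net->xfrm.policy_all, so a walk can be
 * resumed across calls until walk_done() unlinks it.
 */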
/*
 * Find policy to apply to this flow.
 *
 * Returns 0 if policy found, else an -errno.
 */
static int xfrm_policy_match(const struct xfrm_policy *pol,
			     const struct flowi *fl,
			     u8 type, u16 family, int dir)
	const struct xfrm_selector *sel = &pol->selector;

	if (pol->family != family ||
	    (fl->flowi_mark & pol->mark.m) != pol->mark.v ||

	match = xfrm_selector_match(sel, fl, family);
		ret = security_xfrm_policy_lookup(pol->security, fl->flowi_secid,

static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
						     const struct flowi *fl,
	struct xfrm_policy *pol, *ret;
	const xfrm_address_t *daddr, *saddr;
	struct hlist_head *chain;

	daddr = xfrm_flowi_daddr(fl, family);
	saddr = xfrm_flowi_saddr(fl, family);
	if (unlikely(!daddr || !saddr))

	read_lock_bh(&xfrm_policy_lock);
	chain = policy_hash_direct(net, daddr, saddr, family, dir);

	hlist_for_each_entry(pol, chain, bydst) {
		err = xfrm_policy_match(pol, fl, type, family, dir);
			priority = ret->priority;
	chain = &net->xfrm.policy_inexact[dir];
	hlist_for_each_entry(pol, chain, bydst) {
		err = xfrm_policy_match(pol, fl, type, family, dir);
		} else if (pol->priority < priority) {
	read_unlock_bh(&xfrm_policy_lock);
static struct xfrm_policy *
__xfrm_policy_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir)
#ifdef CONFIG_XFRM_SUB_POLICY
	struct xfrm_policy *pol;

	pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family, dir);
	return xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family, dir);

static int flow_to_policy_dir(int dir)
	if (XFRM_POLICY_IN == FLOW_DIR_IN &&
	    XFRM_POLICY_OUT == FLOW_DIR_OUT &&
	    XFRM_POLICY_FWD == FLOW_DIR_FWD)
		return XFRM_POLICY_IN;
		return XFRM_POLICY_OUT;
		return XFRM_POLICY_FWD;

static struct flow_cache_object *
xfrm_policy_lookup(struct net *net, const struct flowi *fl, u16 family,
		   u8 dir, struct flow_cache_object *old_obj, void *ctx)
	struct xfrm_policy *pol;

		xfrm_pol_put(container_of(old_obj, struct xfrm_policy, flo));

	pol = __xfrm_policy_lookup(net, fl, family, flow_to_policy_dir(dir));
	if (IS_ERR_OR_NULL(pol))
		return ERR_CAST(pol);

	/* Resolver returns two references:
	 * one for cache and one for caller of flow_cache_lookup() */
static inline int policy_to_flow_dir(int dir)
	if (XFRM_POLICY_IN == FLOW_DIR_IN &&
	    XFRM_POLICY_OUT == FLOW_DIR_OUT &&
	    XFRM_POLICY_FWD == FLOW_DIR_FWD)
	case XFRM_POLICY_IN:
	case XFRM_POLICY_OUT:
		return FLOW_DIR_OUT;
	case XFRM_POLICY_FWD:
		return FLOW_DIR_FWD;

static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir,
						 const struct flowi *fl)
	struct xfrm_policy *pol;

	read_lock_bh(&xfrm_policy_lock);
	if ((pol = sk->sk_policy[dir]) != NULL) {
		bool match = xfrm_selector_match(&pol->selector, fl,
			if ((sk->sk_mark & pol->mark.m) != pol->mark.v) {
			err = security_xfrm_policy_lookup(pol->security,
							  policy_to_flow_dir(dir));
			else if (err == -ESRCH)
	read_unlock_bh(&xfrm_policy_lock);

static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
	struct net *net = xp_net(pol);
	struct hlist_head *chain = policy_hash_bysel(net, &pol->selector,

	list_add(&pol->walk.all, &net->xfrm.policy_all);
	hlist_add_head(&pol->bydst, chain);
	hlist_add_head(&pol->byidx, net->xfrm.policy_byidx+idx_hash(net, pol->index));
	net->xfrm.policy_count[dir]++;

	if (xfrm_bydst_should_resize(net, dir, NULL))
		schedule_work(&net->xfrm.policy_hash_work);

static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
	struct net *net = xp_net(pol);

	if (hlist_unhashed(&pol->bydst))

	hlist_del(&pol->bydst);
	hlist_del(&pol->byidx);
	list_del(&pol->walk.all);
	net->xfrm.policy_count[dir]--;

int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
	write_lock_bh(&xfrm_policy_lock);
	pol = __xfrm_policy_unlink(pol, dir);
	write_unlock_bh(&xfrm_policy_lock);
		xfrm_policy_kill(pol);
EXPORT_SYMBOL(xfrm_policy_delete);

int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
	struct net *net = xp_net(pol);
	struct xfrm_policy *old_pol;

#ifdef CONFIG_XFRM_SUB_POLICY
	if (pol && pol->type != XFRM_POLICY_TYPE_MAIN)

	write_lock_bh(&xfrm_policy_lock);
	old_pol = sk->sk_policy[dir];
	sk->sk_policy[dir] = pol;
		pol->curlft.add_time = get_seconds();
		pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir);
		__xfrm_policy_link(pol, XFRM_POLICY_MAX+dir);
		xfrm_policy_requeue(old_pol, pol);
		/* Unlinking always succeeds. This is the only function
		 * allowed to delete or replace a socket policy.
		 */
		__xfrm_policy_unlink(old_pol, XFRM_POLICY_MAX+dir);
	write_unlock_bh(&xfrm_policy_lock);

		xfrm_policy_kill(old_pol);

static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir)
	struct xfrm_policy *newp = xfrm_policy_alloc(xp_net(old), GFP_ATOMIC);

		newp->selector = old->selector;
		if (security_xfrm_policy_clone(old->security,
			return NULL;	/* ENOMEM */
		newp->lft = old->lft;
		newp->curlft = old->curlft;
		newp->mark = old->mark;
		newp->action = old->action;
		newp->flags = old->flags;
		newp->xfrm_nr = old->xfrm_nr;
		newp->index = old->index;
		newp->type = old->type;
		memcpy(newp->xfrm_vec, old->xfrm_vec,
		       newp->xfrm_nr*sizeof(struct xfrm_tmpl));
		write_lock_bh(&xfrm_policy_lock);
		__xfrm_policy_link(newp, XFRM_POLICY_MAX+dir);
		write_unlock_bh(&xfrm_policy_lock);

int __xfrm_sk_clone_policy(struct sock *sk)
	struct xfrm_policy *p0 = sk->sk_policy[0],
			   *p1 = sk->sk_policy[1];

	sk->sk_policy[0] = sk->sk_policy[1] = NULL;
	if (p0 && (sk->sk_policy[0] = clone_policy(p0, 0)) == NULL)
	if (p1 && (sk->sk_policy[1] = clone_policy(p1, 1)) == NULL)
xfrm_get_saddr(struct net *net, xfrm_address_t *local, xfrm_address_t *remote,
	       unsigned short family)
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);

	if (unlikely(afinfo == NULL))
	err = afinfo->get_saddr(net, local, remote);
	xfrm_policy_put_afinfo(afinfo);

/* Resolve list of templates for the flow, given policy. */

xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl,
		      struct xfrm_state **xfrm, unsigned short family)
	struct net *net = xp_net(policy);
	xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
	xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);

	for (nx = 0, i = 0; i < policy->xfrm_nr; i++) {
		struct xfrm_state *x;
		xfrm_address_t *remote = daddr;
		xfrm_address_t *local = saddr;
		struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];

		if (tmpl->mode == XFRM_MODE_TUNNEL ||
		    tmpl->mode == XFRM_MODE_BEET) {
			remote = &tmpl->id.daddr;
			local = &tmpl->saddr;
			if (xfrm_addr_any(local, tmpl->encap_family)) {
				error = xfrm_get_saddr(net, &tmp, remote, tmpl->encap_family);

		x = xfrm_state_find(remote, local, fl, tmpl, policy, &error, family);

		if (x && x->km.state == XFRM_STATE_VALID) {
			error = (x->km.state == XFRM_STATE_ERROR ?
			else if (error == -ESRCH)

		if (!tmpl->optional)

	for (nx--; nx >= 0; nx--)
		xfrm_state_put(xfrm[nx]);

xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl,
		  struct xfrm_state **xfrm, unsigned short family)
	struct xfrm_state *tp[XFRM_MAX_DEPTH];
	struct xfrm_state **tpp = (npols > 1) ? tp : xfrm;

	for (i = 0; i < npols; i++) {
		if (cnx + pols[i]->xfrm_nr >= XFRM_MAX_DEPTH) {
		ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family);

	/* found states are sorted for outbound processing */
		xfrm_state_sort(xfrm, tpp, cnx, family);

	for (cnx--; cnx >= 0; cnx--)
		xfrm_state_put(tpp[cnx]);
/* Check that the bundle accepts the flow and its components are
 * still valid.
 */
static inline int xfrm_get_tos(const struct flowi *fl, int family)
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);

	tos = afinfo->get_tos(fl);

	xfrm_policy_put_afinfo(afinfo);

static struct flow_cache_object *xfrm_bundle_flo_get(struct flow_cache_object *flo)
	struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
	struct dst_entry *dst = &xdst->u.dst;

	if (xdst->route == NULL) {
		/* Dummy bundle - if it has xfrms we were not able to
		 * build a bundle because template resolution failed.
		 * It means we need to retry resolving. */
		if (xdst->num_xfrms > 0)
	} else if (dst->flags & DST_XFRM_QUEUE) {
		if (stale_bundle(dst))

static int xfrm_bundle_flo_check(struct flow_cache_object *flo)
	struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
	struct dst_entry *dst = &xdst->u.dst;

	if (stale_bundle(dst))

static void xfrm_bundle_flo_delete(struct flow_cache_object *flo)
	struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
	struct dst_entry *dst = &xdst->u.dst;

static const struct flow_cache_ops xfrm_bundle_fc_ops = {
	.get = xfrm_bundle_flo_get,
	.check = xfrm_bundle_flo_check,
	.delete = xfrm_bundle_flo_delete,

static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	struct dst_ops *dst_ops;
	struct xfrm_dst *xdst;

		return ERR_PTR(-EINVAL);

		dst_ops = &net->xfrm.xfrm4_dst_ops;
#if IS_ENABLED(CONFIG_IPV6)
		dst_ops = &net->xfrm.xfrm6_dst_ops;

	xdst = dst_alloc(dst_ops, NULL, 0, DST_OBSOLETE_NONE, 0);

		struct dst_entry *dst = &xdst->u.dst;

		memset(dst + 1, 0, sizeof(*xdst) - sizeof(*dst));
		xdst->flo.ops = &xfrm_bundle_fc_ops;
		if (afinfo->init_dst)
			afinfo->init_dst(net, xdst);
		xdst = ERR_PTR(-ENOBUFS);

	xfrm_policy_put_afinfo(afinfo);

static inline int xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst,
	struct xfrm_policy_afinfo *afinfo =
		xfrm_policy_get_afinfo(dst->ops->family);

	err = afinfo->init_path(path, dst, nfheader_len);

	xfrm_policy_put_afinfo(afinfo);

static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
				const struct flowi *fl)
	struct xfrm_policy_afinfo *afinfo =
		xfrm_policy_get_afinfo(xdst->u.dst.ops->family);

	err = afinfo->fill_dst(xdst, dev, fl);

	xfrm_policy_put_afinfo(afinfo);
/* Allocate chain of dst_entry's, attach known xfrm's, calculate
 * all the metrics... In short, bundle a bundle.
 */
static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
					    struct xfrm_state **xfrm, int nx,
					    const struct flowi *fl,
					    struct dst_entry *dst)
	struct net *net = xp_net(policy);
	unsigned long now = jiffies;
	struct net_device *dev;
	struct xfrm_mode *inner_mode;
	struct dst_entry *dst_prev = NULL;
	struct dst_entry *dst0 = NULL;
	int nfheader_len = 0;
	int trailer_len = 0;
	int family = policy->selector.family;
	xfrm_address_t saddr, daddr;

	xfrm_flowi_addr_get(fl, &saddr, &daddr, family);

	tos = xfrm_get_tos(fl, family);

	for (; i < nx; i++) {
		struct xfrm_dst *xdst = xfrm_alloc_dst(net, family);
		struct dst_entry *dst1 = &xdst->u.dst;

			err = PTR_ERR(xdst);

		if (xfrm[i]->sel.family == AF_UNSPEC) {
			inner_mode = xfrm_ip2inner_mode(xfrm[i],
							xfrm_af2proto(family));
				err = -EAFNOSUPPORT;
			inner_mode = xfrm[i]->inner_mode;

			dst_prev->child = dst_clone(dst1);
			dst1->flags |= DST_NOHASH;

		dst_copy_metrics(dst1, dst);

		if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
			family = xfrm[i]->props.family;
			dst = xfrm_dst_lookup(xfrm[i], tos, &saddr, &daddr,

		dst1->xfrm = xfrm[i];
		xdst->xfrm_genid = xfrm[i]->genid;

		dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
		dst1->flags |= DST_HOST;
		dst1->lastuse = now;

		dst1->input = dst_discard;
		dst1->output = inner_mode->afinfo->output;

		dst1->next = dst_prev;

		header_len += xfrm[i]->props.header_len;
		if (xfrm[i]->type->flags & XFRM_TYPE_NON_FRAGMENT)
			nfheader_len += xfrm[i]->props.header_len;
		trailer_len += xfrm[i]->props.trailer_len;

	dst_prev->child = dst;

	xfrm_init_path((struct xfrm_dst *)dst0, dst, nfheader_len);
	xfrm_init_pmtu(dst_prev);

	for (dst_prev = dst0; dst_prev != dst; dst_prev = dst_prev->child) {
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst_prev;

		err = xfrm_fill_dst(xdst, dev, fl);

		dst_prev->header_len = header_len;
		dst_prev->trailer_len = trailer_len;
		header_len -= xdst->u.dst.xfrm->props.header_len;
		trailer_len -= xdst->u.dst.xfrm->props.trailer_len;

		xfrm_state_put(xfrm[i]);

	dst0 = ERR_PTR(err);

xfrm_dst_alloc_copy(void **target, const void *src, int size)
	*target = kmalloc(size, GFP_ATOMIC);
	memcpy(*target, src, size);
xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
#ifdef CONFIG_XFRM_SUB_POLICY
	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
	return xfrm_dst_alloc_copy((void **)&(xdst->partner),

xfrm_dst_update_origin(struct dst_entry *dst, const struct flowi *fl)
#ifdef CONFIG_XFRM_SUB_POLICY
	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
	return xfrm_dst_alloc_copy((void **)&(xdst->origin), fl, sizeof(*fl));

static int xfrm_expand_policies(const struct flowi *fl, u16 family,
				struct xfrm_policy **pols,
				int *num_pols, int *num_xfrms)
	if (*num_pols == 0 || !pols[0]) {
		if (IS_ERR(pols[0]))
			return PTR_ERR(pols[0]);

		*num_xfrms = pols[0]->xfrm_nr;

#ifdef CONFIG_XFRM_SUB_POLICY
		if (pols[0] && pols[0]->action == XFRM_POLICY_ALLOW &&
		    pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
			pols[1] = xfrm_policy_lookup_bytype(xp_net(pols[0]),
							    XFRM_POLICY_TYPE_MAIN,
				if (IS_ERR(pols[1])) {
					xfrm_pols_put(pols, *num_pols);
					return PTR_ERR(pols[1]);
				(*num_xfrms) += pols[1]->xfrm_nr;

	for (i = 0; i < *num_pols; i++) {
		if (pols[i]->action != XFRM_POLICY_ALLOW) {

static struct xfrm_dst *
xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
			       const struct flowi *fl, u16 family,
			       struct dst_entry *dst_orig)
	struct net *net = xp_net(pols[0]);
	struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
	struct dst_entry *dst;
	struct xfrm_dst *xdst;

	/* Try to instantiate a bundle */
	err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family);
		if (err != 0 && err != -EAGAIN)
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
		return ERR_PTR(err);

	dst = xfrm_bundle_create(pols[0], xfrm, err, fl, dst_orig);
		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR);
		return ERR_CAST(dst);

	xdst = (struct xfrm_dst *)dst;
	xdst->num_xfrms = err;
		err = xfrm_dst_update_parent(dst, &pols[1]->selector);
		err = xfrm_dst_update_origin(dst, fl);
	if (unlikely(err)) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR);
		return ERR_PTR(err);

	xdst->num_pols = num_pols;
	memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
	xdst->policy_genid = atomic_read(&pols[0]->genid);
static void xfrm_policy_queue_process(unsigned long arg)
	struct sk_buff *skb;
	struct dst_entry *dst;
	struct xfrm_policy *pol = (struct xfrm_policy *)arg;
	struct xfrm_policy_queue *pq = &pol->polq;
	struct sk_buff_head list;

	spin_lock(&pq->hold_queue.lock);
	skb = skb_peek(&pq->hold_queue);
	xfrm_decode_session(skb, &fl, dst->ops->family);
	spin_unlock(&pq->hold_queue.lock);

	dst_hold(dst->path);
	dst = xfrm_lookup(xp_net(pol), dst->path, &fl,

	if (dst->flags & DST_XFRM_QUEUE) {
		if (pq->timeout >= XFRM_QUEUE_TMO_MAX)

		pq->timeout = pq->timeout << 1;
		mod_timer(&pq->hold_timer, jiffies + pq->timeout);

	__skb_queue_head_init(&list);

	spin_lock(&pq->hold_queue.lock);
	skb_queue_splice_init(&pq->hold_queue, &list);
	spin_unlock(&pq->hold_queue.lock);

	while (!skb_queue_empty(&list)) {
		skb = __skb_dequeue(&list);

		xfrm_decode_session(skb, &fl, skb_dst(skb)->ops->family);
		dst_hold(skb_dst(skb)->path);
		dst = xfrm_lookup(xp_net(pol), skb_dst(skb)->path,

		skb_dst_set(skb, dst);

		err = dst_output(skb);

	xfrm_queue_purge(&pq->hold_queue);

static int xdst_queue_output(struct sk_buff *skb)
	unsigned long sched_next;
	struct dst_entry *dst = skb_dst(skb);
	struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
	struct xfrm_policy_queue *pq = &xdst->pols[0]->polq;

	if (pq->hold_queue.qlen > XFRM_MAX_QUEUE_LEN) {

	spin_lock_bh(&pq->hold_queue.lock);

		pq->timeout = XFRM_QUEUE_TMO_MIN;

	sched_next = jiffies + pq->timeout;

	if (del_timer(&pq->hold_timer)) {
		if (time_before(pq->hold_timer.expires, sched_next))
			sched_next = pq->hold_timer.expires;

	__skb_queue_tail(&pq->hold_queue, skb);
	mod_timer(&pq->hold_timer, sched_next);

	spin_unlock_bh(&pq->hold_queue.lock);
static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net,
						 struct dst_entry *dst,
						 const struct flowi *fl,
	struct net_device *dev;
	struct dst_entry *dst1;
	struct xfrm_dst *xdst;

	xdst = xfrm_alloc_dst(net, family);

	if (net->xfrm.sysctl_larval_drop || num_xfrms <= 0 ||
	    (fl->flowi_flags & FLOWI_FLAG_CAN_SLEEP))

	dst1 = &xdst->u.dst;

	dst_copy_metrics(dst1, dst);

	dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
	dst1->flags |= DST_HOST | DST_XFRM_QUEUE;
	dst1->lastuse = jiffies;

	dst1->input = dst_discard;
	dst1->output = xdst_queue_output;

	xfrm_init_path((struct xfrm_dst *)dst1, dst, 0);

	err = xfrm_fill_dst(xdst, dev, fl);

	xdst = ERR_PTR(err);
static struct flow_cache_object *
xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir,
		   struct flow_cache_object *oldflo, void *ctx)
	struct dst_entry *dst_orig = (struct dst_entry *)ctx;
	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
	struct xfrm_dst *xdst, *new_xdst;
	int num_pols = 0, num_xfrms = 0, i, err, pol_dead;

	/* Check if the policies from old bundle are usable */
	xdst = container_of(oldflo, struct xfrm_dst, flo);
	num_pols = xdst->num_pols;
	num_xfrms = xdst->num_xfrms;
	for (i = 0; i < num_pols; i++) {
		pols[i] = xdst->pols[i];
		pol_dead |= pols[i]->walk.dead;
		dst_free(&xdst->u.dst);

	/* Resolve policies to use if we couldn't get them from
	 * previous cache entry */
	pols[0] = __xfrm_policy_lookup(net, fl, family,
				       flow_to_policy_dir(dir));
	err = xfrm_expand_policies(fl, family, pols,
				   &num_pols, &num_xfrms);
		goto make_dummy_bundle;

	new_xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family, dst_orig);
	if (IS_ERR(new_xdst)) {
		err = PTR_ERR(new_xdst);
			goto make_dummy_bundle;
		dst_hold(&xdst->u.dst);
	} else if (new_xdst == NULL) {
			goto make_dummy_bundle;
		xdst->num_xfrms = 0;
		dst_hold(&xdst->u.dst);

	/* Kill the previous bundle */
		/* The policies were stolen for newly generated bundle */
		dst_free(&xdst->u.dst);

	/* Flow cache does not have reference, it dst_free()'s,
	 * but we do need to return one reference for original caller */
	dst_hold(&new_xdst->u.dst);
	return &new_xdst->flo;
make_dummy_bundle:
	/* We found policies, but there were no bundles to instantiate:
	 * either the policy blocks, has no transformations, or we could
	 * not build a template (no xfrm_states). */
	xdst = xfrm_create_dummy_bundle(net, dst_orig, fl, num_xfrms, family);
		xfrm_pols_put(pols, num_pols);
		return ERR_CAST(xdst);
	xdst->num_pols = num_pols;
	xdst->num_xfrms = num_xfrms;
	memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);

	dst_hold(&xdst->u.dst);

	XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
		dst_free(&xdst->u.dst);
	xfrm_pols_put(pols, num_pols);
	return ERR_PTR(err);
static struct dst_entry *make_blackhole(struct net *net, u16 family,
					struct dst_entry *dst_orig)
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	struct dst_entry *ret;

		dst_release(dst_orig);
		return ERR_PTR(-EINVAL);
	ret = afinfo->blackhole_route(net, dst_orig);
	xfrm_policy_put_afinfo(afinfo);

/* Main function: finds/creates a bundle for given flow.
 *
 * At the moment we eat a raw IP route. Mostly to speed up lookups
 * on interfaces with disabled IPsec.
 */
struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
			      const struct flowi *fl,
			      struct sock *sk, int flags)
	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
	struct flow_cache_object *flo;
	struct xfrm_dst *xdst;
	struct dst_entry *dst, *route;
	u16 family = dst_orig->ops->family;
	u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT);
	int i, err, num_pols, num_xfrms = 0, drop_pols = 0;

	if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
		pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl);
		err = xfrm_expand_policies(fl, family, pols,
					   &num_pols, &num_xfrms);

			if (num_xfrms <= 0) {
				drop_pols = num_pols;
			xdst = xfrm_resolve_and_create_bundle(
				xfrm_pols_put(pols, num_pols);
				err = PTR_ERR(xdst);
			} else if (xdst == NULL) {
				drop_pols = num_pols;

			dst_hold(&xdst->u.dst);

			spin_lock_bh(&xfrm_policy_sk_bundle_lock);
			xdst->u.dst.next = xfrm_policy_sk_bundles;
			xfrm_policy_sk_bundles = &xdst->u.dst;
			spin_unlock_bh(&xfrm_policy_sk_bundle_lock);

			route = xdst->route;

	/* To accelerate a bit... */
	if ((dst_orig->flags & DST_NOXFRM) ||
	    !net->xfrm.policy_count[XFRM_POLICY_OUT])

	flo = flow_cache_lookup(net, fl, family, dir,
				xfrm_bundle_lookup, dst_orig);

	xdst = container_of(flo, struct xfrm_dst, flo);

	num_pols = xdst->num_pols;
	num_xfrms = xdst->num_xfrms;
	memcpy(pols, xdst->pols, sizeof(struct xfrm_policy *) * num_pols);
	route = xdst->route;

	if (route == NULL && num_xfrms > 0) {
		/* The only case when xfrm_bundle_lookup() returns a bundle
		 * with a null route is when the template could not be
		 * resolved. It means policies are there, but the bundle
		 * could not be created, since we don't yet have the
		 * xfrm_state's. We need to wait for KM to negotiate new
		 * SAs or bail out with an error. */
		if (net->xfrm.sysctl_larval_drop) {
			/* EREMOTE tells the caller to generate
			 * a one-shot blackhole route. */
			xfrm_pols_put(pols, drop_pols);
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);

			return make_blackhole(net, family, dst_orig);
		if (fl->flowi_flags & FLOWI_FLAG_CAN_SLEEP) {
			DECLARE_WAITQUEUE(wait, current);

			add_wait_queue(&net->xfrm.km_waitq, &wait);
			set_current_state(TASK_INTERRUPTIBLE);
			set_current_state(TASK_RUNNING);
			remove_wait_queue(&net->xfrm.km_waitq, &wait);

			if (!signal_pending(current)) {

		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);

	if ((flags & XFRM_LOOKUP_ICMP) &&
	    !(pols[0]->flags & XFRM_POLICY_ICMP)) {

	for (i = 0; i < num_pols; i++)
		pols[i]->curlft.use_time = get_seconds();

	if (num_xfrms < 0) {
		/* Prohibit the flow */
		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK);
	} else if (num_xfrms > 0) {
		/* Flow transformed */
		dst_release(dst_orig);
		/* Flow passes untransformed */

	xfrm_pols_put(pols, drop_pols);
	if (dst && dst->xfrm &&
	    dst->xfrm->props.mode == XFRM_MODE_TUNNEL)
		dst->flags |= DST_XFRM_TUNNEL;

	if (!(flags & XFRM_LOOKUP_ICMP)) {

	dst_release(dst_orig);
	xfrm_pols_put(pols, drop_pols);
	return ERR_PTR(err);
EXPORT_SYMBOL(xfrm_lookup);
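
/* Caller-side sketch (illustrative; mirrors __xfrm_route_forward() below):
 * route the packet first, then let xfrm_lookup() swap the plain route for an
 * IPsec bundle when a policy applies. The returned dst is either dst_orig
 * (flow passes untransformed), a bundle (flow transformed), or an ERR_PTR.
 *
 *	dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, 0);
 *	if (IS_ERR(dst))
 *		return PTR_ERR(dst);	// e.g. a blocking policy
 *	skb_dst_set(skb, dst);
 */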
xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl)
	struct xfrm_state *x;

	if (!skb->sp || idx < 0 || idx >= skb->sp->len)
	x = skb->sp->xvec[idx];
	if (!x->type->reject)
	return x->type->reject(x, skb, fl);
/* When skb is transformed back to its "native" form, we have to
 * check policy restrictions. At the moment we do this in a maximally
 * stupid way. Shame on me. :-) Of course, connected sockets must
 * have the policy cached on them.
 */
xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x,
	      unsigned short family)
	if (xfrm_state_kern(x))
		return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family);
	return	x->id.proto == tmpl->id.proto &&
		(x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
		(x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
		x->props.mode == tmpl->mode &&
		(tmpl->allalgs || (tmpl->aalgos & (1<<x->props.aalgo)) ||
		 !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
		!(x->props.mode != XFRM_MODE_TRANSPORT &&
		  xfrm_state_addr_cmp(tmpl, x, family));
/*
 * 0 or more than 0 is returned when validation succeeds (either a bypass
 * because of optional transport mode, or the next index of the matched
 * secpath state with the template).
 * -1 is returned when no matching template is found.
 * Otherwise "-2 - errored_index" is returned.
 */
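
/* Worked example of the encoding (not from the original source): a reject at
 * secpath index 3 is reported as -2 - 3 = -5, which the caller decodes as
 * xerr_idx = -(2 + err), recovering 3; -1 alone means "no template matched".
 */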
xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int start,
	       unsigned short family)
	if (tmpl->optional) {
		if (tmpl->mode == XFRM_MODE_TRANSPORT)
	for (; idx < sp->len; idx++) {
		if (xfrm_state_ok(tmpl, sp->xvec[idx], family))
		if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) {

int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
			  unsigned int family, int reverse)
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);

	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	afinfo->decode_session(skb, fl, reverse);
	err = security_xfrm_decode_session(skb, &fl->flowi_secid);
	xfrm_policy_put_afinfo(afinfo);
EXPORT_SYMBOL(__xfrm_decode_session);

static inline int secpath_has_nontransport(const struct sec_path *sp, int k, int *idxp)
	for (; k < sp->len; k++) {
		if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) {

int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
			unsigned short family)
	struct net *net = dev_net(skb->dev);
	struct xfrm_policy *pol;
	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];

	reverse = dir & ~XFRM_POLICY_MASK;
	dir &= XFRM_POLICY_MASK;
	fl_dir = policy_to_flow_dir(dir);

	if (__xfrm_decode_session(skb, &fl, family, reverse) < 0) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);

	nf_nat_decode_session(skb, &fl, family);
	/* First, check the used SAs against their selectors. */
		for (i = skb->sp->len - 1; i >= 0; i--) {
			struct xfrm_state *x = skb->sp->xvec[i];
			if (!xfrm_selector_match(&x->sel, &fl, family)) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);

	if (sk && sk->sk_policy[dir]) {
		pol = xfrm_sk_policy_lookup(sk, dir, &fl);
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);

		struct flow_cache_object *flo;

		flo = flow_cache_lookup(net, &fl, family, fl_dir,
					xfrm_policy_lookup, NULL);
		if (IS_ERR_OR_NULL(flo))
			pol = ERR_CAST(flo);
			pol = container_of(flo, struct xfrm_policy, flo);
		XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);

		if (skb->sp && secpath_has_nontransport(skb->sp, 0, &xerr_idx)) {
			xfrm_secpath_reject(xerr_idx, skb, &fl);
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);

	pol->curlft.use_time = get_seconds();

#ifdef CONFIG_XFRM_SUB_POLICY
	if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
		pols[1] = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN,
		if (IS_ERR(pols[1])) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
			pols[1]->curlft.use_time = get_seconds();

	if (pol->action == XFRM_POLICY_ALLOW) {
		struct sec_path *sp;
		static struct sec_path dummy;
		struct xfrm_tmpl *tp[XFRM_MAX_DEPTH];
		struct xfrm_tmpl *stp[XFRM_MAX_DEPTH];
		struct xfrm_tmpl **tpp = tp;

		if ((sp = skb->sp) == NULL)

		for (pi = 0; pi < npols; pi++) {
			if (pols[pi] != pol &&
			    pols[pi]->action != XFRM_POLICY_ALLOW) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
			if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
			for (i = 0; i < pols[pi]->xfrm_nr; i++)
				tpp[ti++] = &pols[pi]->xfrm_vec[i];
			xfrm_tmpl_sort(stp, tpp, xfrm_nr, family);

		/* For each tunnel xfrm, find the first matching tmpl.
		 * For each tmpl before that, find corresponding xfrm.
		 * Order is _important_. Later we will implement
		 * some barriers, but at the moment barriers
		 * are implied between each two transformations.
		 */
		for (i = xfrm_nr - 1, k = 0; i >= 0; i--) {
			k = xfrm_policy_ok(tpp[i], sp, k, family);
				/* "-2 - errored_index" returned */
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);

		if (secpath_has_nontransport(sp, k, &xerr_idx)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);

		xfrm_pols_put(pols, npols);

	XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);

	xfrm_secpath_reject(xerr_idx, skb, &fl);

	xfrm_pols_put(pols, npols);
EXPORT_SYMBOL(__xfrm_policy_check);
int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
	struct net *net = dev_net(skb->dev);
	struct dst_entry *dst;

	if (xfrm_decode_session(skb, &fl, family) < 0) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);

	dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, 0);

	skb_dst_set(skb, dst);
EXPORT_SYMBOL(__xfrm_route_forward);
/* Optimize later using cookies and generation ids. */

static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
	/* Code (such as __xfrm4_bundle_create()) sets dst->obsolete
	 * to DST_OBSOLETE_FORCE_CHK to force all XFRM destinations to
	 * get validated by dst_ops->check on every use. We do this
	 * because when a normal route referenced by an XFRM dst is
	 * obsoleted we do not go looking around for all parent
	 * referencing XFRM dsts so that we can invalidate them. It
	 * is just too much work. Instead we make the checks here on
	 * every use. For example:
	 *
	 *	XFRM dst A --> IPv4 dst X
	 *
	 * X is the "xdst->route" of A (X is also the "dst->path" of A
	 * in this example). If X is marked obsolete, "A" will not
	 * notice. That's what we are validating here via the
	 * stale_bundle() check.
	 *
	 * When a policy's bundle is pruned, we dst_free() the XFRM
	 * dst which causes its ->obsolete field to be set to
	 * DST_OBSOLETE_DEAD. If an XFRM dst has been pruned like
	 * this, we want to force a new route lookup.
	 */
	if (dst->obsolete < 0 && !stale_bundle(dst))

static int stale_bundle(struct dst_entry *dst)
	return !xfrm_bundle_ok((struct xfrm_dst *)dst);

void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
	while ((dst = dst->child) && dst->xfrm && dst->dev == dev) {
		dst->dev = dev_net(dev)->loopback_dev;
EXPORT_SYMBOL(xfrm_dst_ifdown);

static void xfrm_link_failure(struct sk_buff *skb)
	/* Impossible. Such dst must be popped before it reaches the
	 * point of failure. */
static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
	if (dst->obsolete) {

static void __xfrm_garbage_collect(struct net *net)
	struct dst_entry *head, *next;

	spin_lock_bh(&xfrm_policy_sk_bundle_lock);
	head = xfrm_policy_sk_bundles;
	xfrm_policy_sk_bundles = NULL;
	spin_unlock_bh(&xfrm_policy_sk_bundle_lock);

void xfrm_garbage_collect(struct net *net)
	__xfrm_garbage_collect(net);
EXPORT_SYMBOL(xfrm_garbage_collect);

static void xfrm_garbage_collect_deferred(struct net *net)
	flow_cache_flush_deferred();
	__xfrm_garbage_collect(net);

static void xfrm_init_pmtu(struct dst_entry *dst)
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
		u32 pmtu, route_mtu_cached;

		pmtu = dst_mtu(dst->child);
		xdst->child_mtu_cached = pmtu;

		pmtu = xfrm_state_mtu(dst->xfrm, pmtu);

		route_mtu_cached = dst_mtu(xdst->route);
		xdst->route_mtu_cached = route_mtu_cached;

		if (pmtu > route_mtu_cached)
			pmtu = route_mtu_cached;

		dst_metric_set(dst, RTAX_MTU, pmtu);
	} while ((dst = dst->next));
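
/* PMTU example for the loop above (illustrative numbers): if the child
 * route's MTU is 1500 and xfrm_state_mtu() charges, say, 56 bytes of ESP
 * overhead, the transformed MTU is 1444; it is then clamped to the cached
 * route MTU, so each level of the chain advertises min(1444, route_mtu).
 */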
/* Check that the bundle accepts the flow and its components are
 * still valid.
 */
static int xfrm_bundle_ok(struct xfrm_dst *first)
	struct dst_entry *dst = &first->u.dst;
	struct xfrm_dst *last;

	if (!dst_check(dst->path, ((struct xfrm_dst *)dst)->path_cookie) ||
	    (dst->dev && !netif_running(dst->dev)))

	if (dst->flags & DST_XFRM_QUEUE)

		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;

		if (dst->xfrm->km.state != XFRM_STATE_VALID)
		if (xdst->xfrm_genid != dst->xfrm->genid)
		if (xdst->num_pols > 0 &&
		    xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))

		mtu = dst_mtu(dst->child);
		if (xdst->child_mtu_cached != mtu) {
			xdst->child_mtu_cached = mtu;

		if (!dst_check(xdst->route, xdst->route_cookie))
		mtu = dst_mtu(xdst->route);
		if (xdst->route_mtu_cached != mtu) {
			xdst->route_mtu_cached = mtu;

	} while (dst->xfrm);

		mtu = last->child_mtu_cached;
		mtu = xfrm_state_mtu(dst->xfrm, mtu);
		if (mtu > last->route_mtu_cached)
			mtu = last->route_mtu_cached;
		dst_metric_set(dst, RTAX_MTU, mtu);

		last = (struct xfrm_dst *)last->u.dst.next;
		last->child_mtu_cached = mtu;

static unsigned int xfrm_default_advmss(const struct dst_entry *dst)
	return dst_metric_advmss(dst->path);

static unsigned int xfrm_mtu(const struct dst_entry *dst)
	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);

	return mtu ? : dst_mtu(dst->path);

static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst,
					   struct sk_buff *skb,
	return dst->path->ops->neigh_lookup(dst, skb, daddr);
2675 int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
2679 if (unlikely(afinfo == NULL))
2681 if (unlikely(afinfo->family >= NPROTO))
2682 return -EAFNOSUPPORT;
2683 spin_lock(&xfrm_policy_afinfo_lock);
2684 if (unlikely(xfrm_policy_afinfo[afinfo->family] != NULL))
2687 struct dst_ops *dst_ops = afinfo->dst_ops;
2688 if (likely(dst_ops->kmem_cachep == NULL))
2689 dst_ops->kmem_cachep = xfrm_dst_cache;
2690 if (likely(dst_ops->check == NULL))
2691 dst_ops->check = xfrm_dst_check;
2692 if (likely(dst_ops->default_advmss == NULL))
2693 dst_ops->default_advmss = xfrm_default_advmss;
2694 if (likely(dst_ops->mtu == NULL))
2695 dst_ops->mtu = xfrm_mtu;
2696 if (likely(dst_ops->negative_advice == NULL))
2697 dst_ops->negative_advice = xfrm_negative_advice;
2698 if (likely(dst_ops->link_failure == NULL))
2699 dst_ops->link_failure = xfrm_link_failure;
2700 if (likely(dst_ops->neigh_lookup == NULL))
2701 dst_ops->neigh_lookup = xfrm_neigh_lookup;
2702 if (likely(afinfo->garbage_collect == NULL))
2703 afinfo->garbage_collect = xfrm_garbage_collect_deferred;
2704 rcu_assign_pointer(xfrm_policy_afinfo[afinfo->family], afinfo);
2706 spin_unlock(&xfrm_policy_afinfo_lock);
2710 struct dst_ops *xfrm_dst_ops;
2712 switch (afinfo->family) {
2714 xfrm_dst_ops = &net->xfrm.xfrm4_dst_ops;
2716 #if IS_ENABLED(CONFIG_IPV6)
2718 xfrm_dst_ops = &net->xfrm.xfrm6_dst_ops;
2724 *xfrm_dst_ops = *afinfo->dst_ops;
2730 EXPORT_SYMBOL(xfrm_policy_register_afinfo);
2732 int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo)
2735 if (unlikely(afinfo == NULL))
2737 if (unlikely(afinfo->family >= NPROTO))
2738 return -EAFNOSUPPORT;
2739 spin_lock(&xfrm_policy_afinfo_lock);
2740 if (likely(xfrm_policy_afinfo[afinfo->family] != NULL)) {
2741 if (unlikely(xfrm_policy_afinfo[afinfo->family] != afinfo))
2744 RCU_INIT_POINTER(xfrm_policy_afinfo[afinfo->family],
2747 spin_unlock(&xfrm_policy_afinfo_lock);
2749 struct dst_ops *dst_ops = afinfo->dst_ops;
2753 dst_ops->kmem_cachep = NULL;
2754 dst_ops->check = NULL;
2755 dst_ops->negative_advice = NULL;
2756 dst_ops->link_failure = NULL;
2757 afinfo->garbage_collect = NULL;
2761 EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);
static void __net_init xfrm_dst_ops_init(struct net *net)
{
	struct xfrm_policy_afinfo *afinfo;

	rcu_read_lock();
	afinfo = rcu_dereference(xfrm_policy_afinfo[AF_INET]);
	if (afinfo)
		net->xfrm.xfrm4_dst_ops = *afinfo->dst_ops;
#if IS_ENABLED(CONFIG_IPV6)
	afinfo = rcu_dereference(xfrm_policy_afinfo[AF_INET6]);
	if (afinfo)
		net->xfrm.xfrm6_dst_ops = *afinfo->dst_ops;
#endif
	rcu_read_unlock();
}
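/*
 * A device going down invalidates any cached bundle that uses a route
 * through it, so kick the garbage collector for that namespace.
 */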
static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_DOWN:
		xfrm_garbage_collect(dev_net(dev));
	}
	return NOTIFY_DONE;
}

static struct notifier_block xfrm_dev_notifier = {
	.notifier_call = xfrm_dev_event,
};
#ifdef CONFIG_XFRM_STATISTICS
static int __net_init xfrm_statistics_init(struct net *net)
{
	int rv;

	if (snmp_mib_init((void __percpu **)net->mib.xfrm_statistics,
			  sizeof(struct linux_xfrm_mib),
			  __alignof__(struct linux_xfrm_mib)) < 0)
		return -ENOMEM;
	rv = xfrm_proc_init(net);
	if (rv < 0)
		snmp_mib_free((void __percpu **)net->mib.xfrm_statistics);
	return rv;
}

static void xfrm_statistics_fini(struct net *net)
{
	xfrm_proc_fini(net);
	snmp_mib_free((void __percpu **)net->mib.xfrm_statistics);
}
#else
static int __net_init xfrm_statistics_init(struct net *net)
{
	return 0;
}

static void xfrm_statistics_fini(struct net *net)
{
}
#endif
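/*
 * Per-namespace setup: allocate the by-index and per-direction
 * by-destination policy hash tables.  They start at 8 buckets and are
 * grown later from the policy_hash_work worker.
 */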
static int __net_init xfrm_policy_init(struct net *net)
{
	unsigned int hmask, sz;
	int dir;

	if (net_eq(net, &init_net))
		xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
					   sizeof(struct xfrm_dst),
					   0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
					   NULL);

	hmask = 8 - 1;
	sz = (hmask+1) * sizeof(struct hlist_head);

	net->xfrm.policy_byidx = xfrm_hash_alloc(sz);
	if (!net->xfrm.policy_byidx)
		goto out_byidx;
	net->xfrm.policy_idx_hmask = hmask;

	for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
		struct xfrm_policy_hash *htab;

		net->xfrm.policy_count[dir] = 0;
		INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);

		htab = &net->xfrm.policy_bydst[dir];
		htab->table = xfrm_hash_alloc(sz);
		if (!htab->table)
			goto out_bydst;
		htab->hmask = hmask;
	}

	INIT_LIST_HEAD(&net->xfrm.policy_all);
	INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize);
	if (net_eq(net, &init_net))
		register_netdevice_notifier(&xfrm_dev_notifier);
	return 0;

out_bydst:
	for (dir--; dir >= 0; dir--) {
		struct xfrm_policy_hash *htab;

		htab = &net->xfrm.policy_bydst[dir];
		xfrm_hash_free(htab->table, sz);
	}
	xfrm_hash_free(net->xfrm.policy_byidx, sz);
out_byidx:
	return -ENOMEM;
}
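/*
 * Per-namespace teardown: flush any remaining policies and free the
 * hash tables, warning if something is still linked into them.
 */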
static void xfrm_policy_fini(struct net *net)
{
	struct xfrm_audit audit_info;
	unsigned int sz;
	int dir;

	flush_work(&net->xfrm.policy_hash_work);
#ifdef CONFIG_XFRM_SUB_POLICY
	audit_info.loginuid = INVALID_UID;
	audit_info.sessionid = -1;
	audit_info.secid = 0;
	xfrm_policy_flush(net, XFRM_POLICY_TYPE_SUB, &audit_info);
#endif
	audit_info.loginuid = INVALID_UID;
	audit_info.sessionid = -1;
	audit_info.secid = 0;
	xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, &audit_info);

	WARN_ON(!list_empty(&net->xfrm.policy_all));

	for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
		struct xfrm_policy_hash *htab;

		WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir]));

		htab = &net->xfrm.policy_bydst[dir];
		sz = (htab->hmask + 1) * sizeof(struct hlist_head);
		WARN_ON(!hlist_empty(htab->table));
		xfrm_hash_free(htab->table, sz);
	}

	sz = (net->xfrm.policy_idx_hmask + 1) * sizeof(struct hlist_head);
	WARN_ON(!hlist_empty(net->xfrm.policy_byidx));
	xfrm_hash_free(net->xfrm.policy_byidx, sz);
}
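/*
 * Namespace init brings up statistics, state, policy and sysctl
 * handling in order and unwinds in reverse on failure.
 */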
static int __net_init xfrm_net_init(struct net *net)
{
	int rv;

	rv = xfrm_statistics_init(net);
	if (rv < 0)
		goto out_statistics;
	rv = xfrm_state_init(net);
	if (rv < 0)
		goto out_state;
	rv = xfrm_policy_init(net);
	if (rv < 0)
		goto out_policy;
	xfrm_dst_ops_init(net);
	rv = xfrm_sysctl_init(net);
	if (rv < 0)
		goto out_sysctl;
	return 0;

out_sysctl:
	xfrm_policy_fini(net);
out_policy:
	xfrm_state_fini(net);
out_state:
	xfrm_statistics_fini(net);
out_statistics:
	return rv;
}
static void __net_exit xfrm_net_exit(struct net *net)
{
	xfrm_sysctl_fini(net);
	xfrm_policy_fini(net);
	xfrm_state_fini(net);
	xfrm_statistics_fini(net);
}
static struct pernet_operations __net_initdata xfrm_net_ops = {
	.init = xfrm_net_init,
	.exit = xfrm_net_exit,
};
void __init xfrm_init(void)
{
	register_pernet_subsys(&xfrm_net_ops);
	xfrm_input_init();
}
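/*
 * Audit helpers: log the security context and selector of a policy as
 * part of an SPD-add or SPD-delete audit record.
 */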
#ifdef CONFIG_AUDITSYSCALL
static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp,
					 struct audit_buffer *audit_buf)
{
	struct xfrm_sec_ctx *ctx = xp->security;
	struct xfrm_selector *sel = &xp->selector;

	if (ctx)
		audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
				 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);

	switch (sel->family) {
	case AF_INET:
		audit_log_format(audit_buf, " src=%pI4", &sel->saddr.a4);
		if (sel->prefixlen_s != 32)
			audit_log_format(audit_buf, " src_prefixlen=%d",
					 sel->prefixlen_s);
		audit_log_format(audit_buf, " dst=%pI4", &sel->daddr.a4);
		if (sel->prefixlen_d != 32)
			audit_log_format(audit_buf, " dst_prefixlen=%d",
					 sel->prefixlen_d);
		break;
	case AF_INET6:
		audit_log_format(audit_buf, " src=%pI6", sel->saddr.a6);
		if (sel->prefixlen_s != 128)
			audit_log_format(audit_buf, " src_prefixlen=%d",
					 sel->prefixlen_s);
		audit_log_format(audit_buf, " dst=%pI6", sel->daddr.a6);
		if (sel->prefixlen_d != 128)
			audit_log_format(audit_buf, " dst_prefixlen=%d",
					 sel->prefixlen_d);
		break;
	}
}
void xfrm_audit_policy_add(struct xfrm_policy *xp, int result,
			   kuid_t auid, u32 sessionid, u32 secid)
{
	struct audit_buffer *audit_buf;

	audit_buf = xfrm_audit_start("SPD-add");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
	audit_log_format(audit_buf, " res=%u", result);
	xfrm_audit_common_policyinfo(xp, audit_buf);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_policy_add);
void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
			      kuid_t auid, u32 sessionid, u32 secid)
{
	struct audit_buffer *audit_buf;

	audit_buf = xfrm_audit_start("SPD-delete");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
	audit_log_format(audit_buf, " res=%u", result);
	xfrm_audit_common_policyinfo(xp, audit_buf);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete);
#endif
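/*
 * XFRM_MIGRATE: rewrite the endpoint addresses of matching policies
 * and states in place instead of tearing the SAs down, as used by
 * Mobile IPv6 and MOBIKE-style key managers.
 */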
#ifdef CONFIG_XFRM_MIGRATE
static bool xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp,
					const struct xfrm_selector *sel_tgt)
{
	if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
		if (sel_tgt->family == sel_cmp->family &&
		    xfrm_addr_equal(&sel_tgt->daddr, &sel_cmp->daddr,
				    sel_cmp->family) &&
		    xfrm_addr_equal(&sel_tgt->saddr, &sel_cmp->saddr,
				    sel_cmp->family) &&
		    sel_tgt->prefixlen_d == sel_cmp->prefixlen_d &&
		    sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) {
			return true;
		}
	} else {
		if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) {
			return true;
		}
	}
	return false;
}
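/*
 * Find the policy to migrate: try an exact hash lookup first, then the
 * inexact list, keeping the inexact match only if its priority beats
 * (is lower than) the direct one.  The returned policy is held.
 */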
static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector *sel,
						    u8 dir, u8 type)
{
	struct xfrm_policy *pol, *ret = NULL;
	struct hlist_head *chain;
	u32 priority = ~0U;

	read_lock_bh(&xfrm_policy_lock);
	chain = policy_hash_direct(&init_net, &sel->daddr, &sel->saddr, sel->family, dir);
	hlist_for_each_entry(pol, chain, bydst) {
		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
		    pol->type == type) {
			ret = pol;
			priority = ret->priority;
			break;
		}
	}
	chain = &init_net.xfrm.policy_inexact[dir];
	hlist_for_each_entry(pol, chain, bydst) {
		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
		    pol->type == type &&
		    pol->priority < priority) {
			ret = pol;
			break;
		}
	}

	if (ret)
		xfrm_pol_hold(ret);

	read_unlock_bh(&xfrm_policy_lock);

	return ret;
}
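/*
 * Does migrate entry @m describe template @t?  Tunnel and BEET mode
 * templates must also match on the old endpoint addresses; transport
 * mode templates carry no addresses, so mode/proto/reqid suffice.
 */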
static int migrate_tmpl_match(const struct xfrm_migrate *m, const struct xfrm_tmpl *t)
{
	int match = 0;

	if (t->mode == m->mode && t->id.proto == m->proto &&
	    (m->reqid == 0 || t->reqid == m->reqid)) {
		switch (t->mode) {
		case XFRM_MODE_TUNNEL:
		case XFRM_MODE_BEET:
			if (xfrm_addr_equal(&t->id.daddr, &m->old_daddr,
					    m->old_family) &&
			    xfrm_addr_equal(&t->saddr, &m->old_saddr,
					    m->old_family)) {
				match = 1;
			}
			break;
		case XFRM_MODE_TRANSPORT:
			/* in case of transport mode, template does not store
			 * any IP addresses, hence we just compare mode and
			 * protocol */
			match = 1;
			break;
		default:
			break;
		}
	}
	return match;
}
/* update endpoint address(es) of template(s) */
static int xfrm_policy_migrate(struct xfrm_policy *pol,
			       struct xfrm_migrate *m, int num_migrate)
{
	struct xfrm_migrate *mp;
	int i, j, n = 0;

	write_lock_bh(&pol->lock);
	if (unlikely(pol->walk.dead)) {
		/* target policy has been deleted */
		write_unlock_bh(&pol->lock);
		return -ENOENT;
	}

	for (i = 0; i < pol->xfrm_nr; i++) {
		for (j = 0, mp = m; j < num_migrate; j++, mp++) {
			if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i]))
				continue;
			n++;
			if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL &&
			    pol->xfrm_vec[i].mode != XFRM_MODE_BEET)
				continue;
			/* update endpoints */
			memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr,
			       sizeof(pol->xfrm_vec[i].id.daddr));
			memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr,
			       sizeof(pol->xfrm_vec[i].saddr));
			pol->xfrm_vec[i].encap_family = mp->new_family;
			/* flush bundles */
			atomic_inc(&pol->genid);
		}
	}

	write_unlock_bh(&pol->lock);

	if (!n)
		return -ENODATA;

	return 0;
}
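/*
 * Validate a migrate request: a sane entry count, no no-op entries
 * (old equals new), no wildcard new addresses and no duplicates.
 */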
static int xfrm_migrate_check(const struct xfrm_migrate *m, int num_migrate)
{
	int i, j;

	if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH)
		return -EINVAL;

	for (i = 0; i < num_migrate; i++) {
		if (xfrm_addr_equal(&m[i].old_daddr, &m[i].new_daddr,
				    m[i].old_family) &&
		    xfrm_addr_equal(&m[i].old_saddr, &m[i].new_saddr,
				    m[i].old_family))
			return -EINVAL;
		if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) ||
		    xfrm_addr_any(&m[i].new_saddr, m[i].new_family))
			return -EINVAL;

		/* check if there is any duplicated entry */
		for (j = i + 1; j < num_migrate; j++) {
			if (!memcmp(&m[i].old_daddr, &m[j].old_daddr,
				    sizeof(m[i].old_daddr)) &&
			    !memcmp(&m[i].old_saddr, &m[j].old_saddr,
				    sizeof(m[i].old_saddr)) &&
			    m[i].proto == m[j].proto &&
			    m[i].mode == m[j].mode &&
			    m[i].reqid == m[j].reqid &&
			    m[i].old_family == m[j].old_family)
				return -EINVAL;
		}
	}

	return 0;
}
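/*
 * Top-level migration: the five stages below must leave the SPD and
 * SAD untouched on failure, hence the restore_state unwind that drops
 * the references and deletes any already-created new states.
 */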
int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
		 struct xfrm_migrate *m, int num_migrate,
		 struct xfrm_kmaddress *k)
{
	int i, err, nx_cur = 0, nx_new = 0;
	struct xfrm_policy *pol = NULL;
	struct xfrm_state *x, *xc;
	struct xfrm_state *x_cur[XFRM_MAX_DEPTH];
	struct xfrm_state *x_new[XFRM_MAX_DEPTH];
	struct xfrm_migrate *mp;

	if ((err = xfrm_migrate_check(m, num_migrate)) < 0)
		goto out;

	/* Stage 1 - find policy */
	if ((pol = xfrm_migrate_policy_find(sel, dir, type)) == NULL) {
		err = -ENOENT;
		goto out;
	}

	/* Stage 2 - find and update state(s) */
	for (i = 0, mp = m; i < num_migrate; i++, mp++) {
		if ((x = xfrm_migrate_state_find(mp))) {
			x_cur[nx_cur] = x;
			nx_cur++;
			if ((xc = xfrm_state_migrate(x, mp))) {
				x_new[nx_new] = xc;
				nx_new++;
			} else {
				err = -ENODATA;
				goto restore_state;
			}
		}
	}

	/* Stage 3 - update policy */
	if ((err = xfrm_policy_migrate(pol, m, num_migrate)) < 0)
		goto restore_state;

	/* Stage 4 - delete old state(s) */
	if (nx_cur) {
		xfrm_states_put(x_cur, nx_cur);
		xfrm_states_delete(x_cur, nx_cur);
	}

	/* Stage 5 - announce */
	km_migrate(sel, dir, type, m, num_migrate, k);

	xfrm_pol_put(pol);

	return 0;

out:
	return err;

restore_state:
	if (pol)
		xfrm_pol_put(pol);
	if (nx_cur)
		xfrm_states_put(x_cur, nx_cur);
	if (nx_new)
		xfrm_states_delete(x_new, nx_new);

	return err;
}
EXPORT_SYMBOL(xfrm_migrate);
#endif