1 /*
2  * xfrm_state.c
3  *
4  * Changes:
5  *      Mitsuru KANDA @USAGI
6  *      Kazunori MIYAZAWA @USAGI
7  *      Kunihiro Ishiguro <kunihiro@ipinfusion.com>
8  *              IPv6 support
9  *      YOSHIFUJI Hideaki @USAGI
10  *              Split up af-specific functions
11  *      Derek Atkins <derek@ihtfp.com>
12  *              Add UDP Encapsulation
13  *      
14  */
15
16 #include <linux/workqueue.h>
17 #include <net/xfrm.h>
18 #include <linux/pfkeyv2.h>
19 #include <linux/ipsec.h>
20 #include <linux/module.h>
21 #include <asm/uaccess.h>
22
23 /* Each xfrm_state may be linked to two tables:
24
25    1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl)
26    2. Hash table by daddr to find what SAs exist for a given
27       destination/tunnel endpoint. (output)
28  */
29
30 static DEFINE_SPINLOCK(xfrm_state_lock);
31
32 /* Hash table to find an appropriate SA towards a given target (the endpoint
33  * of a tunnel or the destination of transport mode) allowed by a selector.
34  *
35  * Its main use is finding an SA after the policy has selected a tunnel or transport mode.
36  * It can also be used by the ah/esp ICMP error handler to find the offending SA.
37  */
38 static struct list_head xfrm_state_bydst[XFRM_DST_HSIZE];
39 static struct list_head xfrm_state_byspi[XFRM_DST_HSIZE];
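/*
 * Illustrative sketch (not part of the original file): what a lookup over the
 * byspi table is expected to look like.  The per-family state_lookup helper
 * referenced later in this file does roughly this under xfrm_state_lock; the
 * family-specific destination-address comparison is omitted here, so treat
 * the function below as an assumption, not the real implementation.
 */
#if 0
static struct xfrm_state *byspi_lookup_sketch(xfrm_address_t *daddr, u32 spi,
					      u8 proto, unsigned short family)
{
	unsigned h = xfrm_spi_hash(daddr, spi, proto, family);
	struct xfrm_state *x;

	list_for_each_entry(x, xfrm_state_byspi + h, byspi) {
		/* a family-specific comparison of x->id.daddr against daddr
		 * belongs here as well */
		if (x->id.spi == spi && x->id.proto == proto) {
			xfrm_state_hold(x);	/* caller must xfrm_state_put() */
			return x;
		}
	}
	return NULL;
}
#endif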
40
41 DECLARE_WAIT_QUEUE_HEAD(km_waitq);
42 EXPORT_SYMBOL(km_waitq);
43
44 static DEFINE_RWLOCK(xfrm_state_afinfo_lock);
45 static struct xfrm_state_afinfo *xfrm_state_afinfo[NPROTO];
46
47 static struct work_struct xfrm_state_gc_work;
48 static struct list_head xfrm_state_gc_list = LIST_HEAD_INIT(xfrm_state_gc_list);
49 static DEFINE_SPINLOCK(xfrm_state_gc_lock);
50
51 static int xfrm_state_gc_flush_bundles;
52
53 static int __xfrm_state_delete(struct xfrm_state *x);
54
55 static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned short family);
56 static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);
57
58 static int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
59 static void km_state_expired(struct xfrm_state *x, int hard);
60
61 static void xfrm_state_gc_destroy(struct xfrm_state *x)
62 {
63         if (del_timer(&x->timer))
64                 BUG();
65         kfree(x->aalg);
66         kfree(x->ealg);
67         kfree(x->calg);
68         kfree(x->encap);
69         if (x->type) {
70                 x->type->destructor(x);
71                 xfrm_put_type(x->type);
72         }
73         kfree(x);
74 }
75
76 static void xfrm_state_gc_task(void *data)
77 {
78         struct xfrm_state *x;
79         struct list_head *entry, *tmp;
80         struct list_head gc_list = LIST_HEAD_INIT(gc_list);
81
82         if (xfrm_state_gc_flush_bundles) {
83                 xfrm_state_gc_flush_bundles = 0;
84                 xfrm_flush_bundles();
85         }
86
87         spin_lock_bh(&xfrm_state_gc_lock);
88         list_splice_init(&xfrm_state_gc_list, &gc_list);
89         spin_unlock_bh(&xfrm_state_gc_lock);
90
91         list_for_each_safe(entry, tmp, &gc_list) {
92                 x = list_entry(entry, struct xfrm_state, bydst);
93                 xfrm_state_gc_destroy(x);
94         }
95         wake_up(&km_waitq);
96 }
97
98 static inline unsigned long make_jiffies(long secs)
99 {
100         if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
101                 return MAX_SCHEDULE_TIMEOUT-1;
102         else
103                 return secs*HZ;
104 }
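/*
 * Worked example for make_jiffies() (values assumed): with HZ == 1000,
 * make_jiffies(10) yields 10 * HZ == 10000 jiffies, while a huge lifetime
 * such as make_jiffies(LONG_MAX / 2) is clamped to MAX_SCHEDULE_TIMEOUT - 1,
 * so the "jiffies + make_jiffies(next)" arithmetic in the timer handler below
 * stays within the range mod_timer() accepts.
 */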
105
106 static void xfrm_timer_handler(unsigned long data)
107 {
108         struct xfrm_state *x = (struct xfrm_state*)data;
109         unsigned long now = (unsigned long)xtime.tv_sec;
110         long next = LONG_MAX;
111         int warn = 0;
112
113         spin_lock(&x->lock);
114         if (x->km.state == XFRM_STATE_DEAD)
115                 goto out;
116         if (x->km.state == XFRM_STATE_EXPIRED)
117                 goto expired;
118         if (x->lft.hard_add_expires_seconds) {
119                 long tmo = x->lft.hard_add_expires_seconds +
120                         x->curlft.add_time - now;
121                 if (tmo <= 0)
122                         goto expired;
123                 if (tmo < next)
124                         next = tmo;
125         }
126         if (x->lft.hard_use_expires_seconds) {
127                 long tmo = x->lft.hard_use_expires_seconds +
128                         (x->curlft.use_time ? : now) - now;
129                 if (tmo <= 0)
130                         goto expired;
131                 if (tmo < next)
132                         next = tmo;
133         }
134         if (x->km.dying)
135                 goto resched;
136         if (x->lft.soft_add_expires_seconds) {
137                 long tmo = x->lft.soft_add_expires_seconds +
138                         x->curlft.add_time - now;
139                 if (tmo <= 0)
140                         warn = 1;
141                 else if (tmo < next)
142                         next = tmo;
143         }
144         if (x->lft.soft_use_expires_seconds) {
145                 long tmo = x->lft.soft_use_expires_seconds +
146                         (x->curlft.use_time ? : now) - now;
147                 if (tmo <= 0)
148                         warn = 1;
149                 else if (tmo < next)
150                         next = tmo;
151         }
152
153         x->km.dying = warn;
154         if (warn)
155                 km_state_expired(x, 0);
156 resched:
157         if (next != LONG_MAX &&
158             !mod_timer(&x->timer, jiffies + make_jiffies(next)))
159                 xfrm_state_hold(x);
160         goto out;
161
162 expired:
163         if (x->km.state == XFRM_STATE_ACQ && x->id.spi == 0) {
164                 x->km.state = XFRM_STATE_EXPIRED;
165                 wake_up(&km_waitq);
166                 next = 2;
167                 goto resched;
168         }
169         if (!__xfrm_state_delete(x) && x->id.spi)
170                 km_state_expired(x, 1);
171
172 out:
173         spin_unlock(&x->lock);
174         xfrm_state_put(x);
175 }
176
177 struct xfrm_state *xfrm_state_alloc(void)
178 {
179         struct xfrm_state *x;
180
181         x = kmalloc(sizeof(struct xfrm_state), GFP_ATOMIC);
182
183         if (x) {
184                 memset(x, 0, sizeof(struct xfrm_state));
185                 atomic_set(&x->refcnt, 1);
186                 atomic_set(&x->tunnel_users, 0);
187                 INIT_LIST_HEAD(&x->bydst);
188                 INIT_LIST_HEAD(&x->byspi);
189                 init_timer(&x->timer);
190                 x->timer.function = xfrm_timer_handler;
191                 x->timer.data     = (unsigned long)x;
192                 x->curlft.add_time = (unsigned long)xtime.tv_sec;
193                 x->lft.soft_byte_limit = XFRM_INF;
194                 x->lft.soft_packet_limit = XFRM_INF;
195                 x->lft.hard_byte_limit = XFRM_INF;
196                 x->lft.hard_packet_limit = XFRM_INF;
197                 spin_lock_init(&x->lock);
198         }
199         return x;
200 }
201 EXPORT_SYMBOL(xfrm_state_alloc);
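/*
 * Illustrative sketch (not part of the original file): the lifecycle a key
 * manager is expected to follow with the helpers in this file.  The protocol,
 * family and addressing below are placeholders; on failure the state is
 * marked dead before the final put, mirroring the error path used by
 * xfrm_state_find() further down.
 */
#if 0
static int km_install_state_sketch(xfrm_address_t *daddr, u32 spi)
{
	struct xfrm_state *x = xfrm_state_alloc();
	int err;

	if (!x)
		return -ENOMEM;

	x->id.proto = IPPROTO_ESP;		/* placeholder protocol */
	x->id.spi = spi;			/* network byte order */
	memcpy(&x->id.daddr, daddr, sizeof(x->id.daddr));
	x->props.family = AF_INET;		/* placeholder family */

	err = xfrm_init_state(x);		/* binds x->type, validates algorithms */
	if (!err)
		err = xfrm_state_add(x);	/* hashes the state by dst and by spi */
	if (err) {
		x->km.state = XFRM_STATE_DEAD;
		xfrm_state_put(x);		/* drop the allocation reference */
	}
	return err;
}
#endif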
202
203 void __xfrm_state_destroy(struct xfrm_state *x)
204 {
205         BUG_TRAP(x->km.state == XFRM_STATE_DEAD);
206
207         spin_lock_bh(&xfrm_state_gc_lock);
208         list_add(&x->bydst, &xfrm_state_gc_list);
209         spin_unlock_bh(&xfrm_state_gc_lock);
210         schedule_work(&xfrm_state_gc_work);
211 }
212 EXPORT_SYMBOL(__xfrm_state_destroy);
213
214 static int __xfrm_state_delete(struct xfrm_state *x)
215 {
216         int err = -ESRCH;
217
218         if (x->km.state != XFRM_STATE_DEAD) {
219                 x->km.state = XFRM_STATE_DEAD;
220                 spin_lock(&xfrm_state_lock);
221                 list_del(&x->bydst);
222                 atomic_dec(&x->refcnt);
223                 if (x->id.spi) {
224                         list_del(&x->byspi);
225                         atomic_dec(&x->refcnt);
226                 }
227                 spin_unlock(&xfrm_state_lock);
228                 if (del_timer(&x->timer))
229                         atomic_dec(&x->refcnt);
230
231                 /* The number two in this test is the reference
232                  * mentioned in the comment below plus the reference
233                  * our caller holds.  A larger value means that
234                  * there are DSTs attached to this xfrm_state.
235                  */
236                 if (atomic_read(&x->refcnt) > 2) {
237                         xfrm_state_gc_flush_bundles = 1;
238                         schedule_work(&xfrm_state_gc_work);
239                 }
240
241                 /* All xfrm_state objects are created by xfrm_state_alloc.
242                  * The xfrm_state_alloc call gives a reference, and that
243                  * is what we are dropping here.
244                  */
245                 atomic_dec(&x->refcnt);
246                 err = 0;
247         }
248
249         return err;
250 }
251
252 int xfrm_state_delete(struct xfrm_state *x)
253 {
254         int err;
255
256         spin_lock_bh(&x->lock);
257         err = __xfrm_state_delete(x);
258         spin_unlock_bh(&x->lock);
259
260         return err;
261 }
262 EXPORT_SYMBOL(xfrm_state_delete);
263
264 void xfrm_state_flush(u8 proto)
265 {
266         int i;
267         struct xfrm_state *x;
268
269         spin_lock_bh(&xfrm_state_lock);
270         for (i = 0; i < XFRM_DST_HSIZE; i++) {
271 restart:
272                 list_for_each_entry(x, xfrm_state_bydst+i, bydst) {
273                         if (!xfrm_state_kern(x) &&
274                             (proto == IPSEC_PROTO_ANY || x->id.proto == proto)) {
275                                 xfrm_state_hold(x);
276                                 spin_unlock_bh(&xfrm_state_lock);
277
278                                 xfrm_state_delete(x);
279                                 xfrm_state_put(x);
280
281                                 spin_lock_bh(&xfrm_state_lock);
282                                 goto restart;
283                         }
284                 }
285         }
286         spin_unlock_bh(&xfrm_state_lock);
287         wake_up(&km_waitq);
288 }
289 EXPORT_SYMBOL(xfrm_state_flush);
290
291 static int
292 xfrm_init_tempsel(struct xfrm_state *x, struct flowi *fl,
293                   struct xfrm_tmpl *tmpl,
294                   xfrm_address_t *daddr, xfrm_address_t *saddr,
295                   unsigned short family)
296 {
297         struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
298         if (!afinfo)
299                 return -1;
300         afinfo->init_tempsel(x, fl, tmpl, daddr, saddr);
301         xfrm_state_put_afinfo(afinfo);
302         return 0;
303 }
304
305 struct xfrm_state *
306 xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr, 
307                 struct flowi *fl, struct xfrm_tmpl *tmpl,
308                 struct xfrm_policy *pol, int *err,
309                 unsigned short family)
310 {
311         unsigned h = xfrm_dst_hash(daddr, family);
312         struct xfrm_state *x, *x0;
313         int acquire_in_progress = 0;
314         int error = 0;
315         struct xfrm_state *best = NULL;
316         struct xfrm_state_afinfo *afinfo;
317         
318         afinfo = xfrm_state_get_afinfo(family);
319         if (afinfo == NULL) {
320                 *err = -EAFNOSUPPORT;
321                 return NULL;
322         }
323
324         spin_lock_bh(&xfrm_state_lock);
325         list_for_each_entry(x, xfrm_state_bydst+h, bydst) {
326                 if (x->props.family == family &&
327                     x->props.reqid == tmpl->reqid &&
328                     xfrm_state_addr_check(x, daddr, saddr, family) &&
329                     tmpl->mode == x->props.mode &&
330                     tmpl->id.proto == x->id.proto &&
331                     (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) {
332                         /* Resolution logic:
333                            1. There is a valid state with a matching selector.
334                               Done.
335                            2. Valid state with an inappropriate selector. Skip.
336
337                            Entering area of "sysdeps".
338
339                            3. If the state is not valid, its selector is
340                               temporary and matches only the session which
341                               triggered the previous resolution. The key
342                               manager will do something to install a state
343                               with a proper selector.
344                          */
345                         if (x->km.state == XFRM_STATE_VALID) {
346                                 if (!xfrm_selector_match(&x->sel, fl, family))
347                                         continue;
348                                 if (!best ||
349                                     best->km.dying > x->km.dying ||
350                                     (best->km.dying == x->km.dying &&
351                                      best->curlft.add_time < x->curlft.add_time))
352                                         best = x;
353                         } else if (x->km.state == XFRM_STATE_ACQ) {
354                                 acquire_in_progress = 1;
355                         } else if (x->km.state == XFRM_STATE_ERROR ||
356                                    x->km.state == XFRM_STATE_EXPIRED) {
357                                 if (xfrm_selector_match(&x->sel, fl, family))
358                                         error = -ESRCH;
359                         }
360                 }
361         }
362
363         x = best;
364         if (!x && !error && !acquire_in_progress) {
365                 if (tmpl->id.spi &&
366                     (x0 = afinfo->state_lookup(daddr, tmpl->id.spi,
367                                                tmpl->id.proto)) != NULL) {
368                         xfrm_state_put(x0);
369                         error = -EEXIST;
370                         goto out;
371                 }
372                 x = xfrm_state_alloc();
373                 if (x == NULL) {
374                         error = -ENOMEM;
375                         goto out;
376                 }
377                 /* Initialize a temporary selector matching only
378                  * the current session. */
379                 xfrm_init_tempsel(x, fl, tmpl, daddr, saddr, family);
380
381                 if (km_query(x, tmpl, pol) == 0) {
382                         x->km.state = XFRM_STATE_ACQ;
383                         list_add_tail(&x->bydst, xfrm_state_bydst+h);
384                         xfrm_state_hold(x);
385                         if (x->id.spi) {
386                                 h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, family);
387                                 list_add(&x->byspi, xfrm_state_byspi+h);
388                                 xfrm_state_hold(x);
389                         }
390                         x->lft.hard_add_expires_seconds = XFRM_ACQ_EXPIRES;
391                         xfrm_state_hold(x);
392                         x->timer.expires = jiffies + XFRM_ACQ_EXPIRES*HZ;
393                         add_timer(&x->timer);
394                 } else {
395                         x->km.state = XFRM_STATE_DEAD;
396                         xfrm_state_put(x);
397                         x = NULL;
398                         error = -ESRCH;
399                 }
400         }
401 out:
402         if (x)
403                 xfrm_state_hold(x);
404         else
405                 *err = acquire_in_progress ? -EAGAIN : error;
406         spin_unlock_bh(&xfrm_state_lock);
407         xfrm_state_put_afinfo(afinfo);
408         return x;
409 }
410
411 static void __xfrm_state_insert(struct xfrm_state *x)
412 {
413         unsigned h = xfrm_dst_hash(&x->id.daddr, x->props.family);
414
415         list_add(&x->bydst, xfrm_state_bydst+h);
416         xfrm_state_hold(x);
417
418         h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, x->props.family);
419
420         list_add(&x->byspi, xfrm_state_byspi+h);
421         xfrm_state_hold(x);
422
423         if (!mod_timer(&x->timer, jiffies + HZ))
424                 xfrm_state_hold(x);
425
426         wake_up(&km_waitq);
427 }
428
429 void xfrm_state_insert(struct xfrm_state *x)
430 {
431         spin_lock_bh(&xfrm_state_lock);
432         __xfrm_state_insert(x);
433         spin_unlock_bh(&xfrm_state_lock);
434
435         xfrm_flush_all_bundles();
436 }
437 EXPORT_SYMBOL(xfrm_state_insert);
438
439 static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq);
440
441 int xfrm_state_add(struct xfrm_state *x)
442 {
443         struct xfrm_state_afinfo *afinfo;
444         struct xfrm_state *x1;
445         int family;
446         int err;
447
448         family = x->props.family;
449         afinfo = xfrm_state_get_afinfo(family);
450         if (unlikely(afinfo == NULL))
451                 return -EAFNOSUPPORT;
452
453         spin_lock_bh(&xfrm_state_lock);
454
455         x1 = afinfo->state_lookup(&x->id.daddr, x->id.spi, x->id.proto);
456         if (x1) {
457                 xfrm_state_put(x1);
458                 x1 = NULL;
459                 err = -EEXIST;
460                 goto out;
461         }
462
463         if (x->km.seq) {
464                 x1 = __xfrm_find_acq_byseq(x->km.seq);
465                 if (x1 && xfrm_addr_cmp(&x1->id.daddr, &x->id.daddr, family)) {
466                         xfrm_state_put(x1);
467                         x1 = NULL;
468                 }
469         }
470
471         if (!x1)
472                 x1 = afinfo->find_acq(
473                         x->props.mode, x->props.reqid, x->id.proto,
474                         &x->id.daddr, &x->props.saddr, 0);
475
476         __xfrm_state_insert(x);
477         err = 0;
478
479 out:
480         spin_unlock_bh(&xfrm_state_lock);
481         xfrm_state_put_afinfo(afinfo);
482
483         if (!err)
484                 xfrm_flush_all_bundles();
485
486         if (x1) {
487                 xfrm_state_delete(x1);
488                 xfrm_state_put(x1);
489         }
490
491         return err;
492 }
493 EXPORT_SYMBOL(xfrm_state_add);
494
495 int xfrm_state_update(struct xfrm_state *x)
496 {
497         struct xfrm_state_afinfo *afinfo;
498         struct xfrm_state *x1;
499         int err;
500
501         afinfo = xfrm_state_get_afinfo(x->props.family);
502         if (unlikely(afinfo == NULL))
503                 return -EAFNOSUPPORT;
504
505         spin_lock_bh(&xfrm_state_lock);
506         x1 = afinfo->state_lookup(&x->id.daddr, x->id.spi, x->id.proto);
507
508         err = -ESRCH;
509         if (!x1)
510                 goto out;
511
512         if (xfrm_state_kern(x1)) {
513                 xfrm_state_put(x1);
514                 err = -EEXIST;
515                 goto out;
516         }
517
518         if (x1->km.state == XFRM_STATE_ACQ) {
519                 __xfrm_state_insert(x);
520                 x = NULL;
521         }
522         err = 0;
523
524 out:
525         spin_unlock_bh(&xfrm_state_lock);
526         xfrm_state_put_afinfo(afinfo);
527
528         if (err)
529                 return err;
530
531         if (!x) {
532                 xfrm_state_delete(x1);
533                 xfrm_state_put(x1);
534                 return 0;
535         }
536
537         err = -EINVAL;
538         spin_lock_bh(&x1->lock);
539         if (likely(x1->km.state == XFRM_STATE_VALID)) {
540                 if (x->encap && x1->encap)
541                         memcpy(x1->encap, x->encap, sizeof(*x1->encap));
542                 memcpy(&x1->lft, &x->lft, sizeof(x1->lft));
543                 x1->km.dying = 0;
544
545                 if (!mod_timer(&x1->timer, jiffies + HZ))
546                         xfrm_state_hold(x1);
547                 if (x1->curlft.use_time)
548                         xfrm_state_check_expire(x1);
549
550                 err = 0;
551         }
552         spin_unlock_bh(&x1->lock);
553
554         xfrm_state_put(x1);
555
556         return err;
557 }
558 EXPORT_SYMBOL(xfrm_state_update);
559
560 int xfrm_state_check_expire(struct xfrm_state *x)
561 {
562         if (!x->curlft.use_time)
563                 x->curlft.use_time = (unsigned long)xtime.tv_sec;
564
565         if (x->km.state != XFRM_STATE_VALID)
566                 return -EINVAL;
567
568         if (x->curlft.bytes >= x->lft.hard_byte_limit ||
569             x->curlft.packets >= x->lft.hard_packet_limit) {
570                 x->km.state = XFRM_STATE_EXPIRED;
571                 if (!mod_timer(&x->timer, jiffies))
572                         xfrm_state_hold(x);
573                 return -EINVAL;
574         }
575
576         if (!x->km.dying &&
577             (x->curlft.bytes >= x->lft.soft_byte_limit ||
578              x->curlft.packets >= x->lft.soft_packet_limit)) {
579                 x->km.dying = 1;
580                 km_state_expired(x, 0);
581         }
582         return 0;
583 }
584 EXPORT_SYMBOL(xfrm_state_check_expire);
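/*
 * Illustrative sketch (not part of the original file): how a datapath caller
 * is expected to account traffic against the lifetime limits.  The real
 * output path does this under x->lock together with replay handling; the
 * helper below is an assumption about usage, not the actual output code.
 */
#if 0
static int account_and_check_sketch(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;

	spin_lock_bh(&x->lock);
	x->curlft.bytes += skb->len;
	x->curlft.packets++;
	err = xfrm_state_check_expire(x);	/* may move the state to EXPIRED */
	spin_unlock_bh(&x->lock);

	return err;
}
#endif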
585
586 static int xfrm_state_check_space(struct xfrm_state *x, struct sk_buff *skb)
587 {
588         int nhead = x->props.header_len + LL_RESERVED_SPACE(skb->dst->dev)
589                 - skb_headroom(skb);
590
591         if (nhead > 0)
592                 return pskb_expand_head(skb, nhead, 0, GFP_ATOMIC);
593
594         /* Check tail too... */
595         return 0;
596 }
597
598 int xfrm_state_check(struct xfrm_state *x, struct sk_buff *skb)
599 {
600         int err = xfrm_state_check_expire(x);
601         if (err < 0)
602                 goto err;
603         err = xfrm_state_check_space(x, skb);
604 err:
605         return err;
606 }
607 EXPORT_SYMBOL(xfrm_state_check);
608
609 struct xfrm_state *
610 xfrm_state_lookup(xfrm_address_t *daddr, u32 spi, u8 proto,
611                   unsigned short family)
612 {
613         struct xfrm_state *x;
614         struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
615         if (!afinfo)
616                 return NULL;
617
618         spin_lock_bh(&xfrm_state_lock);
619         x = afinfo->state_lookup(daddr, spi, proto);
620         spin_unlock_bh(&xfrm_state_lock);
621         xfrm_state_put_afinfo(afinfo);
622         return x;
623 }
624 EXPORT_SYMBOL(xfrm_state_lookup);
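/*
 * Illustrative sketch (not part of the original file): deleting one SA found
 * by SPI from process context.  xfrm_state_lookup() returns a held state, so
 * the delete must be paired with xfrm_state_put(); xfrm_state_flush() above
 * uses the same hold/delete/put pattern while walking the hash chains.
 */
#if 0
static int delete_by_spi_sketch(xfrm_address_t *daddr, u32 spi, u8 proto,
				unsigned short family)
{
	struct xfrm_state *x;
	int err;

	x = xfrm_state_lookup(daddr, spi, proto, family);
	if (!x)
		return -ESRCH;

	err = xfrm_state_delete(x);	/* unhashes the state and stops its timer */
	xfrm_state_put(x);		/* drop the lookup reference */
	return err;
}
#endif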
625
626 struct xfrm_state *
627 xfrm_find_acq(u8 mode, u32 reqid, u8 proto, 
628               xfrm_address_t *daddr, xfrm_address_t *saddr, 
629               int create, unsigned short family)
630 {
631         struct xfrm_state *x;
632         struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
633         if (!afinfo)
634                 return NULL;
635
636         spin_lock_bh(&xfrm_state_lock);
637         x = afinfo->find_acq(mode, reqid, proto, daddr, saddr, create);
638         spin_unlock_bh(&xfrm_state_lock);
639         xfrm_state_put_afinfo(afinfo);
640         return x;
641 }
642 EXPORT_SYMBOL(xfrm_find_acq);
643
644 /* Silly enough, but I'm too lazy to build a resolution list. */
645
646 static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq)
647 {
648         int i;
649         struct xfrm_state *x;
650
651         for (i = 0; i < XFRM_DST_HSIZE; i++) {
652                 list_for_each_entry(x, xfrm_state_bydst+i, bydst) {
653                         if (x->km.seq == seq && x->km.state == XFRM_STATE_ACQ) {
654                                 xfrm_state_hold(x);
655                                 return x;
656                         }
657                 }
658         }
659         return NULL;
660 }
661
662 struct xfrm_state *xfrm_find_acq_byseq(u32 seq)
663 {
664         struct xfrm_state *x;
665
666         spin_lock_bh(&xfrm_state_lock);
667         x = __xfrm_find_acq_byseq(seq);
668         spin_unlock_bh(&xfrm_state_lock);
669         return x;
670 }
671 EXPORT_SYMBOL(xfrm_find_acq_byseq);
672
673 u32 xfrm_get_acqseq(void)
674 {
675         u32 res;
676         static u32 acqseq;
677         static DEFINE_SPINLOCK(acqseq_lock);
678
679         spin_lock_bh(&acqseq_lock);
680         res = (++acqseq ? : ++acqseq);
681         spin_unlock_bh(&acqseq_lock);
682         return res;
683 }
684 EXPORT_SYMBOL(xfrm_get_acqseq);
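/*
 * Note on the expression above (added for clarity): "++acqseq ? : ++acqseq"
 * increments the counter and, only if the increment wrapped to zero,
 * increments it once more, so the returned value is never 0.  That matters
 * because 0 is treated as "no ACQUIRE sequence number" by callers such as
 * xfrm_state_add() (the "if (x->km.seq)" test above).
 */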
685
686 void
687 xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi)
688 {
689         u32 h;
690         struct xfrm_state *x0;
691
692         if (x->id.spi)
693                 return;
694
695         if (minspi == maxspi) {
696                 x0 = xfrm_state_lookup(&x->id.daddr, minspi, x->id.proto, x->props.family);
697                 if (x0) {
698                         xfrm_state_put(x0);
699                         return;
700                 }
701                 x->id.spi = minspi;
702         } else {
703                 u32 spi = 0;
704                 minspi = ntohl(minspi);
705                 maxspi = ntohl(maxspi);
706                 for (h=0; h<maxspi-minspi+1; h++) {
707                         spi = minspi + net_random()%(maxspi-minspi+1);
708                         x0 = xfrm_state_lookup(&x->id.daddr, htonl(spi), x->id.proto, x->props.family);
709                         if (x0 == NULL) {
710                                 x->id.spi = htonl(spi);
711                                 break;
712                         }
713                         xfrm_state_put(x0);
714                 }
715         }
716         if (x->id.spi) {
717                 spin_lock_bh(&xfrm_state_lock);
718                 h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, x->props.family);
719                 list_add(&x->byspi, xfrm_state_byspi+h);
720                 xfrm_state_hold(x);
721                 spin_unlock_bh(&xfrm_state_lock);
722                 wake_up(&km_waitq);
723         }
724 }
725 EXPORT_SYMBOL(xfrm_alloc_spi);
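/*
 * Illustrative sketch (not part of the original file): how a key manager is
 * expected to use xfrm_alloc_spi() on a larval (ACQ) state.  The SPI range is
 * a placeholder; as the min == max fast path above shows, both bounds are
 * passed in network byte order.
 */
#if 0
static int km_getspi_sketch(struct xfrm_state *x)
{
	xfrm_alloc_spi(x, htonl(0x100), htonl(0x1ff));
	if (!x->id.spi)
		return -ENOSPC;	/* every SPI in the range was already in use */
	/* the state is now hashed by SPI and reachable via xfrm_state_lookup() */
	return 0;
}
#endif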
726
727 int xfrm_state_walk(u8 proto, int (*func)(struct xfrm_state *, int, void*),
728                     void *data)
729 {
730         int i;
731         struct xfrm_state *x;
732         int count = 0;
733         int err = 0;
734
735         spin_lock_bh(&xfrm_state_lock);
736         for (i = 0; i < XFRM_DST_HSIZE; i++) {
737                 list_for_each_entry(x, xfrm_state_bydst+i, bydst) {
738                         if (proto == IPSEC_PROTO_ANY || x->id.proto == proto)
739                                 count++;
740                 }
741         }
742         if (count == 0) {
743                 err = -ENOENT;
744                 goto out;
745         }
746
747         for (i = 0; i < XFRM_DST_HSIZE; i++) {
748                 list_for_each_entry(x, xfrm_state_bydst+i, bydst) {
749                         if (proto != IPSEC_PROTO_ANY && x->id.proto != proto)
750                                 continue;
751                         err = func(x, --count, data);
752                         if (err)
753                                 goto out;
754                 }
755         }
756 out:
757         spin_unlock_bh(&xfrm_state_lock);
758         return err;
759 }
760 EXPORT_SYMBOL(xfrm_state_walk);
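/*
 * Illustrative sketch (not part of the original file): a minimal callback for
 * xfrm_state_walk().  The second argument is the countdown computed above; it
 * reaches 0 on the last matching state, which a dumper can use to flag the
 * final message.
 */
#if 0
static int count_one_sketch(struct xfrm_state *x, int countdown, void *ptr)
{
	unsigned int *seen = ptr;

	(*seen)++;
	return 0;		/* a non-zero return aborts the walk */
}

static int count_states_sketch(unsigned int *seen)
{
	*seen = 0;
	/* returns -ENOENT when no state matches the requested protocol */
	return xfrm_state_walk(IPSEC_PROTO_ANY, count_one_sketch, seen);
}
#endif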
761
762 int xfrm_replay_check(struct xfrm_state *x, u32 seq)
763 {
764         u32 diff;
765
766         seq = ntohl(seq);
767
768         if (unlikely(seq == 0))
769                 return -EINVAL;
770
771         if (likely(seq > x->replay.seq))
772                 return 0;
773
774         diff = x->replay.seq - seq;
775         if (diff >= x->props.replay_window) {
776                 x->stats.replay_window++;
777                 return -EINVAL;
778         }
779
780         if (x->replay.bitmap & (1U << diff)) {
781                 x->stats.replay++;
782                 return -EINVAL;
783         }
784         return 0;
785 }
786 EXPORT_SYMBOL(xfrm_replay_check);
787
788 void xfrm_replay_advance(struct xfrm_state *x, u32 seq)
789 {
790         u32 diff;
791
792         seq = ntohl(seq);
793
794         if (seq > x->replay.seq) {
795                 diff = seq - x->replay.seq;
796                 if (diff < x->props.replay_window)
797                         x->replay.bitmap = ((x->replay.bitmap) << diff) | 1;
798                 else
799                         x->replay.bitmap = 1;
800                 x->replay.seq = seq;
801         } else {
802                 diff = x->replay.seq - seq;
803                 x->replay.bitmap |= (1U << diff);
804         }
805 }
806 EXPORT_SYMBOL(xfrm_replay_advance);
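/*
 * Worked example for the two replay helpers above (numbers assumed): with
 * replay_window == 32 and replay.seq == 100, an incoming ESP sequence number
 * of 98 gives diff == 2, so bit 2 of the bitmap is tested by
 * xfrm_replay_check() and set by xfrm_replay_advance(); a second packet with
 * sequence 98 then fails the check as a replay.  Sequence 150 moves the
 * window forward: diff == 50 is at least the window size, so the bitmap is
 * reset to 1 (only the new top bit) and replay.seq becomes 150.  After that,
 * sequence 60 gives diff == 90 >= 32 and is rejected as falling outside the
 * window.  Callers are expected to run the check before authenticating a
 * packet and to advance the window only after authentication succeeds.
 */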
807
808 static struct list_head xfrm_km_list = LIST_HEAD_INIT(xfrm_km_list);
809 static DEFINE_RWLOCK(xfrm_km_lock);
810
811 void km_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
812 {
813         struct xfrm_mgr *km;
814
815         read_lock(&xfrm_km_lock);
816         list_for_each_entry(km, &xfrm_km_list, list)
817                 if (km->notify_policy)
818                         km->notify_policy(xp, dir, c);
819         read_unlock(&xfrm_km_lock);
820 }
821
822 void km_state_notify(struct xfrm_state *x, struct km_event *c)
823 {
824         struct xfrm_mgr *km;
825         read_lock(&xfrm_km_lock);
826         list_for_each_entry(km, &xfrm_km_list, list)
827                 if (km->notify)
828                         km->notify(x, c);
829         read_unlock(&xfrm_km_lock);
830 }
831
832 EXPORT_SYMBOL(km_policy_notify);
833 EXPORT_SYMBOL(km_state_notify);
834
835 static void km_state_expired(struct xfrm_state *x, int hard)
836 {
837         struct km_event c;
838
839         c.data.hard = hard;
840         c.event = XFRM_MSG_EXPIRE;
841         km_state_notify(x, &c);
842
843         if (hard)
844                 wake_up(&km_waitq);
845 }
846
847 /*
848  * We send to all registered managers regardless of failures;
849  * we are happy with one success.
850  */
851 static int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol)
852 {
853         int err = -EINVAL, acqret;
854         struct xfrm_mgr *km;
855
856         read_lock(&xfrm_km_lock);
857         list_for_each_entry(km, &xfrm_km_list, list) {
858                 acqret = km->acquire(x, t, pol, XFRM_POLICY_OUT);
859                 if (!acqret)
860                         err = acqret;
861         }
862         read_unlock(&xfrm_km_lock);
863         return err;
864 }
865
866 int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, u16 sport)
867 {
868         int err = -EINVAL;
869         struct xfrm_mgr *km;
870
871         read_lock(&xfrm_km_lock);
872         list_for_each_entry(km, &xfrm_km_list, list) {
873                 if (km->new_mapping)
874                         err = km->new_mapping(x, ipaddr, sport);
875                 if (!err)
876                         break;
877         }
878         read_unlock(&xfrm_km_lock);
879         return err;
880 }
881 EXPORT_SYMBOL(km_new_mapping);
882
883 void km_policy_expired(struct xfrm_policy *pol, int dir, int hard)
884 {
885         struct km_event c;
886
887         c.data.hard = hard;
888         c.event = XFRM_MSG_POLEXPIRE;
889         km_policy_notify(pol, dir, &c);
890
891         if (hard)
892                 wake_up(&km_waitq);
893 }
894
895 int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
896 {
897         int err;
898         u8 *data;
899         struct xfrm_mgr *km;
900         struct xfrm_policy *pol = NULL;
901
902         if (optlen <= 0 || optlen > PAGE_SIZE)
903                 return -EMSGSIZE;
904
905         data = kmalloc(optlen, GFP_KERNEL);
906         if (!data)
907                 return -ENOMEM;
908
909         err = -EFAULT;
910         if (copy_from_user(data, optval, optlen))
911                 goto out;
912
913         err = -EINVAL;
914         read_lock(&xfrm_km_lock);
915         list_for_each_entry(km, &xfrm_km_list, list) {
916                 pol = km->compile_policy(sk->sk_family, optname, data,
917                                          optlen, &err);
918                 if (err >= 0)
919                         break;
920         }
921         read_unlock(&xfrm_km_lock);
922
923         if (err >= 0) {
924                 xfrm_sk_policy_insert(sk, err, pol);
925                 xfrm_pol_put(pol);
926                 err = 0;
927         }
928
929 out:
930         kfree(data);
931         return err;
932 }
933 EXPORT_SYMBOL(xfrm_user_policy);
934
935 int xfrm_register_km(struct xfrm_mgr *km)
936 {
937         write_lock_bh(&xfrm_km_lock);
938         list_add_tail(&km->list, &xfrm_km_list);
939         write_unlock_bh(&xfrm_km_lock);
940         return 0;
941 }
942 EXPORT_SYMBOL(xfrm_register_km);
943
944 int xfrm_unregister_km(struct xfrm_mgr *km)
945 {
946         write_lock_bh(&xfrm_km_lock);
947         list_del(&km->list);
948         write_unlock_bh(&xfrm_km_lock);
949         return 0;
950 }
951 EXPORT_SYMBOL(xfrm_unregister_km);
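/*
 * Illustrative sketch (not part of the original file): the shape of a minimal
 * key-manager registration.  Only the callbacks invoked in this file (notify,
 * notify_policy, acquire, new_mapping, compile_policy) are visible here, and
 * the prototypes below are inferred from those call sites, so treat this as
 * an assumption about how a KM module wires itself up.
 */
#if 0
static int sketch_acquire(struct xfrm_state *x, struct xfrm_tmpl *t,
			  struct xfrm_policy *xp, int dir)
{
	/* build and send an ACQUIRE message to userspace here */
	return 0;
}

static int sketch_notify(struct xfrm_state *x, struct km_event *c)
{
	/* forward expire/add/delete events to userspace here */
	return 0;
}

static struct xfrm_mgr sketch_mgr = {
	.notify		= sketch_notify,
	.acquire	= sketch_acquire,
};

static int __init sketch_km_init(void)
{
	return xfrm_register_km(&sketch_mgr);
}

static void __exit sketch_km_exit(void)
{
	xfrm_unregister_km(&sketch_mgr);
}
#endif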
952
953 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo)
954 {
955         int err = 0;
956         if (unlikely(afinfo == NULL))
957                 return -EINVAL;
958         if (unlikely(afinfo->family >= NPROTO))
959                 return -EAFNOSUPPORT;
960         write_lock(&xfrm_state_afinfo_lock);
961         if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL))
962                 err = -ENOBUFS;
963         else {
964                 afinfo->state_bydst = xfrm_state_bydst;
965                 afinfo->state_byspi = xfrm_state_byspi;
966                 xfrm_state_afinfo[afinfo->family] = afinfo;
967         }
968         write_unlock(&xfrm_state_afinfo_lock);
969         return err;
970 }
971 EXPORT_SYMBOL(xfrm_state_register_afinfo);
972
973 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
974 {
975         int err = 0;
976         if (unlikely(afinfo == NULL))
977                 return -EINVAL;
978         if (unlikely(afinfo->family >= NPROTO))
979                 return -EAFNOSUPPORT;
980         write_lock(&xfrm_state_afinfo_lock);
981         if (likely(xfrm_state_afinfo[afinfo->family] != NULL)) {
982                 if (unlikely(xfrm_state_afinfo[afinfo->family] != afinfo))
983                         err = -EINVAL;
984                 else {
985                         xfrm_state_afinfo[afinfo->family] = NULL;
986                         afinfo->state_byspi = NULL;
987                         afinfo->state_bydst = NULL;
988                 }
989         }
990         write_unlock(&xfrm_state_afinfo_lock);
991         return err;
992 }
993 EXPORT_SYMBOL(xfrm_state_unregister_afinfo);
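/*
 * Illustrative sketch (not part of the original file): what a per-family
 * module (e.g. the IPv4 xfrm state code) is expected to pass to
 * xfrm_state_register_afinfo().  The field names are taken from their uses in
 * this file; the callback helpers are hypothetical and the .lock initializer
 * is an assumption.  state_bydst/state_byspi are filled in by the
 * registration itself, not by the caller.
 */
#if 0
static struct xfrm_state_afinfo sketch_afinfo = {
	.family		= AF_INET,
	.lock		= RW_LOCK_UNLOCKED,	/* rwlock taken by get/put_afinfo */
	.init_tempsel	= sketch_init_tempsel,	/* hypothetical helpers */
	.state_lookup	= sketch_state_lookup,
	.find_acq	= sketch_find_acq,
};

static int __init sketch_af_init(void)
{
	return xfrm_state_register_afinfo(&sketch_afinfo);
}

static void __exit sketch_af_exit(void)
{
	xfrm_state_unregister_afinfo(&sketch_afinfo);
}
#endif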
994
995 static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned short family)
996 {
997         struct xfrm_state_afinfo *afinfo;
998         if (unlikely(family >= NPROTO))
999                 return NULL;
1000         read_lock(&xfrm_state_afinfo_lock);
1001         afinfo = xfrm_state_afinfo[family];
1002         if (likely(afinfo != NULL))
1003                 read_lock(&afinfo->lock);
1004         read_unlock(&xfrm_state_afinfo_lock);
1005         return afinfo;
1006 }
1007
1008 static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo)
1009 {
1010         if (unlikely(afinfo == NULL))
1011                 return;
1012         read_unlock(&afinfo->lock);
1013 }
1014
1015 /* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
1016 void xfrm_state_delete_tunnel(struct xfrm_state *x)
1017 {
1018         if (x->tunnel) {
1019                 struct xfrm_state *t = x->tunnel;
1020
1021                 if (atomic_read(&t->tunnel_users) == 2)
1022                         xfrm_state_delete(t);
1023                 atomic_dec(&t->tunnel_users);
1024                 xfrm_state_put(t);
1025                 x->tunnel = NULL;
1026         }
1027 }
1028 EXPORT_SYMBOL(xfrm_state_delete_tunnel);
1029
1030 /*
1031  * This function is NOT optimal.  For example, with ESP it will give an
1032  * MTU that's usually two bytes short of being optimal.  However, it will
1033  * usually give an answer that's a multiple of 4 provided the input is
1034  * also a multiple of 4.
1035  */
1036 int xfrm_state_mtu(struct xfrm_state *x, int mtu)
1037 {
1038         int res = mtu;
1039
1040         res -= x->props.header_len;
1041
1042         for (;;) {
1043                 int m = res;
1044
1045                 if (m < 68)
1046                         return 68;
1047
1048                 spin_lock_bh(&x->lock);
1049                 if (x->km.state == XFRM_STATE_VALID &&
1050                     x->type && x->type->get_max_size)
1051                         m = x->type->get_max_size(x, m);
1052                 else
1053                         m += x->props.header_len;
1054                 spin_unlock_bh(&x->lock);
1055
1056                 if (m <= mtu)
1057                         break;
1058                 res -= (m - mtu);
1059         }
1060
1061         return res;
1062 }
1063
1064 EXPORT_SYMBOL(xfrm_state_mtu);
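/*
 * Worked example for the loop above (numbers assumed): with mtu == 1500 and
 * header_len == 32, the first pass tries a payload of m == 1468.  If the
 * transform's get_max_size() reports that such a payload would grow to, say,
 * 1508 bytes on the wire, the overshoot of 8 is subtracted and the next pass
 * tries 1460, repeating until the projected packet fits within the original
 * mtu (or the 68-byte IPv4 minimum is reached).
 */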
1065
1066 int xfrm_init_state(struct xfrm_state *x)
1067 {
1068         struct xfrm_state_afinfo *afinfo;
1069         int family = x->props.family;
1070         int err;
1071
1072         err = -EAFNOSUPPORT;
1073         afinfo = xfrm_state_get_afinfo(family);
1074         if (!afinfo)
1075                 goto error;
1076
1077         err = 0;
1078         if (afinfo->init_flags)
1079                 err = afinfo->init_flags(x);
1080
1081         xfrm_state_put_afinfo(afinfo);
1082
1083         if (err)
1084                 goto error;
1085
1086         err = -EPROTONOSUPPORT;
1087         x->type = xfrm_get_type(x->id.proto, family);
1088         if (x->type == NULL)
1089                 goto error;
1090
1091         err = x->type->init_state(x);
1092         if (err)
1093                 goto error;
1094
1095         x->km.state = XFRM_STATE_VALID;
1096
1097 error:
1098         return err;
1099 }
1100
1101 EXPORT_SYMBOL(xfrm_init_state);
1102  
1103 void __init xfrm_state_init(void)
1104 {
1105         int i;
1106
1107         for (i=0; i<XFRM_DST_HSIZE; i++) {
1108                 INIT_LIST_HEAD(&xfrm_state_bydst[i]);
1109                 INIT_LIST_HEAD(&xfrm_state_byspi[i]);
1110         }
1111         INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task, NULL);
1112 }
1113