/*
 * xfrm_policy.c
 *
 * Changes:
 *      Mitsuru KANDA @USAGI
 *      Kazunori MIYAZAWA @USAGI
 *      Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *              IPv6 support
 *      Kazunori MIYAZAWA @USAGI
 *      YOSHIFUJI Hideaki
 *              Split up af-specific portion
 *      Derek Atkins <derek@ihtfp.com>          Add the post_input processor
 *
 */

#include <linux/config.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <net/xfrm.h>
#include <net/ip.h>

DECLARE_MUTEX(xfrm_cfg_sem);
EXPORT_SYMBOL(xfrm_cfg_sem);

static DEFINE_RWLOCK(xfrm_policy_lock);

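/* Global SPD. The first XFRM_POLICY_MAX entries of xfrm_policy_list
 * are the main per-direction chains (in/out/fwd); the second
 * XFRM_POLICY_MAX entries hold the per-socket policies, linked in at
 * XFRM_POLICY_MAX + dir (see xfrm_sk_policy_insert below).
 */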
struct xfrm_policy *xfrm_policy_list[XFRM_POLICY_MAX*2];
EXPORT_SYMBOL(xfrm_policy_list);

static DEFINE_RWLOCK(xfrm_policy_afinfo_lock);
static struct xfrm_policy_afinfo *xfrm_policy_afinfo[NPROTO];

static kmem_cache_t *xfrm_dst_cache __read_mostly;

static struct work_struct xfrm_policy_gc_work;
static struct list_head xfrm_policy_gc_list =
        LIST_HEAD_INIT(xfrm_policy_gc_list);
static DEFINE_SPINLOCK(xfrm_policy_gc_lock);

static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family);
static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo);

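/* Each address family's afinfo provides a type_map that maps IPsec
 * protocol numbers to their xfrm_type handlers; protocol modules
 * register and unregister themselves here. A sketch of how an ESP
 * module might register (the full xfrm_type carries more fields,
 * elided here):
 *
 *      static struct xfrm_type esp_type = {
 *              .description    = "ESP4",
 *              .owner          = THIS_MODULE,
 *              .proto          = IPPROTO_ESP,
 *              ...
 *      };
 *
 *      xfrm_register_type(&esp_type, AF_INET);
 */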
int xfrm_register_type(struct xfrm_type *type, unsigned short family)
{
        struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
        struct xfrm_type_map *typemap;
        int err = 0;

        if (unlikely(afinfo == NULL))
                return -EAFNOSUPPORT;
        typemap = afinfo->type_map;

        write_lock(&typemap->lock);
        if (likely(typemap->map[type->proto] == NULL))
                typemap->map[type->proto] = type;
        else
                err = -EEXIST;
        write_unlock(&typemap->lock);
        xfrm_policy_put_afinfo(afinfo);
        return err;
}
EXPORT_SYMBOL(xfrm_register_type);

int xfrm_unregister_type(struct xfrm_type *type, unsigned short family)
{
        struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
        struct xfrm_type_map *typemap;
        int err = 0;

        if (unlikely(afinfo == NULL))
                return -EAFNOSUPPORT;
        typemap = afinfo->type_map;

        write_lock(&typemap->lock);
        if (unlikely(typemap->map[type->proto] != type))
                err = -ENOENT;
        else
                typemap->map[type->proto] = NULL;
        write_unlock(&typemap->lock);
        xfrm_policy_put_afinfo(afinfo);
        return err;
}
EXPORT_SYMBOL(xfrm_unregister_type);

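/* Look up the xfrm_type for a protocol within a family, taking a
 * module reference on it. On a miss we try once to load the provider
 * through the "xfrm-type-AF-PROTO" module alias, then retry the
 * lookup.
 */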
struct xfrm_type *xfrm_get_type(u8 proto, unsigned short family)
{
        struct xfrm_policy_afinfo *afinfo;
        struct xfrm_type_map *typemap;
        struct xfrm_type *type;
        int modload_attempted = 0;

retry:
        afinfo = xfrm_policy_get_afinfo(family);
        if (unlikely(afinfo == NULL))
                return NULL;
        typemap = afinfo->type_map;

        read_lock(&typemap->lock);
        type = typemap->map[proto];
        if (unlikely(type && !try_module_get(type->owner)))
                type = NULL;
        read_unlock(&typemap->lock);
        if (!type && !modload_attempted) {
                xfrm_policy_put_afinfo(afinfo);
                request_module("xfrm-type-%d-%d",
                               (int) family, (int) proto);
                modload_attempted = 1;
                goto retry;
        }

        xfrm_policy_put_afinfo(afinfo);
        return type;
}

int xfrm_dst_lookup(struct xfrm_dst **dst, struct flowi *fl,
                    unsigned short family)
{
        struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
        int err = 0;

        if (unlikely(afinfo == NULL))
                return -EAFNOSUPPORT;

        if (likely(afinfo->dst_lookup != NULL))
                err = afinfo->dst_lookup(dst, fl);
        else
                err = -EINVAL;
        xfrm_policy_put_afinfo(afinfo);
        return err;
}
EXPORT_SYMBOL(xfrm_dst_lookup);

void xfrm_put_type(struct xfrm_type *type)
{
        module_put(type->owner);
}

static inline unsigned long make_jiffies(long secs)
{
        if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
                return MAX_SCHEDULE_TIMEOUT-1;
        else
                return secs*HZ;
}

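/* Per-policy lifetime timer. Computes the nearest soft or hard expiry
 * from the add/use times: a soft expiry notifies the key managers
 * (km_policy_expired() with hard == 0) and rearms the timer after
 * XFRM_KM_TIMEOUT; a hard expiry deletes the policy.
 */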
static void xfrm_policy_timer(unsigned long data)
{
        struct xfrm_policy *xp = (struct xfrm_policy*)data;
        unsigned long now = (unsigned long)xtime.tv_sec;
        long next = LONG_MAX;
        int warn = 0;
        int dir;

        read_lock(&xp->lock);

        if (xp->dead)
                goto out;

        dir = xfrm_policy_id2dir(xp->index);

        if (xp->lft.hard_add_expires_seconds) {
                long tmo = xp->lft.hard_add_expires_seconds +
                        xp->curlft.add_time - now;
                if (tmo <= 0)
                        goto expired;
                if (tmo < next)
                        next = tmo;
        }
        if (xp->lft.hard_use_expires_seconds) {
                long tmo = xp->lft.hard_use_expires_seconds +
                        (xp->curlft.use_time ? : xp->curlft.add_time) - now;
                if (tmo <= 0)
                        goto expired;
                if (tmo < next)
                        next = tmo;
        }
        if (xp->lft.soft_add_expires_seconds) {
                long tmo = xp->lft.soft_add_expires_seconds +
                        xp->curlft.add_time - now;
                if (tmo <= 0) {
                        warn = 1;
                        tmo = XFRM_KM_TIMEOUT;
                }
                if (tmo < next)
                        next = tmo;
        }
        if (xp->lft.soft_use_expires_seconds) {
                long tmo = xp->lft.soft_use_expires_seconds +
                        (xp->curlft.use_time ? : xp->curlft.add_time) - now;
                if (tmo <= 0) {
                        warn = 1;
                        tmo = XFRM_KM_TIMEOUT;
                }
                if (tmo < next)
                        next = tmo;
        }

        if (warn)
                km_policy_expired(xp, dir, 0);
        if (next != LONG_MAX &&
            !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
                xfrm_pol_hold(xp);

out:
        read_unlock(&xp->lock);
        xfrm_pol_put(xp);
        return;

expired:
        read_unlock(&xp->lock);
        if (!xfrm_policy_delete(xp, dir))
                km_policy_expired(xp, dir, 1);
        xfrm_pol_put(xp);
}

/* Allocate an xfrm_policy. Not used internally; it is intended for use
 * by pfkeyv2 SPD calls.
 */

struct xfrm_policy *xfrm_policy_alloc(gfp_t gfp)
{
        struct xfrm_policy *policy;

        policy = kmalloc(sizeof(struct xfrm_policy), gfp);

        if (policy) {
                memset(policy, 0, sizeof(struct xfrm_policy));
                atomic_set(&policy->refcnt, 1);
                rwlock_init(&policy->lock);
                init_timer(&policy->timer);
                policy->timer.data = (unsigned long)policy;
                policy->timer.function = xfrm_policy_timer;
        }
        return policy;
}
EXPORT_SYMBOL(xfrm_policy_alloc);

/* Destroy xfrm_policy: descendant resources must have been released by this point. */

void __xfrm_policy_destroy(struct xfrm_policy *policy)
{
        BUG_ON(!policy->dead);

        BUG_ON(policy->bundles);

        if (del_timer(&policy->timer))
                BUG();

        security_xfrm_policy_free(policy);
        kfree(policy);
}
EXPORT_SYMBOL(__xfrm_policy_destroy);

static void xfrm_policy_gc_kill(struct xfrm_policy *policy)
{
        struct dst_entry *dst;

        while ((dst = policy->bundles) != NULL) {
                policy->bundles = dst->next;
                dst_free(dst);
        }

        if (del_timer(&policy->timer))
                atomic_dec(&policy->refcnt);

        if (atomic_read(&policy->refcnt) > 1)
                flow_cache_flush();

        xfrm_pol_put(policy);
}

static void xfrm_policy_gc_task(void *data)
{
        struct xfrm_policy *policy;
        struct list_head *entry, *tmp;
        struct list_head gc_list = LIST_HEAD_INIT(gc_list);

        spin_lock_bh(&xfrm_policy_gc_lock);
        list_splice_init(&xfrm_policy_gc_list, &gc_list);
        spin_unlock_bh(&xfrm_policy_gc_lock);

        list_for_each_safe(entry, tmp, &gc_list) {
                policy = list_entry(entry, struct xfrm_policy, list);
                xfrm_policy_gc_kill(policy);
        }
}

/* Announce the entry dead and queue it for garbage collection, which
 * releases its descendant resources. The rule must already have been
 * unlinked from the lists by this point.
 */

static void xfrm_policy_kill(struct xfrm_policy *policy)
{
        int dead;

        write_lock_bh(&policy->lock);
        dead = policy->dead;
        policy->dead = 1;
        write_unlock_bh(&policy->lock);

        if (unlikely(dead)) {
                WARN_ON(1);
                return;
        }

        spin_lock(&xfrm_policy_gc_lock);
        list_add(&policy->list, &xfrm_policy_gc_list);
        spin_unlock(&xfrm_policy_gc_lock);

        schedule_work(&xfrm_policy_gc_work);
}

/* Generate a new index. KAME seems to generate indexes ordered by cost
 * at the price of completely unpredictable rule ordering; that will not
 * do here. The low three bits encode the direction, so
 * xfrm_policy_id2dir() can recover it from the index.
 */
static u32 xfrm_gen_index(int dir)
{
        u32 idx;
        struct xfrm_policy *p;
        static u32 idx_generator;

        for (;;) {
                idx = (idx_generator | dir);
                idx_generator += 8;
                if (idx == 0)
                        idx = 8;
                for (p = xfrm_policy_list[dir]; p; p = p->next) {
                        if (p->index == idx)
                                break;
                }
                if (!p)
                        return idx;
        }
}

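/* Insert a policy into the SPD chain for a direction. The chain is
 * kept sorted by ascending priority; a policy with an identical
 * selector and security context replaces the old entry (or fails with
 * -EEXIST when exclusive insertion is requested). The flow cache
 * generation is bumped, and cached bundles of the policies following
 * the new entry are flushed so the insertion takes effect at once.
 */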
int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
{
        struct xfrm_policy *pol, **p;
        struct xfrm_policy *delpol = NULL;
        struct xfrm_policy **newpos = NULL;
        struct dst_entry *gc_list;

        write_lock_bh(&xfrm_policy_lock);
        for (p = &xfrm_policy_list[dir]; (pol=*p)!=NULL;) {
                if (!delpol && memcmp(&policy->selector, &pol->selector, sizeof(pol->selector)) == 0 &&
                    xfrm_sec_ctx_match(pol->security, policy->security)) {
                        if (excl) {
                                write_unlock_bh(&xfrm_policy_lock);
                                return -EEXIST;
                        }
                        *p = pol->next;
                        delpol = pol;
                        if (policy->priority > pol->priority)
                                continue;
                } else if (policy->priority >= pol->priority) {
                        p = &pol->next;
                        continue;
                }
                if (!newpos)
                        newpos = p;
                if (delpol)
                        break;
                p = &pol->next;
        }
        if (newpos)
                p = newpos;
        xfrm_pol_hold(policy);
        policy->next = *p;
        *p = policy;
        atomic_inc(&flow_cache_genid);
        policy->index = delpol ? delpol->index : xfrm_gen_index(dir);
        policy->curlft.add_time = (unsigned long)xtime.tv_sec;
        policy->curlft.use_time = 0;
        if (!mod_timer(&policy->timer, jiffies + HZ))
                xfrm_pol_hold(policy);
        write_unlock_bh(&xfrm_policy_lock);

        if (delpol)
                xfrm_policy_kill(delpol);

        read_lock_bh(&xfrm_policy_lock);
        gc_list = NULL;
        for (policy = policy->next; policy; policy = policy->next) {
                struct dst_entry *dst;

                write_lock(&policy->lock);
                dst = policy->bundles;
                if (dst) {
                        struct dst_entry *tail = dst;
                        while (tail->next)
                                tail = tail->next;
                        tail->next = gc_list;
                        gc_list = dst;

                        policy->bundles = NULL;
                }
                write_unlock(&policy->lock);
        }
        read_unlock_bh(&xfrm_policy_lock);

        while (gc_list) {
                struct dst_entry *dst = gc_list;

                gc_list = dst->next;
                dst_free(dst);
        }

        return 0;
}
EXPORT_SYMBOL(xfrm_policy_insert);

struct xfrm_policy *xfrm_policy_bysel_ctx(int dir, struct xfrm_selector *sel,
                                          struct xfrm_sec_ctx *ctx, int delete)
{
        struct xfrm_policy *pol, **p;

        write_lock_bh(&xfrm_policy_lock);
        for (p = &xfrm_policy_list[dir]; (pol=*p)!=NULL; p = &pol->next) {
                if ((memcmp(sel, &pol->selector, sizeof(*sel)) == 0) &&
                    (xfrm_sec_ctx_match(ctx, pol->security))) {
                        xfrm_pol_hold(pol);
                        if (delete)
                                *p = pol->next;
                        break;
                }
        }
        write_unlock_bh(&xfrm_policy_lock);

        if (pol && delete) {
                atomic_inc(&flow_cache_genid);
                xfrm_policy_kill(pol);
        }
        return pol;
}
EXPORT_SYMBOL(xfrm_policy_bysel_ctx);

struct xfrm_policy *xfrm_policy_byid(int dir, u32 id, int delete)
{
        struct xfrm_policy *pol, **p;

        write_lock_bh(&xfrm_policy_lock);
        for (p = &xfrm_policy_list[dir]; (pol=*p)!=NULL; p = &pol->next) {
                if (pol->index == id) {
                        xfrm_pol_hold(pol);
                        if (delete)
                                *p = pol->next;
                        break;
                }
        }
        write_unlock_bh(&xfrm_policy_lock);

        if (pol && delete) {
                atomic_inc(&flow_cache_genid);
                xfrm_policy_kill(pol);
        }
        return pol;
}
EXPORT_SYMBOL(xfrm_policy_byid);

void xfrm_policy_flush(void)
{
        struct xfrm_policy *xp;
        int dir;

        write_lock_bh(&xfrm_policy_lock);
        for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
                while ((xp = xfrm_policy_list[dir]) != NULL) {
                        xfrm_policy_list[dir] = xp->next;
                        write_unlock_bh(&xfrm_policy_lock);

                        xfrm_policy_kill(xp);

                        write_lock_bh(&xfrm_policy_lock);
                }
        }
        atomic_inc(&flow_cache_genid);
        write_unlock_bh(&xfrm_policy_lock);
}
EXPORT_SYMBOL(xfrm_policy_flush);

int xfrm_policy_walk(int (*func)(struct xfrm_policy *, int, int, void*),
                     void *data)
{
        struct xfrm_policy *xp;
        int dir;
        int count = 0;
        int error = 0;

        read_lock_bh(&xfrm_policy_lock);
        for (dir = 0; dir < 2*XFRM_POLICY_MAX; dir++) {
                for (xp = xfrm_policy_list[dir]; xp; xp = xp->next)
                        count++;
        }

        if (count == 0) {
                error = -ENOENT;
                goto out;
        }

        for (dir = 0; dir < 2*XFRM_POLICY_MAX; dir++) {
                for (xp = xfrm_policy_list[dir]; xp; xp = xp->next) {
                        error = func(xp, dir%XFRM_POLICY_MAX, --count, data);
                        if (error)
                                goto out;
                }
        }

out:
        read_unlock_bh(&xfrm_policy_lock);
        return error;
}
EXPORT_SYMBOL(xfrm_policy_walk);

/* Find the policy to apply to this flow. */

static void xfrm_policy_lookup(struct flowi *fl, u32 sk_sid, u16 family, u8 dir,
                               void **objp, atomic_t **obj_refp)
{
        struct xfrm_policy *pol;

        read_lock_bh(&xfrm_policy_lock);
        for (pol = xfrm_policy_list[dir]; pol; pol = pol->next) {
                struct xfrm_selector *sel = &pol->selector;
                int match;

                if (pol->family != family)
                        continue;

                match = xfrm_selector_match(sel, fl, family);

                if (match) {
                        if (!security_xfrm_policy_lookup(pol, sk_sid, dir)) {
                                xfrm_pol_hold(pol);
                                break;
                        }
                }
        }
        read_unlock_bh(&xfrm_policy_lock);
        if ((*objp = (void *) pol) != NULL)
                *obj_refp = &pol->refcnt;
}

static inline int policy_to_flow_dir(int dir)
{
        if (XFRM_POLICY_IN == FLOW_DIR_IN &&
            XFRM_POLICY_OUT == FLOW_DIR_OUT &&
            XFRM_POLICY_FWD == FLOW_DIR_FWD)
                return dir;
        switch (dir) {
        default:
        case XFRM_POLICY_IN:
                return FLOW_DIR_IN;
        case XFRM_POLICY_OUT:
                return FLOW_DIR_OUT;
        case XFRM_POLICY_FWD:
                return FLOW_DIR_FWD;
        }
}

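/* Look up the per-socket policy for a direction: match the socket's
 * cached policy selector against the flow and let the LSM veto the
 * match.
 */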
static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir, struct flowi *fl, u32 sk_sid)
{
        struct xfrm_policy *pol;

        read_lock_bh(&xfrm_policy_lock);
        if ((pol = sk->sk_policy[dir]) != NULL) {
                int match = xfrm_selector_match(&pol->selector, fl,
                                                sk->sk_family);
                int err = 0;

                if (match)
                        err = security_xfrm_policy_lookup(pol, sk_sid,
                                                          policy_to_flow_dir(dir));

                if (match && !err)
                        xfrm_pol_hold(pol);
                else
                        pol = NULL;
        }
        read_unlock_bh(&xfrm_policy_lock);
        return pol;
}

static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
{
        pol->next = xfrm_policy_list[dir];
        xfrm_policy_list[dir] = pol;
        xfrm_pol_hold(pol);
}

static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
                                                int dir)
{
        struct xfrm_policy **polp;

        for (polp = &xfrm_policy_list[dir];
             *polp != NULL; polp = &(*polp)->next) {
                if (*polp == pol) {
                        *polp = pol->next;
                        return pol;
                }
        }
        return NULL;
}

int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
{
        write_lock_bh(&xfrm_policy_lock);
        pol = __xfrm_policy_unlink(pol, dir);
        write_unlock_bh(&xfrm_policy_lock);
        if (pol) {
                if (dir < XFRM_POLICY_MAX)
                        atomic_inc(&flow_cache_genid);
                xfrm_policy_kill(pol);
                return 0;
        }
        return -ENOENT;
}

int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
{
        struct xfrm_policy *old_pol;

        write_lock_bh(&xfrm_policy_lock);
        old_pol = sk->sk_policy[dir];
        sk->sk_policy[dir] = pol;
        if (pol) {
                pol->curlft.add_time = (unsigned long)xtime.tv_sec;
                pol->index = xfrm_gen_index(XFRM_POLICY_MAX+dir);
                __xfrm_policy_link(pol, XFRM_POLICY_MAX+dir);
        }
        if (old_pol)
                __xfrm_policy_unlink(old_pol, XFRM_POLICY_MAX+dir);
        write_unlock_bh(&xfrm_policy_lock);

        if (old_pol) {
                xfrm_policy_kill(old_pol);
        }
        return 0;
}

static struct xfrm_policy *clone_policy(struct xfrm_policy *old, int dir)
{
        struct xfrm_policy *newp = xfrm_policy_alloc(GFP_ATOMIC);

        if (newp) {
                newp->selector = old->selector;
                if (security_xfrm_policy_clone(old, newp)) {
                        kfree(newp);
                        return NULL;  /* ENOMEM */
                }
                newp->lft = old->lft;
                newp->curlft = old->curlft;
                newp->action = old->action;
                newp->flags = old->flags;
                newp->xfrm_nr = old->xfrm_nr;
                newp->index = old->index;
                memcpy(newp->xfrm_vec, old->xfrm_vec,
                       newp->xfrm_nr*sizeof(struct xfrm_tmpl));
                write_lock_bh(&xfrm_policy_lock);
                __xfrm_policy_link(newp, XFRM_POLICY_MAX+dir);
                write_unlock_bh(&xfrm_policy_lock);
                xfrm_pol_put(newp);
        }
        return newp;
}

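/* Copy both per-socket policies onto a new socket, typically when the
 * socket itself is being cloned. On allocation failure -ENOMEM is
 * returned and the child is left without the corresponding policy.
 */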
int __xfrm_sk_clone_policy(struct sock *sk)
{
        struct xfrm_policy *p0 = sk->sk_policy[0],
                           *p1 = sk->sk_policy[1];

        sk->sk_policy[0] = sk->sk_policy[1] = NULL;
        if (p0 && (sk->sk_policy[0] = clone_policy(p0, 0)) == NULL)
                return -ENOMEM;
        if (p1 && (sk->sk_policy[1] = clone_policy(p1, 1)) == NULL)
                return -ENOMEM;
        return 0;
}

/* Resolve the list of templates for the flow, given the policy. */

static int
xfrm_tmpl_resolve(struct xfrm_policy *policy, struct flowi *fl,
                  struct xfrm_state **xfrm,
                  unsigned short family)
{
        int nx;
        int i, error;
        xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
        xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);

        for (nx=0, i = 0; i < policy->xfrm_nr; i++) {
                struct xfrm_state *x;
                xfrm_address_t *remote = daddr;
                xfrm_address_t *local  = saddr;
                struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];

                if (tmpl->mode) {
                        remote = &tmpl->id.daddr;
                        local = &tmpl->saddr;
                }

                x = xfrm_state_find(remote, local, fl, tmpl, policy, &error, family);

                if (x && x->km.state == XFRM_STATE_VALID) {
                        xfrm[nx++] = x;
                        daddr = remote;
                        saddr = local;
                        continue;
                }
                if (x) {
                        error = (x->km.state == XFRM_STATE_ERROR ?
                                 -EINVAL : -EAGAIN);
                        xfrm_state_put(x);
                }

                if (!tmpl->optional)
                        goto fail;
        }
        return nx;

fail:
        for (nx--; nx>=0; nx--)
                xfrm_state_put(xfrm[nx]);
        return error;
}

/* Look for an existing bundle created by this policy that accepts the
 * flow, via the per-family find_bundle helper.
 */

static struct dst_entry *
xfrm_find_bundle(struct flowi *fl, struct xfrm_policy *policy, unsigned short family)
{
        struct dst_entry *x;
        struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
        if (unlikely(afinfo == NULL))
                return ERR_PTR(-EINVAL);
        x = afinfo->find_bundle(fl, policy);
        xfrm_policy_put_afinfo(afinfo);
        return x;
}

/* Allocate a chain of dst_entry's, attach the known xfrm's and
 * calculate all the metrics. In short, bundle a bundle.
 */

static int
xfrm_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int nx,
                   struct flowi *fl, struct dst_entry **dst_p,
                   unsigned short family)
{
        int err;
        struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
        if (unlikely(afinfo == NULL))
                return -EINVAL;
        err = afinfo->bundle_create(policy, xfrm, nx, fl, dst_p);
        xfrm_policy_put_afinfo(afinfo);
        return err;
}

static int stale_bundle(struct dst_entry *dst);

/* Main function: find or create a bundle for the given flow.
 *
 * At the moment we accept a raw IP route here, mostly to speed up
 * lookups on interfaces with IPsec disabled. When template resolution
 * returns -EAGAIN and blocking is allowed, we sleep on km_waitq until
 * a key manager makes progress, then retry.
 */
int xfrm_lookup(struct dst_entry **dst_p, struct flowi *fl,
                struct sock *sk, int flags)
{
        struct xfrm_policy *policy;
        struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
        struct dst_entry *dst, *dst_orig = *dst_p;
        int nx = 0;
        int err;
        u32 genid;
        u16 family = dst_orig->ops->family;
        u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT);
        u32 sk_sid = security_sk_sid(sk, fl, dir);
restart:
        genid = atomic_read(&flow_cache_genid);
        policy = NULL;
        if (sk && sk->sk_policy[1])
                policy = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl, sk_sid);

        if (!policy) {
                /* To accelerate a bit...  */
                if ((dst_orig->flags & DST_NOXFRM) || !xfrm_policy_list[XFRM_POLICY_OUT])
                        return 0;

                policy = flow_cache_lookup(fl, sk_sid, family, dir,
                                           xfrm_policy_lookup);
        }

        if (!policy)
                return 0;

        policy->curlft.use_time = (unsigned long)xtime.tv_sec;

        switch (policy->action) {
        case XFRM_POLICY_BLOCK:
                /* Prohibit the flow */
                err = -EPERM;
                goto error;

        case XFRM_POLICY_ALLOW:
                if (policy->xfrm_nr == 0) {
                        /* Flow passes not transformed. */
                        xfrm_pol_put(policy);
                        return 0;
                }

                /* Try to find a matching bundle.
                 *
                 * LATER: get help from the flow cache. It is optional,
                 * and is needed only for output policy.
                 */
                dst = xfrm_find_bundle(fl, policy, family);
                if (IS_ERR(dst)) {
                        err = PTR_ERR(dst);
                        goto error;
                }

                if (dst)
                        break;

                nx = xfrm_tmpl_resolve(policy, fl, xfrm, family);

                if (unlikely(nx<0)) {
                        err = nx;
                        if (err == -EAGAIN && flags) {
                                DECLARE_WAITQUEUE(wait, current);

                                add_wait_queue(&km_waitq, &wait);
                                set_current_state(TASK_INTERRUPTIBLE);
                                schedule();
                                set_current_state(TASK_RUNNING);
                                remove_wait_queue(&km_waitq, &wait);

                                nx = xfrm_tmpl_resolve(policy, fl, xfrm, family);

                                if (nx == -EAGAIN && signal_pending(current)) {
                                        err = -ERESTART;
                                        goto error;
                                }
                                if (nx == -EAGAIN ||
                                    genid != atomic_read(&flow_cache_genid)) {
                                        xfrm_pol_put(policy);
                                        goto restart;
                                }
                                err = nx;
                        }
                        if (err < 0)
                                goto error;
                }
                if (nx == 0) {
                        /* Flow passes not transformed. */
                        xfrm_pol_put(policy);
                        return 0;
                }

                dst = dst_orig;
                err = xfrm_bundle_create(policy, xfrm, nx, fl, &dst, family);

                if (unlikely(err)) {
                        int i;
                        for (i=0; i<nx; i++)
                                xfrm_state_put(xfrm[i]);
                        goto error;
                }

                write_lock_bh(&policy->lock);
                if (unlikely(policy->dead || stale_bundle(dst))) {
                        /* While we were resolving, this policy has
                         * died or its bundle went stale. Retry: we
                         * cannot enlist a new bundle on a dead
                         * policy, nor a stale one.
                         */
                        write_unlock_bh(&policy->lock);
                        if (dst)
                                dst_free(dst);

                        err = -EHOSTUNREACH;
                        goto error;
                }
                dst->next = policy->bundles;
                policy->bundles = dst;
                dst_hold(dst);
                write_unlock_bh(&policy->lock);
        }
        *dst_p = dst;
        dst_release(dst_orig);
        xfrm_pol_put(policy);
        return 0;

error:
        dst_release(dst_orig);
        xfrm_pol_put(policy);
        *dst_p = NULL;
        return err;
}
EXPORT_SYMBOL(xfrm_lookup);

/* When an skb is transformed back to its "native" form, we have to
 * check policy restrictions. At the moment we do this in a maximally
 * stupid way. Shame on me. :-) Connected sockets must, of course,
 * have the policy cached on them.
 */

static inline int
xfrm_state_ok(struct xfrm_tmpl *tmpl, struct xfrm_state *x,
              unsigned short family)
{
        if (xfrm_state_kern(x))
                return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, family);
        return  x->id.proto == tmpl->id.proto &&
                (x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
                (x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
                x->props.mode == tmpl->mode &&
                (tmpl->aalgos & (1<<x->props.aalgo)) &&
                !(x->props.mode && xfrm_state_addr_cmp(tmpl, x, family));
}

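/* Match one template against the secpath, scanning from 'start'.
 * Returns the index just past the matched state; an unmatched
 * optional template yields 'start', while an unmatched required
 * template yields -1.
 */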
static inline int
xfrm_policy_ok(struct xfrm_tmpl *tmpl, struct sec_path *sp, int start,
               unsigned short family)
{
        int idx = start;

        if (tmpl->optional) {
                if (!tmpl->mode)
                        return start;
        } else
                start = -1;
        for (; idx < sp->len; idx++) {
                if (xfrm_state_ok(tmpl, sp->x[idx].xvec, family))
                        return ++idx;
                if (sp->x[idx].xvec->props.mode)
                        break;
        }
        return start;
}

int
xfrm_decode_session(struct sk_buff *skb, struct flowi *fl, unsigned short family)
{
        struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);

        if (unlikely(afinfo == NULL))
                return -EAFNOSUPPORT;

        afinfo->decode_session(skb, fl);
        xfrm_policy_put_afinfo(afinfo);
        return 0;
}
EXPORT_SYMBOL(xfrm_decode_session);

static inline int secpath_has_tunnel(struct sec_path *sp, int k)
{
        for (; k < sp->len; k++) {
                if (sp->x[k].xvec->props.mode)
                        return 1;
        }

        return 0;
}

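/* Inbound policy check: decode the flow from the skb, verify every SA
 * in the secpath against its own selector (running any post_input
 * processor), find the applicable policy, and make sure the secpath
 * satisfies the policy's template list in order.
 */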
int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
                        unsigned short family)
{
        struct xfrm_policy *pol;
        struct flowi fl;
        u8 fl_dir = policy_to_flow_dir(dir);
        u32 sk_sid;

        if (xfrm_decode_session(skb, &fl, family) < 0)
                return 0;
        nf_nat_decode_session(skb, &fl, family);

        sk_sid = security_sk_sid(sk, &fl, fl_dir);

        /* First, check the used SAs against their selectors. */
        if (skb->sp) {
                int i;

                for (i=skb->sp->len-1; i>=0; i--) {
                        struct sec_decap_state *xvec = &(skb->sp->x[i]);
                        if (!xfrm_selector_match(&xvec->xvec->sel, &fl, family))
                                return 0;

                        /* If there is a post_input processor, try running it */
                        if (xvec->xvec->type->post_input &&
                            (xvec->xvec->type->post_input)(xvec->xvec,
                                                           &(xvec->decap),
                                                           skb) != 0)
                                return 0;
                }
        }

        pol = NULL;
        if (sk && sk->sk_policy[dir])
                pol = xfrm_sk_policy_lookup(sk, dir, &fl, sk_sid);

        if (!pol)
                pol = flow_cache_lookup(&fl, sk_sid, family, fl_dir,
                                        xfrm_policy_lookup);

        if (!pol)
                return !skb->sp || !secpath_has_tunnel(skb->sp, 0);

        pol->curlft.use_time = (unsigned long)xtime.tv_sec;

        if (pol->action == XFRM_POLICY_ALLOW) {
                struct sec_path *sp;
                static struct sec_path dummy;
                int i, k;

                if ((sp = skb->sp) == NULL)
                        sp = &dummy;

                /* For each tunnel xfrm, find the first matching tmpl.
                 * For each tmpl before that, find the corresponding xfrm.
                 * Order is _important_. Later we will implement explicit
                 * barriers; at the moment a barrier is implied between
                 * every two transformations.
                 */
                for (i = pol->xfrm_nr-1, k = 0; i >= 0; i--) {
                        k = xfrm_policy_ok(pol->xfrm_vec+i, sp, k, family);
                        if (k < 0)
                                goto reject;
                }

                if (secpath_has_tunnel(sp, k))
                        goto reject;

                xfrm_pol_put(pol);
                return 1;
        }

reject:
        xfrm_pol_put(pol);
        return 0;
}
EXPORT_SYMBOL(__xfrm_policy_check);

int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
{
        struct flowi fl;

        if (xfrm_decode_session(skb, &fl, family) < 0)
                return 0;

        return xfrm_lookup(&skb->dst, &fl, NULL, 0) == 0;
}
EXPORT_SYMBOL(__xfrm_route_forward);

static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
{
        /* If it is marked obsolete, which is how we even get here,
         * then we have purged it from the policy bundle list and we
         * did that for a good reason.
         */
        return NULL;
}

static int stale_bundle(struct dst_entry *dst)
{
        return !xfrm_bundle_ok((struct xfrm_dst *)dst, NULL, AF_UNSPEC);
}

void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
{
        while ((dst = dst->child) && dst->xfrm && dst->dev == dev) {
                dst->dev = &loopback_dev;
                dev_hold(&loopback_dev);
                dev_put(dev);
        }
}
EXPORT_SYMBOL(xfrm_dst_ifdown);

static void xfrm_link_failure(struct sk_buff *skb)
{
        /* Impossible: such a dst must be popped before it reaches the point of failure. */
        return;
}

static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
{
        if (dst) {
                if (dst->obsolete) {
                        dst_release(dst);
                        dst = NULL;
                }
        }
        return dst;
}

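/* Walk every policy and unlink the bundles selected by 'func' onto a
 * private list, then free them after the locks have been dropped.
 */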
static void xfrm_prune_bundles(int (*func)(struct dst_entry *))
{
        int i;
        struct xfrm_policy *pol;
        struct dst_entry *dst, **dstp, *gc_list = NULL;

        read_lock_bh(&xfrm_policy_lock);
        for (i=0; i<2*XFRM_POLICY_MAX; i++) {
                for (pol = xfrm_policy_list[i]; pol; pol = pol->next) {
                        write_lock(&pol->lock);
                        dstp = &pol->bundles;
                        while ((dst=*dstp) != NULL) {
                                if (func(dst)) {
                                        *dstp = dst->next;
                                        dst->next = gc_list;
                                        gc_list = dst;
                                } else {
                                        dstp = &dst->next;
                                }
                        }
                        write_unlock(&pol->lock);
                }
        }
        read_unlock_bh(&xfrm_policy_lock);

        while (gc_list) {
                dst = gc_list;
                gc_list = dst->next;
                dst_free(dst);
        }
}

static int unused_bundle(struct dst_entry *dst)
{
        return !atomic_read(&dst->__refcnt);
}

static void __xfrm_garbage_collect(void)
{
        xfrm_prune_bundles(unused_bundle);
}

int xfrm_flush_bundles(void)
{
        xfrm_prune_bundles(stale_bundle);
        return 0;
}

static int always_true(struct dst_entry *dst)
{
        return 1;
}

void xfrm_flush_all_bundles(void)
{
        xfrm_prune_bundles(always_true);
}

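/* Walk a bundle caching each level's child and route MTUs; RTAX_MTU
 * at each level becomes the child MTU adjusted for the xfrm state's
 * overhead and clamped to the route MTU.
 */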
void xfrm_init_pmtu(struct dst_entry *dst)
{
        do {
                struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
                u32 pmtu, route_mtu_cached;

                pmtu = dst_mtu(dst->child);
                xdst->child_mtu_cached = pmtu;

                pmtu = xfrm_state_mtu(dst->xfrm, pmtu);

                route_mtu_cached = dst_mtu(xdst->route);
                xdst->route_mtu_cached = route_mtu_cached;

                if (pmtu > route_mtu_cached)
                        pmtu = route_mtu_cached;

                dst->metrics[RTAX_MTU-1] = pmtu;
        } while ((dst = dst->next));
}

EXPORT_SYMBOL(xfrm_init_pmtu);

/* Check that the bundle accepts the flow and its components are
 * still valid.
 */

int xfrm_bundle_ok(struct xfrm_dst *first, struct flowi *fl, int family)
{
        struct dst_entry *dst = &first->u.dst;
        struct xfrm_dst *last;
        u32 mtu;

        if (!dst_check(dst->path, ((struct xfrm_dst *)dst)->path_cookie) ||
            (dst->dev && !netif_running(dst->dev)))
                return 0;

        last = NULL;

        do {
                struct xfrm_dst *xdst = (struct xfrm_dst *)dst;

                if (fl && !xfrm_selector_match(&dst->xfrm->sel, fl, family))
                        return 0;
                if (dst->xfrm->km.state != XFRM_STATE_VALID)
                        return 0;

                mtu = dst_mtu(dst->child);
                if (xdst->child_mtu_cached != mtu) {
                        last = xdst;
                        xdst->child_mtu_cached = mtu;
                }

                if (!dst_check(xdst->route, xdst->route_cookie))
                        return 0;
                mtu = dst_mtu(xdst->route);
                if (xdst->route_mtu_cached != mtu) {
                        last = xdst;
                        xdst->route_mtu_cached = mtu;
                }

                dst = dst->child;
        } while (dst->xfrm);

        if (likely(!last))
                return 1;

        mtu = last->child_mtu_cached;
        for (;;) {
                dst = &last->u.dst;

                mtu = xfrm_state_mtu(dst->xfrm, mtu);
                if (mtu > last->route_mtu_cached)
                        mtu = last->route_mtu_cached;
                dst->metrics[RTAX_MTU-1] = mtu;

                if (last == first)
                        break;

                last = last->u.next;
                last->child_mtu_cached = mtu;
        }

        return 1;
}

EXPORT_SYMBOL(xfrm_bundle_ok);

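/* Register an address family's policy ops. Default dst_ops hooks
 * (check, negative_advice, link_failure and the dst kmem cache) and
 * the garbage collector are filled in here when the family left them
 * NULL.
 */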
int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
{
        int err = 0;
        if (unlikely(afinfo == NULL))
                return -EINVAL;
        if (unlikely(afinfo->family >= NPROTO))
                return -EAFNOSUPPORT;
        write_lock(&xfrm_policy_afinfo_lock);
        if (unlikely(xfrm_policy_afinfo[afinfo->family] != NULL))
                err = -ENOBUFS;
        else {
                struct dst_ops *dst_ops = afinfo->dst_ops;
                if (likely(dst_ops->kmem_cachep == NULL))
                        dst_ops->kmem_cachep = xfrm_dst_cache;
                if (likely(dst_ops->check == NULL))
                        dst_ops->check = xfrm_dst_check;
                if (likely(dst_ops->negative_advice == NULL))
                        dst_ops->negative_advice = xfrm_negative_advice;
                if (likely(dst_ops->link_failure == NULL))
                        dst_ops->link_failure = xfrm_link_failure;
                if (likely(afinfo->garbage_collect == NULL))
                        afinfo->garbage_collect = __xfrm_garbage_collect;
                xfrm_policy_afinfo[afinfo->family] = afinfo;
        }
        write_unlock(&xfrm_policy_afinfo_lock);
        return err;
}
EXPORT_SYMBOL(xfrm_policy_register_afinfo);

int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo)
{
        int err = 0;
        if (unlikely(afinfo == NULL))
                return -EINVAL;
        if (unlikely(afinfo->family >= NPROTO))
                return -EAFNOSUPPORT;
        write_lock(&xfrm_policy_afinfo_lock);
        if (likely(xfrm_policy_afinfo[afinfo->family] != NULL)) {
                if (unlikely(xfrm_policy_afinfo[afinfo->family] != afinfo))
                        err = -EINVAL;
                else {
                        struct dst_ops *dst_ops = afinfo->dst_ops;
                        xfrm_policy_afinfo[afinfo->family] = NULL;
                        dst_ops->kmem_cachep = NULL;
                        dst_ops->check = NULL;
                        dst_ops->negative_advice = NULL;
                        dst_ops->link_failure = NULL;
                        afinfo->garbage_collect = NULL;
                }
        }
        write_unlock(&xfrm_policy_afinfo_lock);
        return err;
}
EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);

static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
{
        struct xfrm_policy_afinfo *afinfo;
        if (unlikely(family >= NPROTO))
                return NULL;
        read_lock(&xfrm_policy_afinfo_lock);
        afinfo = xfrm_policy_afinfo[family];
        if (likely(afinfo != NULL))
                read_lock(&afinfo->lock);
        read_unlock(&xfrm_policy_afinfo_lock);
        return afinfo;
}

static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo)
{
        if (unlikely(afinfo == NULL))
                return;
        read_unlock(&afinfo->lock);
}

static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
        switch (event) {
        case NETDEV_DOWN:
                xfrm_flush_bundles();
        }
        return NOTIFY_DONE;
}

static struct notifier_block xfrm_dev_notifier = {
        .notifier_call  = xfrm_dev_event,
};

static void __init xfrm_policy_init(void)
{
        xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
                                           sizeof(struct xfrm_dst),
                                           0, SLAB_HWCACHE_ALIGN,
                                           NULL, NULL);
        if (!xfrm_dst_cache)
                panic("XFRM: failed to allocate xfrm_dst_cache\n");

        INIT_WORK(&xfrm_policy_gc_work, xfrm_policy_gc_task, NULL);
        register_netdevice_notifier(&xfrm_dev_notifier);
}

void __init xfrm_init(void)
{
        xfrm_state_init();
        xfrm_policy_init();
        xfrm_input_init();
}