/*
 * xfrm_policy.c
 *
 * Changes:
 *      Mitsuru KANDA @USAGI
 *      Kazunori MIYAZAWA @USAGI
 *      Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *              IPv6 support
 *      Kazunori MIYAZAWA @USAGI
 *      YOSHIFUJI Hideaki
 *              Split up af-specific portion
 *      Derek Atkins <derek@ihtfp.com>          Add the post_input processor
 *
 */

#include <asm/bug.h>
#include <linux/config.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <net/xfrm.h>
#include <net/ip.h>

DECLARE_MUTEX(xfrm_cfg_sem);
EXPORT_SYMBOL(xfrm_cfg_sem);

static DEFINE_RWLOCK(xfrm_policy_lock);

struct xfrm_policy *xfrm_policy_list[XFRM_POLICY_MAX*2];
EXPORT_SYMBOL(xfrm_policy_list);

static DEFINE_RWLOCK(xfrm_policy_afinfo_lock);
static struct xfrm_policy_afinfo *xfrm_policy_afinfo[NPROTO];

static kmem_cache_t *xfrm_dst_cache __read_mostly;

static struct work_struct xfrm_policy_gc_work;
static struct list_head xfrm_policy_gc_list =
        LIST_HEAD_INIT(xfrm_policy_gc_list);
static DEFINE_SPINLOCK(xfrm_policy_gc_lock);
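
/*
 * Locking, as used below: the global policy lists are protected by
 * xfrm_policy_lock; each policy carries its own rwlock guarding its
 * bundle list and lifetime state; dying policies are parked on
 * xfrm_policy_gc_list under xfrm_policy_gc_lock and reaped from the
 * xfrm_policy_gc_work workqueue.
 */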

static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family);
static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo);

int xfrm_register_type(struct xfrm_type *type, unsigned short family)
{
        struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
        struct xfrm_type_map *typemap;
        int err = 0;

        if (unlikely(afinfo == NULL))
                return -EAFNOSUPPORT;
        typemap = afinfo->type_map;

        write_lock(&typemap->lock);
        if (likely(typemap->map[type->proto] == NULL))
                typemap->map[type->proto] = type;
        else
                err = -EEXIST;
        write_unlock(&typemap->lock);
        xfrm_policy_put_afinfo(afinfo);
        return err;
}
EXPORT_SYMBOL(xfrm_register_type);

int xfrm_unregister_type(struct xfrm_type *type, unsigned short family)
{
        struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
        struct xfrm_type_map *typemap;
        int err = 0;

        if (unlikely(afinfo == NULL))
                return -EAFNOSUPPORT;
        typemap = afinfo->type_map;

        write_lock(&typemap->lock);
        if (unlikely(typemap->map[type->proto] != type))
                err = -ENOENT;
        else
                typemap->map[type->proto] = NULL;
        write_unlock(&typemap->lock);
        xfrm_policy_put_afinfo(afinfo);
        return err;
}
EXPORT_SYMBOL(xfrm_unregister_type);

struct xfrm_type *xfrm_get_type(u8 proto, unsigned short family)
{
        struct xfrm_policy_afinfo *afinfo;
        struct xfrm_type_map *typemap;
        struct xfrm_type *type;
        int modload_attempted = 0;

retry:
        afinfo = xfrm_policy_get_afinfo(family);
        if (unlikely(afinfo == NULL))
                return NULL;
        typemap = afinfo->type_map;

        read_lock(&typemap->lock);
        type = typemap->map[proto];
        if (unlikely(type && !try_module_get(type->owner)))
                type = NULL;
        read_unlock(&typemap->lock);
        if (!type && !modload_attempted) {
                xfrm_policy_put_afinfo(afinfo);
                request_module("xfrm-type-%d-%d",
                               (int) family, (int) proto);
                modload_attempted = 1;
                goto retry;
        }

        xfrm_policy_put_afinfo(afinfo);
        return type;
}
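
/* The request_module() call above encodes the module alias an
 * implementation is expected to provide; for example, an ESP
 * implementation for IPv4 would be loadable as "xfrm-type-2-50"
 * (AF_INET is 2, IPPROTO_ESP is 50).
 */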

int xfrm_dst_lookup(struct xfrm_dst **dst, struct flowi *fl,
                    unsigned short family)
{
        struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
        int err = 0;

        if (unlikely(afinfo == NULL))
                return -EAFNOSUPPORT;

        if (likely(afinfo->dst_lookup != NULL))
                err = afinfo->dst_lookup(dst, fl);
        else
                err = -EINVAL;
        xfrm_policy_put_afinfo(afinfo);
        return err;
}
EXPORT_SYMBOL(xfrm_dst_lookup);

void xfrm_put_type(struct xfrm_type *type)
{
        module_put(type->owner);
}

static inline unsigned long make_jiffies(long secs)
{
        if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
                return MAX_SCHEDULE_TIMEOUT-1;
        else
                return secs*HZ;
}
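
/* Example: with HZ == 1000, make_jiffies(5) yields 5000 jiffies, while
 * anything at or beyond (MAX_SCHEDULE_TIMEOUT-1)/HZ seconds is clamped
 * to MAX_SCHEDULE_TIMEOUT-1 so the conversion cannot overflow.
 */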

static void xfrm_policy_timer(unsigned long data)
{
        struct xfrm_policy *xp = (struct xfrm_policy*)data;
        unsigned long now = (unsigned long)xtime.tv_sec;
        long next = LONG_MAX;
        int warn = 0;
        int dir;

        read_lock(&xp->lock);

        if (xp->dead)
                goto out;

        dir = xfrm_policy_id2dir(xp->index);

        if (xp->lft.hard_add_expires_seconds) {
                long tmo = xp->lft.hard_add_expires_seconds +
                        xp->curlft.add_time - now;
                if (tmo <= 0)
                        goto expired;
                if (tmo < next)
                        next = tmo;
        }
        if (xp->lft.hard_use_expires_seconds) {
                long tmo = xp->lft.hard_use_expires_seconds +
                        (xp->curlft.use_time ? : xp->curlft.add_time) - now;
                if (tmo <= 0)
                        goto expired;
                if (tmo < next)
                        next = tmo;
        }
        if (xp->lft.soft_add_expires_seconds) {
                long tmo = xp->lft.soft_add_expires_seconds +
                        xp->curlft.add_time - now;
                if (tmo <= 0) {
                        warn = 1;
                        tmo = XFRM_KM_TIMEOUT;
                }
                if (tmo < next)
                        next = tmo;
        }
        if (xp->lft.soft_use_expires_seconds) {
                long tmo = xp->lft.soft_use_expires_seconds +
                        (xp->curlft.use_time ? : xp->curlft.add_time) - now;
                if (tmo <= 0) {
                        warn = 1;
                        tmo = XFRM_KM_TIMEOUT;
                }
                if (tmo < next)
                        next = tmo;
        }

        if (warn)
                km_policy_expired(xp, dir, 0);
        if (next != LONG_MAX &&
            !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
                xfrm_pol_hold(xp);

out:
        read_unlock(&xp->lock);
        xfrm_pol_put(xp);
        return;

expired:
        read_unlock(&xp->lock);
        if (!xfrm_policy_delete(xp, dir))
                km_policy_expired(xp, dir, 1);
        xfrm_pol_put(xp);
}


/* Allocate an xfrm_policy.  Not used here directly; it is meant to be
 * used by pfkeyv2 SPD calls.
 */

struct xfrm_policy *xfrm_policy_alloc(gfp_t gfp)
{
        struct xfrm_policy *policy;

        policy = kmalloc(sizeof(struct xfrm_policy), gfp);

        if (policy) {
                memset(policy, 0, sizeof(struct xfrm_policy));
                atomic_set(&policy->refcnt, 1);
                rwlock_init(&policy->lock);
                init_timer(&policy->timer);
                policy->timer.data = (unsigned long)policy;
                policy->timer.function = xfrm_policy_timer;
        }
        return policy;
}
EXPORT_SYMBOL(xfrm_policy_alloc);

/* Destroy xfrm_policy: descendant resources must already have been released. */

void __xfrm_policy_destroy(struct xfrm_policy *policy)
{
        BUG_ON(!policy->dead);

        BUG_ON(policy->bundles);

        if (del_timer(&policy->timer))
                BUG();

        security_xfrm_policy_free(policy);
        kfree(policy);
}
EXPORT_SYMBOL(__xfrm_policy_destroy);

static void xfrm_policy_gc_kill(struct xfrm_policy *policy)
{
        struct dst_entry *dst;

        while ((dst = policy->bundles) != NULL) {
                policy->bundles = dst->next;
                dst_free(dst);
        }

        if (del_timer(&policy->timer))
                atomic_dec(&policy->refcnt);

        if (atomic_read(&policy->refcnt) > 1)
                flow_cache_flush();

        xfrm_pol_put(policy);
}

static void xfrm_policy_gc_task(void *data)
{
        struct xfrm_policy *policy;
        struct list_head *entry, *tmp;
        struct list_head gc_list = LIST_HEAD_INIT(gc_list);

        spin_lock_bh(&xfrm_policy_gc_lock);
        list_splice_init(&xfrm_policy_gc_list, &gc_list);
        spin_unlock_bh(&xfrm_policy_gc_lock);

        list_for_each_safe(entry, tmp, &gc_list) {
                policy = list_entry(entry, struct xfrm_policy, list);
                xfrm_policy_gc_kill(policy);
        }
}

/* Rule must be locked.  Release descendant resources, announce the
 * entry dead.  The rule must already be unlinked from the lists.
 */

static void xfrm_policy_kill(struct xfrm_policy *policy)
{
        int dead;

        write_lock_bh(&policy->lock);
        dead = policy->dead;
        policy->dead = 1;
        write_unlock_bh(&policy->lock);

        if (unlikely(dead)) {
                WARN_ON(1);
                return;
        }

        spin_lock(&xfrm_policy_gc_lock);
        list_add(&policy->list, &xfrm_policy_gc_list);
        spin_unlock(&xfrm_policy_gc_lock);

        schedule_work(&xfrm_policy_gc_work);
}

/* Generate a new index... KAME seems to generate indices ordered by
 * insertion cost, at the price of completely unpredictable rule
 * ordering.  That will not do here. */
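/* The low three bits of an index encode the list it was generated for
 * (dir, including the XFRM_POLICY_MAX offset used for per-socket
 * policies), and idx_generator advances in steps of eight, so
 * xfrm_policy_id2dir() can recover the direction from the index alone.
 */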
static u32 xfrm_gen_index(int dir)
{
        u32 idx;
        struct xfrm_policy *p;
        static u32 idx_generator;

        for (;;) {
                idx = (idx_generator | dir);
                idx_generator += 8;
                if (idx == 0)
                        idx = 8;
                for (p = xfrm_policy_list[dir]; p; p = p->next) {
                        if (p->index == idx)
                                break;
                }
                if (!p)
                        return idx;
        }
}

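/* Insert a policy into the list for the given direction, which is kept
 * sorted by ascending priority.  An existing policy with the same
 * selector and security context is replaced (or the insert fails with
 * -EEXIST when excl is set), and cached bundles of policies that now
 * rank below the new one are flushed.
 */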
int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
{
        struct xfrm_policy *pol, **p;
        struct xfrm_policy *delpol = NULL;
        struct xfrm_policy **newpos = NULL;
        struct dst_entry *gc_list;

        write_lock_bh(&xfrm_policy_lock);
        for (p = &xfrm_policy_list[dir]; (pol=*p)!=NULL;) {
                if (!delpol && memcmp(&policy->selector, &pol->selector, sizeof(pol->selector)) == 0 &&
                    xfrm_sec_ctx_match(pol->security, policy->security)) {
                        if (excl) {
                                write_unlock_bh(&xfrm_policy_lock);
                                return -EEXIST;
                        }
                        *p = pol->next;
                        delpol = pol;
                        if (policy->priority > pol->priority)
                                continue;
                } else if (policy->priority >= pol->priority) {
                        p = &pol->next;
                        continue;
                }
                if (!newpos)
                        newpos = p;
                if (delpol)
                        break;
                p = &pol->next;
        }
        if (newpos)
                p = newpos;
        xfrm_pol_hold(policy);
        policy->next = *p;
        *p = policy;
        atomic_inc(&flow_cache_genid);
        policy->index = delpol ? delpol->index : xfrm_gen_index(dir);
        policy->curlft.add_time = (unsigned long)xtime.tv_sec;
        policy->curlft.use_time = 0;
        if (!mod_timer(&policy->timer, jiffies + HZ))
                xfrm_pol_hold(policy);
        write_unlock_bh(&xfrm_policy_lock);

        if (delpol)
                xfrm_policy_kill(delpol);

        read_lock_bh(&xfrm_policy_lock);
        gc_list = NULL;
        for (policy = policy->next; policy; policy = policy->next) {
                struct dst_entry *dst;

                write_lock(&policy->lock);
                dst = policy->bundles;
                if (dst) {
                        struct dst_entry *tail = dst;
                        while (tail->next)
                                tail = tail->next;
                        tail->next = gc_list;
                        gc_list = dst;

                        policy->bundles = NULL;
                }
                write_unlock(&policy->lock);
        }
        read_unlock_bh(&xfrm_policy_lock);

        while (gc_list) {
                struct dst_entry *dst = gc_list;

                gc_list = dst->next;
                dst_free(dst);
        }

        return 0;
}
EXPORT_SYMBOL(xfrm_policy_insert);

struct xfrm_policy *xfrm_policy_bysel_ctx(int dir, struct xfrm_selector *sel,
                                          struct xfrm_sec_ctx *ctx, int delete)
{
        struct xfrm_policy *pol, **p;

        write_lock_bh(&xfrm_policy_lock);
        for (p = &xfrm_policy_list[dir]; (pol=*p)!=NULL; p = &pol->next) {
                if ((memcmp(sel, &pol->selector, sizeof(*sel)) == 0) &&
                    (xfrm_sec_ctx_match(ctx, pol->security))) {
                        xfrm_pol_hold(pol);
                        if (delete)
                                *p = pol->next;
                        break;
                }
        }
        write_unlock_bh(&xfrm_policy_lock);

        if (pol && delete) {
                atomic_inc(&flow_cache_genid);
                xfrm_policy_kill(pol);
        }
        return pol;
}
EXPORT_SYMBOL(xfrm_policy_bysel_ctx);

struct xfrm_policy *xfrm_policy_byid(int dir, u32 id, int delete)
{
        struct xfrm_policy *pol, **p;

        write_lock_bh(&xfrm_policy_lock);
        for (p = &xfrm_policy_list[dir]; (pol=*p)!=NULL; p = &pol->next) {
                if (pol->index == id) {
                        xfrm_pol_hold(pol);
                        if (delete)
                                *p = pol->next;
                        break;
                }
        }
        write_unlock_bh(&xfrm_policy_lock);

        if (pol && delete) {
                atomic_inc(&flow_cache_genid);
                xfrm_policy_kill(pol);
        }
        return pol;
}
EXPORT_SYMBOL(xfrm_policy_byid);

void xfrm_policy_flush(void)
{
        struct xfrm_policy *xp;
        int dir;

        write_lock_bh(&xfrm_policy_lock);
        for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
                while ((xp = xfrm_policy_list[dir]) != NULL) {
                        xfrm_policy_list[dir] = xp->next;
                        write_unlock_bh(&xfrm_policy_lock);

                        xfrm_policy_kill(xp);

                        write_lock_bh(&xfrm_policy_lock);
                }
        }
        atomic_inc(&flow_cache_genid);
        write_unlock_bh(&xfrm_policy_lock);
}
EXPORT_SYMBOL(xfrm_policy_flush);

int xfrm_policy_walk(int (*func)(struct xfrm_policy *, int, int, void*),
                     void *data)
{
        struct xfrm_policy *xp;
        int dir;
        int count = 0;
        int error = 0;

        read_lock_bh(&xfrm_policy_lock);
        for (dir = 0; dir < 2*XFRM_POLICY_MAX; dir++) {
                for (xp = xfrm_policy_list[dir]; xp; xp = xp->next)
                        count++;
        }

        if (count == 0) {
                error = -ENOENT;
                goto out;
        }

        for (dir = 0; dir < 2*XFRM_POLICY_MAX; dir++) {
                for (xp = xfrm_policy_list[dir]; xp; xp = xp->next) {
                        error = func(xp, dir%XFRM_POLICY_MAX, --count, data);
                        if (error)
                                goto out;
                }
        }

out:
        read_unlock_bh(&xfrm_policy_lock);
        return error;
}
EXPORT_SYMBOL(xfrm_policy_walk);
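
/* Sketch of a walker; dump_one is an illustrative name, not an API:
 *
 *	static int dump_one(struct xfrm_policy *xp, int dir, int count,
 *			    void *data)
 *	{
 *		return 0;	// nonzero aborts the walk
 *	}
 *
 *	err = xfrm_policy_walk(dump_one, data);
 *
 * The callback sees dir modulo XFRM_POLICY_MAX and a countdown of the
 * remaining entries; the walk returns -ENOENT when no policies exist.
 */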

/* Find policy to apply to this flow. */

static void xfrm_policy_lookup(struct flowi *fl, u32 sk_sid, u16 family, u8 dir,
                               void **objp, atomic_t **obj_refp)
{
        struct xfrm_policy *pol;

        read_lock_bh(&xfrm_policy_lock);
        for (pol = xfrm_policy_list[dir]; pol; pol = pol->next) {
                struct xfrm_selector *sel = &pol->selector;
                int match;

                if (pol->family != family)
                        continue;

                match = xfrm_selector_match(sel, fl, family);

                if (match) {
                        if (!security_xfrm_policy_lookup(pol, sk_sid, dir)) {
                                xfrm_pol_hold(pol);
                                break;
                        }
                }
        }
        read_unlock_bh(&xfrm_policy_lock);
        if ((*objp = (void *) pol) != NULL)
                *obj_refp = &pol->refcnt;
}

static inline int policy_to_flow_dir(int dir)
{
        if (XFRM_POLICY_IN == FLOW_DIR_IN &&
            XFRM_POLICY_OUT == FLOW_DIR_OUT &&
            XFRM_POLICY_FWD == FLOW_DIR_FWD)
                return dir;
        switch (dir) {
        default:
        case XFRM_POLICY_IN:
                return FLOW_DIR_IN;
        case XFRM_POLICY_OUT:
                return FLOW_DIR_OUT;
        case XFRM_POLICY_FWD:
                return FLOW_DIR_FWD;
        }
}

static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir, struct flowi *fl, u32 sk_sid)
{
        struct xfrm_policy *pol;

        read_lock_bh(&xfrm_policy_lock);
        if ((pol = sk->sk_policy[dir]) != NULL) {
                int match = xfrm_selector_match(&pol->selector, fl,
                                                sk->sk_family);
                int err = 0;

                if (match)
                        err = security_xfrm_policy_lookup(pol, sk_sid,
                                                          policy_to_flow_dir(dir));

                if (match && !err)
                        xfrm_pol_hold(pol);
                else
                        pol = NULL;
        }
        read_unlock_bh(&xfrm_policy_lock);
        return pol;
}

static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
{
        pol->next = xfrm_policy_list[dir];
        xfrm_policy_list[dir] = pol;
        xfrm_pol_hold(pol);
}

static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
                                                int dir)
{
        struct xfrm_policy **polp;

        for (polp = &xfrm_policy_list[dir];
             *polp != NULL; polp = &(*polp)->next) {
                if (*polp == pol) {
                        *polp = pol->next;
                        return pol;
                }
        }
        return NULL;
}

int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
{
        write_lock_bh(&xfrm_policy_lock);
        pol = __xfrm_policy_unlink(pol, dir);
        write_unlock_bh(&xfrm_policy_lock);
        if (pol) {
                if (dir < XFRM_POLICY_MAX)
                        atomic_inc(&flow_cache_genid);
                xfrm_policy_kill(pol);
                return 0;
        }
        return -ENOENT;
}

int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
{
        struct xfrm_policy *old_pol;

        write_lock_bh(&xfrm_policy_lock);
        old_pol = sk->sk_policy[dir];
        sk->sk_policy[dir] = pol;
        if (pol) {
                pol->curlft.add_time = (unsigned long)xtime.tv_sec;
                pol->index = xfrm_gen_index(XFRM_POLICY_MAX+dir);
                __xfrm_policy_link(pol, XFRM_POLICY_MAX+dir);
        }
        if (old_pol)
                __xfrm_policy_unlink(old_pol, XFRM_POLICY_MAX+dir);
        write_unlock_bh(&xfrm_policy_lock);

        if (old_pol) {
                xfrm_policy_kill(old_pol);
        }
        return 0;
}

static struct xfrm_policy *clone_policy(struct xfrm_policy *old, int dir)
{
        struct xfrm_policy *newp = xfrm_policy_alloc(GFP_ATOMIC);

        if (newp) {
                newp->selector = old->selector;
                if (security_xfrm_policy_clone(old, newp)) {
                        kfree(newp);
                        return NULL;  /* ENOMEM */
                }
                newp->lft = old->lft;
                newp->curlft = old->curlft;
                newp->action = old->action;
                newp->flags = old->flags;
                newp->xfrm_nr = old->xfrm_nr;
                newp->index = old->index;
                memcpy(newp->xfrm_vec, old->xfrm_vec,
                       newp->xfrm_nr*sizeof(struct xfrm_tmpl));
                write_lock_bh(&xfrm_policy_lock);
                __xfrm_policy_link(newp, XFRM_POLICY_MAX+dir);
                write_unlock_bh(&xfrm_policy_lock);
                xfrm_pol_put(newp);
        }
        return newp;
}

int __xfrm_sk_clone_policy(struct sock *sk)
{
        struct xfrm_policy *p0 = sk->sk_policy[0],
                           *p1 = sk->sk_policy[1];

        sk->sk_policy[0] = sk->sk_policy[1] = NULL;
        if (p0 && (sk->sk_policy[0] = clone_policy(p0, 0)) == NULL)
                return -ENOMEM;
        if (p1 && (sk->sk_policy[1] = clone_policy(p1, 1)) == NULL)
                return -ENOMEM;
        return 0;
}

/* Resolve list of templates for the flow, given policy. */
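/* For a tunnel-mode template the tunnel endpoints from the template
 * replace the flow addresses when resolving that state, and the
 * resolved endpoints then become the addresses used for the rest of
 * the chain.
 */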

static int
xfrm_tmpl_resolve(struct xfrm_policy *policy, struct flowi *fl,
                  struct xfrm_state **xfrm,
                  unsigned short family)
{
        int nx;
        int i, error;
        xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
        xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);

        for (nx=0, i = 0; i < policy->xfrm_nr; i++) {
                struct xfrm_state *x;
                xfrm_address_t *remote = daddr;
                xfrm_address_t *local  = saddr;
                struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];

                if (tmpl->mode) {
                        remote = &tmpl->id.daddr;
                        local = &tmpl->saddr;
                }

                x = xfrm_state_find(remote, local, fl, tmpl, policy, &error, family);

                if (x && x->km.state == XFRM_STATE_VALID) {
                        xfrm[nx++] = x;
                        daddr = remote;
                        saddr = local;
                        continue;
                }
                if (x) {
                        error = (x->km.state == XFRM_STATE_ERROR ?
                                 -EINVAL : -EAGAIN);
                        xfrm_state_put(x);
                }

                if (!tmpl->optional)
                        goto fail;
        }
        return nx;

fail:
        for (nx--; nx>=0; nx--)
                xfrm_state_put(xfrm[nx]);
        return error;
}

/* Find an existing bundle for this flow, if the policy has one. */

static struct dst_entry *
xfrm_find_bundle(struct flowi *fl, struct xfrm_policy *policy, unsigned short family)
{
        struct dst_entry *x;
        struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
        if (unlikely(afinfo == NULL))
                return ERR_PTR(-EINVAL);
        x = afinfo->find_bundle(fl, policy);
        xfrm_policy_put_afinfo(afinfo);
        return x;
}

/* Allocate a chain of dst_entry's, attach the known xfrm states and
 * calculate all the metrics.  In short, bundle a bundle.
 */

static int
xfrm_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int nx,
                   struct flowi *fl, struct dst_entry **dst_p,
                   unsigned short family)
{
        int err;
        struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
        if (unlikely(afinfo == NULL))
                return -EINVAL;
        err = afinfo->bundle_create(policy, xfrm, nx, fl, dst_p);
        xfrm_policy_put_afinfo(afinfo);
        return err;
}


static int stale_bundle(struct dst_entry *dst);

/* Main function: finds/creates a bundle for a given flow.
 *
 * At the moment we eat a raw IP route, mostly to speed up lookups
 * on interfaces with IPsec disabled.
 */
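/* Outline: find a policy (the socket's own policy first, then the flow
 * cache), resolve its templates to states, and build or reuse a bundle;
 * restart from scratch if flow_cache_genid changed or the policy died
 * while we were resolving.
 */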
int xfrm_lookup(struct dst_entry **dst_p, struct flowi *fl,
                struct sock *sk, int flags)
{
        struct xfrm_policy *policy;
        struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
        struct dst_entry *dst, *dst_orig = *dst_p;
        int nx = 0;
        int err;
        u32 genid;
        u16 family = dst_orig->ops->family;
        u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT);
        u32 sk_sid = security_sk_sid(sk, fl, dir);
restart:
        genid = atomic_read(&flow_cache_genid);
        policy = NULL;
        if (sk && sk->sk_policy[1])
                policy = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl, sk_sid);

        if (!policy) {
                /* To accelerate a bit...  */
                if ((dst_orig->flags & DST_NOXFRM) || !xfrm_policy_list[XFRM_POLICY_OUT])
                        return 0;

                policy = flow_cache_lookup(fl, sk_sid, family, dir,
                                           xfrm_policy_lookup);
        }

        if (!policy)
                return 0;

        policy->curlft.use_time = (unsigned long)xtime.tv_sec;

        switch (policy->action) {
        case XFRM_POLICY_BLOCK:
                /* Prohibit the flow */
                err = -EPERM;
                goto error;

        case XFRM_POLICY_ALLOW:
                if (policy->xfrm_nr == 0) {
                        /* Flow passes untransformed. */
                        xfrm_pol_put(policy);
                        return 0;
                }

                /* Try to find a matching bundle.
                 *
                 * LATER: help from flow cache.  It is optional, this
                 * is required only for output policy.
                 */
                dst = xfrm_find_bundle(fl, policy, family);
                if (IS_ERR(dst)) {
                        err = PTR_ERR(dst);
                        goto error;
                }

                if (dst)
                        break;

                nx = xfrm_tmpl_resolve(policy, fl, xfrm, family);

                if (unlikely(nx<0)) {
                        err = nx;
                        if (err == -EAGAIN && flags) {
                                DECLARE_WAITQUEUE(wait, current);

                                add_wait_queue(&km_waitq, &wait);
                                set_current_state(TASK_INTERRUPTIBLE);
                                schedule();
                                set_current_state(TASK_RUNNING);
                                remove_wait_queue(&km_waitq, &wait);

                                nx = xfrm_tmpl_resolve(policy, fl, xfrm, family);

                                if (nx == -EAGAIN && signal_pending(current)) {
                                        err = -ERESTART;
                                        goto error;
                                }
                                if (nx == -EAGAIN ||
                                    genid != atomic_read(&flow_cache_genid)) {
                                        xfrm_pol_put(policy);
                                        goto restart;
                                }
                                err = nx;
                        }
                        if (err < 0)
                                goto error;
                }
                if (nx == 0) {
                        /* Flow passes untransformed. */
                        xfrm_pol_put(policy);
                        return 0;
                }

                dst = dst_orig;
                err = xfrm_bundle_create(policy, xfrm, nx, fl, &dst, family);

                if (unlikely(err)) {
                        int i;
                        for (i=0; i<nx; i++)
                                xfrm_state_put(xfrm[i]);
                        goto error;
                }

                write_lock_bh(&policy->lock);
                if (unlikely(policy->dead || stale_bundle(dst))) {
                        /* Wow! While we worked on resolving, this
                         * policy has gone.  Retry.  It is not paranoia:
                         * we just cannot enlist a new bundle on a dead
                         * object, and we cannot enlist stale bundles
                         * either.
                         */
                        write_unlock_bh(&policy->lock);

                        xfrm_pol_put(policy);
                        if (dst)
                                dst_free(dst);
                        goto restart;
                }
                dst->next = policy->bundles;
                policy->bundles = dst;
                dst_hold(dst);
                write_unlock_bh(&policy->lock);
        }
        *dst_p = dst;
        dst_release(dst_orig);
        xfrm_pol_put(policy);
        return 0;

error:
        dst_release(dst_orig);
        xfrm_pol_put(policy);
        *dst_p = NULL;
        return err;
}
EXPORT_SYMBOL(xfrm_lookup);

/* When an skb is transformed back to its "native" form, we have to
 * check policy restrictions.  At the moment we do this in a maximally
 * stupid way.  Shame on me. :-)  Of course, connected sockets must
 * have the policy cached on them.
 */

static inline int
xfrm_state_ok(struct xfrm_tmpl *tmpl, struct xfrm_state *x,
              unsigned short family)
{
        if (xfrm_state_kern(x))
                return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, family);
        return  x->id.proto == tmpl->id.proto &&
                (x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
                (x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
                x->props.mode == tmpl->mode &&
                (tmpl->aalgos & (1<<x->props.aalgo)) &&
                !(x->props.mode && xfrm_state_addr_cmp(tmpl, x, family));
}

static inline int
xfrm_policy_ok(struct xfrm_tmpl *tmpl, struct sec_path *sp, int start,
               unsigned short family)
{
        int idx = start;

        if (tmpl->optional) {
                if (!tmpl->mode)
                        return start;
        } else
                start = -1;
        for (; idx < sp->len; idx++) {
                if (xfrm_state_ok(tmpl, sp->x[idx].xvec, family))
                        return ++idx;
                if (sp->x[idx].xvec->props.mode)
                        break;
        }
        return start;
}

int
xfrm_decode_session(struct sk_buff *skb, struct flowi *fl, unsigned short family)
{
        struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);

        if (unlikely(afinfo == NULL))
                return -EAFNOSUPPORT;

        afinfo->decode_session(skb, fl);
        xfrm_policy_put_afinfo(afinfo);
        return 0;
}
EXPORT_SYMBOL(xfrm_decode_session);

static inline int secpath_has_tunnel(struct sec_path *sp, int k)
{
        for (; k < sp->len; k++) {
                if (sp->x[k].xvec->props.mode)
                        return 1;
        }

        return 0;
}

int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
                        unsigned short family)
{
        struct xfrm_policy *pol;
        struct flowi fl;
        u8 fl_dir = policy_to_flow_dir(dir);
        u32 sk_sid;

        if (xfrm_decode_session(skb, &fl, family) < 0)
                return 0;
        nf_nat_decode_session(skb, &fl, family);

        sk_sid = security_sk_sid(sk, &fl, fl_dir);

        /* First, check the SAs used against their selectors. */
        if (skb->sp) {
                int i;

                for (i=skb->sp->len-1; i>=0; i--) {
                        struct sec_decap_state *xvec = &(skb->sp->x[i]);
                        if (!xfrm_selector_match(&xvec->xvec->sel, &fl, family))
                                return 0;

                        /* If there is a post_input processor, try running it */
                        if (xvec->xvec->type->post_input &&
                            (xvec->xvec->type->post_input)(xvec->xvec,
                                                           &(xvec->decap),
                                                           skb) != 0)
                                return 0;
                }
        }

        pol = NULL;
        if (sk && sk->sk_policy[dir])
                pol = xfrm_sk_policy_lookup(sk, dir, &fl, sk_sid);

        if (!pol)
                pol = flow_cache_lookup(&fl, sk_sid, family, fl_dir,
                                        xfrm_policy_lookup);

        if (!pol)
                return !skb->sp || !secpath_has_tunnel(skb->sp, 0);

        pol->curlft.use_time = (unsigned long)xtime.tv_sec;

        if (pol->action == XFRM_POLICY_ALLOW) {
                struct sec_path *sp;
                static struct sec_path dummy;
                int i, k;

                if ((sp = skb->sp) == NULL)
                        sp = &dummy;

                /* For each tunnel xfrm, find the first matching tmpl.
                 * For each tmpl before that, find the corresponding xfrm.
                 * Order is _important_.  Later we will implement
                 * some barriers, but at the moment barriers
                 * are implied between every two transformations.
                 */
                for (i = pol->xfrm_nr-1, k = 0; i >= 0; i--) {
                        k = xfrm_policy_ok(pol->xfrm_vec+i, sp, k, family);
                        if (k < 0)
                                goto reject;
                }

                if (secpath_has_tunnel(sp, k))
                        goto reject;

                xfrm_pol_put(pol);
                return 1;
        }

reject:
        xfrm_pol_put(pol);
        return 0;
}
EXPORT_SYMBOL(__xfrm_policy_check);

int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
{
        struct flowi fl;

        if (xfrm_decode_session(skb, &fl, family) < 0)
                return 0;

        return xfrm_lookup(&skb->dst, &fl, NULL, 0) == 0;
}
EXPORT_SYMBOL(__xfrm_route_forward);

static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
{
        /* If it is marked obsolete, which is how we even get here,
         * then we have purged it from the policy bundle list and we
         * did that for a good reason.
         */
        return NULL;
}

static int stale_bundle(struct dst_entry *dst)
{
        return !xfrm_bundle_ok((struct xfrm_dst *)dst, NULL, AF_UNSPEC);
}

void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
{
        while ((dst = dst->child) && dst->xfrm && dst->dev == dev) {
                dst->dev = &loopback_dev;
                dev_hold(&loopback_dev);
                dev_put(dev);
        }
}
EXPORT_SYMBOL(xfrm_dst_ifdown);

static void xfrm_link_failure(struct sk_buff *skb)
{
        /* Impossible.  Such a dst must be popped before it reaches the
         * point of failure. */
        return;
}

static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
{
        if (dst) {
                if (dst->obsolete) {
                        dst_release(dst);
                        dst = NULL;
                }
        }
        return dst;
}

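/* Walk every policy and detach the bundles selected by func() while
 * the locks are held, then free the collected bundles outside them.
 */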
static void xfrm_prune_bundles(int (*func)(struct dst_entry *))
{
        int i;
        struct xfrm_policy *pol;
        struct dst_entry *dst, **dstp, *gc_list = NULL;

        read_lock_bh(&xfrm_policy_lock);
        for (i=0; i<2*XFRM_POLICY_MAX; i++) {
                for (pol = xfrm_policy_list[i]; pol; pol = pol->next) {
                        write_lock(&pol->lock);
                        dstp = &pol->bundles;
                        while ((dst=*dstp) != NULL) {
                                if (func(dst)) {
                                        *dstp = dst->next;
                                        dst->next = gc_list;
                                        gc_list = dst;
                                } else {
                                        dstp = &dst->next;
                                }
                        }
                        write_unlock(&pol->lock);
                }
        }
        read_unlock_bh(&xfrm_policy_lock);

        while (gc_list) {
                dst = gc_list;
                gc_list = dst->next;
                dst_free(dst);
        }
}

static int unused_bundle(struct dst_entry *dst)
{
        return !atomic_read(&dst->__refcnt);
}

static void __xfrm_garbage_collect(void)
{
        xfrm_prune_bundles(unused_bundle);
}

int xfrm_flush_bundles(void)
{
        xfrm_prune_bundles(stale_bundle);
        return 0;
}

static int always_true(struct dst_entry *dst)
{
        return 1;
}

void xfrm_flush_all_bundles(void)
{
        xfrm_prune_bundles(always_true);
}

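/* Walk the bundle from the outermost dst down, caching each level's
 * child and route MTUs and setting the effective MTU of every level to
 * the smaller of its state-adjusted child MTU and its route MTU.
 */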
void xfrm_init_pmtu(struct dst_entry *dst)
{
        do {
                struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
                u32 pmtu, route_mtu_cached;

                pmtu = dst_mtu(dst->child);
                xdst->child_mtu_cached = pmtu;

                pmtu = xfrm_state_mtu(dst->xfrm, pmtu);

                route_mtu_cached = dst_mtu(xdst->route);
                xdst->route_mtu_cached = route_mtu_cached;

                if (pmtu > route_mtu_cached)
                        pmtu = route_mtu_cached;

                dst->metrics[RTAX_MTU-1] = pmtu;
        } while ((dst = dst->next));
}

EXPORT_SYMBOL(xfrm_init_pmtu);

/* Check that the bundle accepts the flow and its components are
 * still valid.
 */

int xfrm_bundle_ok(struct xfrm_dst *first, struct flowi *fl, int family)
{
        struct dst_entry *dst = &first->u.dst;
        struct xfrm_dst *last;
        u32 mtu;

        if (!dst_check(dst->path, ((struct xfrm_dst *)dst)->path_cookie) ||
            (dst->dev && !netif_running(dst->dev)))
                return 0;

        last = NULL;

        do {
                struct xfrm_dst *xdst = (struct xfrm_dst *)dst;

                if (fl && !xfrm_selector_match(&dst->xfrm->sel, fl, family))
                        return 0;
                if (dst->xfrm->km.state != XFRM_STATE_VALID)
                        return 0;

                mtu = dst_mtu(dst->child);
                if (xdst->child_mtu_cached != mtu) {
                        last = xdst;
                        xdst->child_mtu_cached = mtu;
                }

                if (!dst_check(xdst->route, xdst->route_cookie))
                        return 0;
                mtu = dst_mtu(xdst->route);
                if (xdst->route_mtu_cached != mtu) {
                        last = xdst;
                        xdst->route_mtu_cached = mtu;
                }

                dst = dst->child;
        } while (dst->xfrm);

        if (likely(!last))
                return 1;

        mtu = last->child_mtu_cached;
        for (;;) {
                dst = &last->u.dst;

                mtu = xfrm_state_mtu(dst->xfrm, mtu);
                if (mtu > last->route_mtu_cached)
                        mtu = last->route_mtu_cached;
                dst->metrics[RTAX_MTU-1] = mtu;

                if (last == first)
                        break;

                last = last->u.next;
                last->child_mtu_cached = mtu;
        }

        return 1;
}

EXPORT_SYMBOL(xfrm_bundle_ok);

int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
{
        int err = 0;
        if (unlikely(afinfo == NULL))
                return -EINVAL;
        if (unlikely(afinfo->family >= NPROTO))
                return -EAFNOSUPPORT;
        write_lock(&xfrm_policy_afinfo_lock);
        if (unlikely(xfrm_policy_afinfo[afinfo->family] != NULL))
                err = -ENOBUFS;
        else {
                struct dst_ops *dst_ops = afinfo->dst_ops;
                if (likely(dst_ops->kmem_cachep == NULL))
                        dst_ops->kmem_cachep = xfrm_dst_cache;
                if (likely(dst_ops->check == NULL))
                        dst_ops->check = xfrm_dst_check;
                if (likely(dst_ops->negative_advice == NULL))
                        dst_ops->negative_advice = xfrm_negative_advice;
                if (likely(dst_ops->link_failure == NULL))
                        dst_ops->link_failure = xfrm_link_failure;
                if (likely(afinfo->garbage_collect == NULL))
                        afinfo->garbage_collect = __xfrm_garbage_collect;
                xfrm_policy_afinfo[afinfo->family] = afinfo;
        }
        write_unlock(&xfrm_policy_afinfo_lock);
        return err;
}
EXPORT_SYMBOL(xfrm_policy_register_afinfo);
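
/* A per-family backend fills in a struct xfrm_policy_afinfo (family,
 * dst_ops, type_map and the dst_lookup/find_bundle/bundle_create/
 * decode_session hooks used above) and registers it once at init time;
 * any dst_ops hooks left NULL are given the defaults installed here.
 */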

int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo)
{
        int err = 0;
        if (unlikely(afinfo == NULL))
                return -EINVAL;
        if (unlikely(afinfo->family >= NPROTO))
                return -EAFNOSUPPORT;
        write_lock(&xfrm_policy_afinfo_lock);
        if (likely(xfrm_policy_afinfo[afinfo->family] != NULL)) {
                if (unlikely(xfrm_policy_afinfo[afinfo->family] != afinfo))
                        err = -EINVAL;
                else {
                        struct dst_ops *dst_ops = afinfo->dst_ops;
                        xfrm_policy_afinfo[afinfo->family] = NULL;
                        dst_ops->kmem_cachep = NULL;
                        dst_ops->check = NULL;
                        dst_ops->negative_advice = NULL;
                        dst_ops->link_failure = NULL;
                        afinfo->garbage_collect = NULL;
                }
        }
        write_unlock(&xfrm_policy_afinfo_lock);
        return err;
}
EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);

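/* xfrm_policy_get_afinfo() returns the entry with afinfo->lock
 * read-held; every successful call must be paired with
 * xfrm_policy_put_afinfo().
 */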
static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
{
        struct xfrm_policy_afinfo *afinfo;
        if (unlikely(family >= NPROTO))
                return NULL;
        read_lock(&xfrm_policy_afinfo_lock);
        afinfo = xfrm_policy_afinfo[family];
        if (likely(afinfo != NULL))
                read_lock(&afinfo->lock);
        read_unlock(&xfrm_policy_afinfo_lock);
        return afinfo;
}

static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo)
{
        if (unlikely(afinfo == NULL))
                return;
        read_unlock(&afinfo->lock);
}

static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
        switch (event) {
        case NETDEV_DOWN:
                xfrm_flush_bundles();
        }
        return NOTIFY_DONE;
}

static struct notifier_block xfrm_dev_notifier = {
        xfrm_dev_event,
        NULL,
        0
};

static void __init xfrm_policy_init(void)
{
        xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
                                           sizeof(struct xfrm_dst),
                                           0, SLAB_HWCACHE_ALIGN,
                                           NULL, NULL);
        if (!xfrm_dst_cache)
                panic("XFRM: failed to allocate xfrm_dst_cache\n");

        INIT_WORK(&xfrm_policy_gc_work, xfrm_policy_gc_task, NULL);
        register_netdevice_notifier(&xfrm_dev_notifier);
}

void __init xfrm_init(void)
{
        xfrm_state_init();
        xfrm_policy_init();
        xfrm_input_init();
}