 *	Kazunori MIYAZAWA @USAGI
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *	Kazunori MIYAZAWA @USAGI
 *	Split up af-specific portion
 *	Derek Atkins <derek@ihtfp.com>		Add the post_input processor
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <net/xfrm.h>
#include <net/ip.h>
DEFINE_MUTEX(xfrm_cfg_mutex);
EXPORT_SYMBOL(xfrm_cfg_mutex);

static DEFINE_RWLOCK(xfrm_policy_lock);

struct xfrm_policy *xfrm_policy_list[XFRM_POLICY_MAX*2];
EXPORT_SYMBOL(xfrm_policy_list);

static DEFINE_RWLOCK(xfrm_policy_afinfo_lock);
static struct xfrm_policy_afinfo *xfrm_policy_afinfo[NPROTO];

static kmem_cache_t *xfrm_dst_cache __read_mostly;

static struct work_struct xfrm_policy_gc_work;
static struct list_head xfrm_policy_gc_list =
	LIST_HEAD_INIT(xfrm_policy_gc_list);
static DEFINE_SPINLOCK(xfrm_policy_gc_lock);

static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family);
static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo);
static struct xfrm_policy_afinfo *xfrm_policy_lock_afinfo(unsigned int family);
static void xfrm_policy_unlock_afinfo(struct xfrm_policy_afinfo *afinfo);
int xfrm_register_type(struct xfrm_type *type, unsigned short family)
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_lock_afinfo(family);
	struct xfrm_type **typemap;

	if (unlikely(afinfo == NULL))
	typemap = afinfo->type_map;

	if (likely(typemap[type->proto] == NULL))
		typemap[type->proto] = type;

	xfrm_policy_unlock_afinfo(afinfo);
EXPORT_SYMBOL(xfrm_register_type);

int xfrm_unregister_type(struct xfrm_type *type, unsigned short family)
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_lock_afinfo(family);
	struct xfrm_type **typemap;

	if (unlikely(afinfo == NULL))
	typemap = afinfo->type_map;

	if (unlikely(typemap[type->proto] != type))
	typemap[type->proto] = NULL;
	xfrm_policy_unlock_afinfo(afinfo);
EXPORT_SYMBOL(xfrm_unregister_type);
struct xfrm_type *xfrm_get_type(u8 proto, unsigned short family)
	struct xfrm_policy_afinfo *afinfo;
	struct xfrm_type **typemap;
	struct xfrm_type *type;
	int modload_attempted = 0;

	afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
	typemap = afinfo->type_map;

	type = typemap[proto];
	if (unlikely(type && !try_module_get(type->owner)))
	if (!type && !modload_attempted) {
		xfrm_policy_put_afinfo(afinfo);
		request_module("xfrm-type-%d-%d",
			       (int) family, (int) proto);
		modload_attempted = 1;

	xfrm_policy_put_afinfo(afinfo);
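
/*
 * Illustrative sketch (not part of the original file): how a transform
 * module would plug into xfrm_register_type()/xfrm_unregister_type() above,
 * and how a "xfrm-type-<family>-<proto>" module alias would let
 * xfrm_get_type() demand-load it via request_module().  The initializers
 * are assumptions kept to the fields this file itself relies on (proto,
 * owner, description); a real xfrm_type also supplies its handlers.
 */
#if 0
static struct xfrm_type example_esp_type = {
	.description	= "EXAMPLE ESP",
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_ESP,
};

static int __init example_esp_init(void)
{
	/* Fails with -EEXIST if another type already claims this proto slot. */
	return xfrm_register_type(&example_esp_type, AF_INET);
}

static void __exit example_esp_exit(void)
{
	xfrm_unregister_type(&example_esp_type, AF_INET);
}

module_init(example_esp_init);
module_exit(example_esp_exit);
/* AF_INET is 2 and IPPROTO_ESP is 50, matching the "xfrm-type-%d-%d" format above. */
MODULE_ALIAS("xfrm-type-2-50");
#endif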
int xfrm_dst_lookup(struct xfrm_dst **dst, struct flowi *fl,
		    unsigned short family)
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);

	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	if (likely(afinfo->dst_lookup != NULL))
		err = afinfo->dst_lookup(dst, fl);

	xfrm_policy_put_afinfo(afinfo);
EXPORT_SYMBOL(xfrm_dst_lookup);

void xfrm_put_type(struct xfrm_type *type)
	module_put(type->owner);
int xfrm_register_mode(struct xfrm_mode *mode, int family)
	struct xfrm_policy_afinfo *afinfo;
	struct xfrm_mode **modemap;

	if (unlikely(mode->encap >= XFRM_MODE_MAX))

	afinfo = xfrm_policy_lock_afinfo(family);
	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	modemap = afinfo->mode_map;
	if (likely(modemap[mode->encap] == NULL)) {
		modemap[mode->encap] = mode;

	xfrm_policy_unlock_afinfo(afinfo);
EXPORT_SYMBOL(xfrm_register_mode);

int xfrm_unregister_mode(struct xfrm_mode *mode, int family)
	struct xfrm_policy_afinfo *afinfo;
	struct xfrm_mode **modemap;

	if (unlikely(mode->encap >= XFRM_MODE_MAX))

	afinfo = xfrm_policy_lock_afinfo(family);
	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	modemap = afinfo->mode_map;
	if (likely(modemap[mode->encap] == mode)) {
		modemap[mode->encap] = NULL;

	xfrm_policy_unlock_afinfo(afinfo);
EXPORT_SYMBOL(xfrm_unregister_mode);

struct xfrm_mode *xfrm_get_mode(unsigned int encap, int family)
	struct xfrm_policy_afinfo *afinfo;
	struct xfrm_mode *mode;
	int modload_attempted = 0;

	if (unlikely(encap >= XFRM_MODE_MAX))

	afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))

	mode = afinfo->mode_map[encap];
	if (unlikely(mode && !try_module_get(mode->owner)))
	if (!mode && !modload_attempted) {
		xfrm_policy_put_afinfo(afinfo);
		request_module("xfrm-mode-%d-%d", family, encap);
		modload_attempted = 1;

	xfrm_policy_put_afinfo(afinfo);

void xfrm_put_mode(struct xfrm_mode *mode)
	module_put(mode->owner);
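
/*
 * Illustrative sketch (not part of the original file): the mode table is
 * populated the same way as the type table.  A hypothetical IPv4 tunnel-mode
 * module registers an xfrm_mode and advertises the
 * "xfrm-mode-<family>-<encap>" alias matched by request_module() in
 * xfrm_get_mode().  The alias numbers and field set are assumptions.
 */
#if 0
static struct xfrm_mode example_tunnel_mode = {
	.owner	= THIS_MODULE,
	.encap	= XFRM_MODE_TUNNEL,
};

static int __init example_mode_init(void)
{
	return xfrm_register_mode(&example_tunnel_mode, AF_INET);
}

static void __exit example_mode_exit(void)
{
	xfrm_unregister_mode(&example_tunnel_mode, AF_INET);
}

module_init(example_mode_init);
module_exit(example_mode_exit);
/* Assuming AF_INET is 2 and XFRM_MODE_TUNNEL is 1, per "xfrm-mode-%d-%d" above. */
MODULE_ALIAS("xfrm-mode-2-1");
#endif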
static inline unsigned long make_jiffies(long secs)
	if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
		return MAX_SCHEDULE_TIMEOUT-1;

static void xfrm_policy_timer(unsigned long data)
	struct xfrm_policy *xp = (struct xfrm_policy*)data;
	unsigned long now = (unsigned long)xtime.tv_sec;
	long next = LONG_MAX;

	read_lock(&xp->lock);

	dir = xfrm_policy_id2dir(xp->index);

	if (xp->lft.hard_add_expires_seconds) {
		long tmo = xp->lft.hard_add_expires_seconds +
			xp->curlft.add_time - now;
	if (xp->lft.hard_use_expires_seconds) {
		long tmo = xp->lft.hard_use_expires_seconds +
			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
	if (xp->lft.soft_add_expires_seconds) {
		long tmo = xp->lft.soft_add_expires_seconds +
			xp->curlft.add_time - now;
			tmo = XFRM_KM_TIMEOUT;
	if (xp->lft.soft_use_expires_seconds) {
		long tmo = xp->lft.soft_use_expires_seconds +
			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
			tmo = XFRM_KM_TIMEOUT;

		km_policy_expired(xp, dir, 0, 0);
	if (next != LONG_MAX &&
	    !mod_timer(&xp->timer, jiffies + make_jiffies(next)))

	read_unlock(&xp->lock);

	read_unlock(&xp->lock);
	if (!xfrm_policy_delete(xp, dir))
		km_policy_expired(xp, dir, 1, 0);
/* Allocate xfrm_policy. Not used here; it is supposed to be used by pfkeyv2
 * SPD calls.
 */
struct xfrm_policy *xfrm_policy_alloc(gfp_t gfp)
	struct xfrm_policy *policy;

	policy = kmalloc(sizeof(struct xfrm_policy), gfp);

	memset(policy, 0, sizeof(struct xfrm_policy));
	atomic_set(&policy->refcnt, 1);
	rwlock_init(&policy->lock);
	init_timer(&policy->timer);
	policy->timer.data = (unsigned long)policy;
	policy->timer.function = xfrm_policy_timer;
EXPORT_SYMBOL(xfrm_policy_alloc);

/* Destroy xfrm_policy: all descendant resources must already have been
 * released by this point. */
void __xfrm_policy_destroy(struct xfrm_policy *policy)
	BUG_ON(!policy->dead);

	BUG_ON(policy->bundles);

	if (del_timer(&policy->timer))

	security_xfrm_policy_free(policy);
EXPORT_SYMBOL(__xfrm_policy_destroy);
static void xfrm_policy_gc_kill(struct xfrm_policy *policy)
	struct dst_entry *dst;

	while ((dst = policy->bundles) != NULL) {
		policy->bundles = dst->next;

	if (del_timer(&policy->timer))
		atomic_dec(&policy->refcnt);

	if (atomic_read(&policy->refcnt) > 1)

	xfrm_pol_put(policy);

static void xfrm_policy_gc_task(void *data)
	struct xfrm_policy *policy;
	struct list_head *entry, *tmp;
	struct list_head gc_list = LIST_HEAD_INIT(gc_list);

	spin_lock_bh(&xfrm_policy_gc_lock);
	list_splice_init(&xfrm_policy_gc_list, &gc_list);
	spin_unlock_bh(&xfrm_policy_gc_lock);

	list_for_each_safe(entry, tmp, &gc_list) {
		policy = list_entry(entry, struct xfrm_policy, list);
		xfrm_policy_gc_kill(policy);
/* Rule must be locked. Release descendant resources, announce
 * the entry dead. The rule must already be unlinked from the lists
 * at this point.
 */
static void xfrm_policy_kill(struct xfrm_policy *policy)
	write_lock_bh(&policy->lock);
	write_unlock_bh(&policy->lock);

	if (unlikely(dead)) {

	spin_lock(&xfrm_policy_gc_lock);
	list_add(&policy->list, &xfrm_policy_gc_list);
	spin_unlock(&xfrm_policy_gc_lock);

	schedule_work(&xfrm_policy_gc_work);

/* Generate a new index... KAME seems to generate them ordered by cost
 * of an absolute unpredictability of rule ordering. This will not pass. */
static u32 xfrm_gen_index(int dir)
	struct xfrm_policy *p;
	static u32 idx_generator;

		idx = (idx_generator | dir);
		for (p = xfrm_policy_list[dir]; p; p = p->next) {
int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
	struct xfrm_policy *pol, **p;
	struct xfrm_policy *delpol = NULL;
	struct xfrm_policy **newpos = NULL;
	struct dst_entry *gc_list;

	write_lock_bh(&xfrm_policy_lock);
	for (p = &xfrm_policy_list[dir]; (pol=*p)!=NULL;) {
		if (!delpol && memcmp(&policy->selector, &pol->selector, sizeof(pol->selector)) == 0 &&
		    xfrm_sec_ctx_match(pol->security, policy->security)) {
			write_unlock_bh(&xfrm_policy_lock);
			if (policy->priority > pol->priority)
		} else if (policy->priority >= pol->priority) {

	xfrm_pol_hold(policy);

	atomic_inc(&flow_cache_genid);
	policy->index = delpol ? delpol->index : xfrm_gen_index(dir);
	policy->curlft.add_time = (unsigned long)xtime.tv_sec;
	policy->curlft.use_time = 0;
	if (!mod_timer(&policy->timer, jiffies + HZ))
		xfrm_pol_hold(policy);
	write_unlock_bh(&xfrm_policy_lock);

		xfrm_policy_kill(delpol);

	read_lock_bh(&xfrm_policy_lock);

	for (policy = policy->next; policy; policy = policy->next) {
		struct dst_entry *dst;

		write_lock(&policy->lock);
		dst = policy->bundles;
			struct dst_entry *tail = dst;
			tail->next = gc_list;
			policy->bundles = NULL;
		write_unlock(&policy->lock);

	read_unlock_bh(&xfrm_policy_lock);

		struct dst_entry *dst = gc_list;
EXPORT_SYMBOL(xfrm_policy_insert);
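
/*
 * Illustrative sketch (not part of the original file): the allocate-then-
 * insert sequence a keying interface such as pfkeyv2 or a netlink front end
 * is expected to follow.  The selector values are made-up placeholders;
 * only the calls to xfrm_policy_alloc()/xfrm_policy_insert() and the fields
 * they touch are taken from this file.
 */
#if 0
static int example_add_output_policy(void)
{
	struct xfrm_policy *pol;

	pol = xfrm_policy_alloc(GFP_KERNEL);
	if (!pol)
		return -ENOMEM;

	/* Match every IPv4 flow; real callers fill this in from user space. */
	pol->selector.family = AF_INET;
	pol->family = AF_INET;
	pol->action = XFRM_POLICY_ALLOW;
	pol->lft.soft_byte_limit = XFRM_INF;
	pol->lft.hard_byte_limit = XFRM_INF;
	pol->lft.soft_packet_limit = XFRM_INF;
	pol->lft.hard_packet_limit = XFRM_INF;

	/* excl=1: fail with -EEXIST rather than replace a matching rule. */
	return xfrm_policy_insert(XFRM_POLICY_OUT, pol, 1);
}
#endif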
struct xfrm_policy *xfrm_policy_bysel_ctx(int dir, struct xfrm_selector *sel,
					  struct xfrm_sec_ctx *ctx, int delete)
	struct xfrm_policy *pol, **p;

	write_lock_bh(&xfrm_policy_lock);
	for (p = &xfrm_policy_list[dir]; (pol=*p)!=NULL; p = &pol->next) {
		if ((memcmp(sel, &pol->selector, sizeof(*sel)) == 0) &&
		    (xfrm_sec_ctx_match(ctx, pol->security))) {

	write_unlock_bh(&xfrm_policy_lock);

		atomic_inc(&flow_cache_genid);
		xfrm_policy_kill(pol);
EXPORT_SYMBOL(xfrm_policy_bysel_ctx);

struct xfrm_policy *xfrm_policy_byid(int dir, u32 id, int delete)
	struct xfrm_policy *pol, **p;

	write_lock_bh(&xfrm_policy_lock);
	for (p = &xfrm_policy_list[dir]; (pol=*p)!=NULL; p = &pol->next) {
		if (pol->index == id) {

	write_unlock_bh(&xfrm_policy_lock);

		atomic_inc(&flow_cache_genid);
		xfrm_policy_kill(pol);
EXPORT_SYMBOL(xfrm_policy_byid);
void xfrm_policy_flush(void)
	struct xfrm_policy *xp;

	write_lock_bh(&xfrm_policy_lock);
	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		while ((xp = xfrm_policy_list[dir]) != NULL) {
			xfrm_policy_list[dir] = xp->next;
			write_unlock_bh(&xfrm_policy_lock);

			xfrm_policy_kill(xp);

			write_lock_bh(&xfrm_policy_lock);

	atomic_inc(&flow_cache_genid);
	write_unlock_bh(&xfrm_policy_lock);
EXPORT_SYMBOL(xfrm_policy_flush);
int xfrm_policy_walk(int (*func)(struct xfrm_policy *, int, int, void*),
		     void *data)
	struct xfrm_policy *xp;

	read_lock_bh(&xfrm_policy_lock);
	for (dir = 0; dir < 2*XFRM_POLICY_MAX; dir++) {
		for (xp = xfrm_policy_list[dir]; xp; xp = xp->next)

	for (dir = 0; dir < 2*XFRM_POLICY_MAX; dir++) {
		for (xp = xfrm_policy_list[dir]; xp; xp = xp->next) {
			error = func(xp, dir%XFRM_POLICY_MAX, --count, data);

	read_unlock_bh(&xfrm_policy_lock);
EXPORT_SYMBOL(xfrm_policy_walk);
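
/*
 * Illustrative sketch (not part of the original file): the callback shape
 * xfrm_policy_walk() expects.  It is invoked once per policy with the
 * direction, a decreasing count and the opaque data pointer handed to the
 * walker; a non-zero return aborts the walk.  The counting payload is a
 * made-up example.
 */
#if 0
static int example_count_policies(struct xfrm_policy *xp, int dir,
				  int count, void *data)
{
	unsigned int *seen = data;

	(*seen)++;
	return 0;		/* keep walking */
}

static unsigned int example_policy_count(void)
{
	unsigned int seen = 0;

	xfrm_policy_walk(example_count_policies, &seen);
	return seen;
}
#endif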
/* Find policy to apply to this flow. */
static void xfrm_policy_lookup(struct flowi *fl, u32 sk_sid, u16 family, u8 dir,
			       void **objp, atomic_t **obj_refp)
	struct xfrm_policy *pol;

	read_lock_bh(&xfrm_policy_lock);
	for (pol = xfrm_policy_list[dir]; pol; pol = pol->next) {
		struct xfrm_selector *sel = &pol->selector;

		if (pol->family != family)

		match = xfrm_selector_match(sel, fl, family);

			if (!security_xfrm_policy_lookup(pol, sk_sid, dir)) {

	read_unlock_bh(&xfrm_policy_lock);
	if ((*objp = (void *) pol) != NULL)
		*obj_refp = &pol->refcnt;

static inline int policy_to_flow_dir(int dir)
	if (XFRM_POLICY_IN == FLOW_DIR_IN &&
	    XFRM_POLICY_OUT == FLOW_DIR_OUT &&
	    XFRM_POLICY_FWD == FLOW_DIR_FWD)
	case XFRM_POLICY_OUT:
	case XFRM_POLICY_FWD:

static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir, struct flowi *fl, u32 sk_sid)
	struct xfrm_policy *pol;

	read_lock_bh(&xfrm_policy_lock);
	if ((pol = sk->sk_policy[dir]) != NULL) {
		int match = xfrm_selector_match(&pol->selector, fl,
			err = security_xfrm_policy_lookup(pol, sk_sid, policy_to_flow_dir(dir));
	read_unlock_bh(&xfrm_policy_lock);
static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
	pol->next = xfrm_policy_list[dir];
	xfrm_policy_list[dir] = pol;

static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
						int dir)
	struct xfrm_policy **polp;

	for (polp = &xfrm_policy_list[dir];
	     *polp != NULL; polp = &(*polp)->next) {

int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
	write_lock_bh(&xfrm_policy_lock);
	pol = __xfrm_policy_unlink(pol, dir);
	write_unlock_bh(&xfrm_policy_lock);
		if (dir < XFRM_POLICY_MAX)
			atomic_inc(&flow_cache_genid);
		xfrm_policy_kill(pol);
EXPORT_SYMBOL(xfrm_policy_delete);

int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
	struct xfrm_policy *old_pol;

	write_lock_bh(&xfrm_policy_lock);
	old_pol = sk->sk_policy[dir];
	sk->sk_policy[dir] = pol;
		pol->curlft.add_time = (unsigned long)xtime.tv_sec;
		pol->index = xfrm_gen_index(XFRM_POLICY_MAX+dir);
		__xfrm_policy_link(pol, XFRM_POLICY_MAX+dir);
		__xfrm_policy_unlink(old_pol, XFRM_POLICY_MAX+dir);
	write_unlock_bh(&xfrm_policy_lock);

		xfrm_policy_kill(old_pol);

static struct xfrm_policy *clone_policy(struct xfrm_policy *old, int dir)
	struct xfrm_policy *newp = xfrm_policy_alloc(GFP_ATOMIC);
		newp->selector = old->selector;
		if (security_xfrm_policy_clone(old, newp)) {
			return NULL;	/* ENOMEM */
		newp->lft = old->lft;
		newp->curlft = old->curlft;
		newp->action = old->action;
		newp->flags = old->flags;
		newp->xfrm_nr = old->xfrm_nr;
		newp->index = old->index;
		memcpy(newp->xfrm_vec, old->xfrm_vec,
		       newp->xfrm_nr*sizeof(struct xfrm_tmpl));
		write_lock_bh(&xfrm_policy_lock);
		__xfrm_policy_link(newp, XFRM_POLICY_MAX+dir);
		write_unlock_bh(&xfrm_policy_lock);

int __xfrm_sk_clone_policy(struct sock *sk)
	struct xfrm_policy *p0 = sk->sk_policy[0],
			   *p1 = sk->sk_policy[1];

	sk->sk_policy[0] = sk->sk_policy[1] = NULL;
	if (p0 && (sk->sk_policy[0] = clone_policy(p0, 0)) == NULL)
	if (p1 && (sk->sk_policy[1] = clone_policy(p1, 1)) == NULL)
/* Resolve the list of templates for the flow, given the policy. */
static int
xfrm_tmpl_resolve(struct xfrm_policy *policy, struct flowi *fl,
		  struct xfrm_state **xfrm,
		  unsigned short family)
	xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
	xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);

	for (nx=0, i = 0; i < policy->xfrm_nr; i++) {
		struct xfrm_state *x;
		xfrm_address_t *remote = daddr;
		xfrm_address_t *local  = saddr;
		struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];

			remote = &tmpl->id.daddr;
			local = &tmpl->saddr;

		x = xfrm_state_find(remote, local, fl, tmpl, policy, &error, family);

		if (x && x->km.state == XFRM_STATE_VALID) {
			error = (x->km.state == XFRM_STATE_ERROR ?

	for (nx--; nx>=0; nx--)
		xfrm_state_put(xfrm[nx]);
/* Check that the bundle accepts the flow and its components are
 * still valid.
 */
static struct dst_entry *
xfrm_find_bundle(struct flowi *fl, struct xfrm_policy *policy, unsigned short family)
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return ERR_PTR(-EINVAL);
	x = afinfo->find_bundle(fl, policy);
	xfrm_policy_put_afinfo(afinfo);

/* Allocate chain of dst_entry's, attach known xfrm's, calculate
 * all the metrics... In short, bundle a bundle.
 */
static int
xfrm_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int nx,
		   struct flowi *fl, struct dst_entry **dst_p,
		   unsigned short family)
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
	err = afinfo->bundle_create(policy, xfrm, nx, fl, dst_p);
	xfrm_policy_put_afinfo(afinfo);

static int stale_bundle(struct dst_entry *dst);
/* Main function: finds/creates a bundle for given flow.
 *
 * At the moment we eat a raw IP route. Mostly to speed up lookups
 * on interfaces with disabled IPsec.
 */
int xfrm_lookup(struct dst_entry **dst_p, struct flowi *fl,
		struct sock *sk, int flags)
	struct xfrm_policy *policy;
	struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
	struct dst_entry *dst, *dst_orig = *dst_p;

	u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT);
	u32 sk_sid = security_sk_sid(sk, fl, dir);

	genid = atomic_read(&flow_cache_genid);

	if (sk && sk->sk_policy[1])
		policy = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl, sk_sid);

		/* To accelerate a bit... */
		if ((dst_orig->flags & DST_NOXFRM) || !xfrm_policy_list[XFRM_POLICY_OUT])

		policy = flow_cache_lookup(fl, sk_sid, dst_orig->ops->family,
					   dir, xfrm_policy_lookup);

	family = dst_orig->ops->family;
	policy->curlft.use_time = (unsigned long)xtime.tv_sec;

	switch (policy->action) {
	case XFRM_POLICY_BLOCK:
		/* Prohibit the flow */

	case XFRM_POLICY_ALLOW:
		if (policy->xfrm_nr == 0) {
			/* Flow passes untransformed. */
			xfrm_pol_put(policy);

		/* Try to find a matching bundle.
		 *
		 * LATER: help from flow cache. It is optional, this
		 * is required only for output policy.
		 */
		dst = xfrm_find_bundle(fl, policy, family);

		nx = xfrm_tmpl_resolve(policy, fl, xfrm, family);

		if (unlikely(nx<0)) {
			if (err == -EAGAIN && flags) {
				DECLARE_WAITQUEUE(wait, current);

				add_wait_queue(&km_waitq, &wait);
				set_current_state(TASK_INTERRUPTIBLE);
				set_current_state(TASK_RUNNING);
				remove_wait_queue(&km_waitq, &wait);

				nx = xfrm_tmpl_resolve(policy, fl, xfrm, family);

				if (nx == -EAGAIN && signal_pending(current)) {
				    genid != atomic_read(&flow_cache_genid)) {
					xfrm_pol_put(policy);

			/* Flow passes untransformed. */
			xfrm_pol_put(policy);

		err = xfrm_bundle_create(policy, xfrm, nx, fl, &dst, family);
				xfrm_state_put(xfrm[i]);

		write_lock_bh(&policy->lock);
		if (unlikely(policy->dead || stale_bundle(dst))) {
			/* Wow! While we worked on resolving, this
			 * policy has gone. Retry. It is not paranoia,
			 * we just cannot enlist a new bundle to a dead object.
			 * We can't enlist stale bundles either.
			 */
			write_unlock_bh(&policy->lock);

		dst->next = policy->bundles;
		policy->bundles = dst;

		write_unlock_bh(&policy->lock);

	dst_release(dst_orig);
	xfrm_pol_put(policy);

	dst_release(dst_orig);
	xfrm_pol_put(policy);
EXPORT_SYMBOL(xfrm_lookup);
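
/*
 * Illustrative sketch (not part of the original file): the caller contract
 * of xfrm_lookup().  The routing code hands in an already-resolved route in
 * *dst_p; on success the pointer either still holds that raw route (no
 * matching policy, or a transform-less ALLOW rule) or has been replaced by
 * a bundle built above.  The flowi initialization is a made-up placeholder.
 */
#if 0
static int example_output_route(struct sock *sk, struct dst_entry **dst_p)
{
	struct flowi fl;

	memset(&fl, 0, sizeof(fl));
	fl.proto = IPPROTO_TCP;		/* placeholder flow key */

	/*
	 * *dst_p is assumed to already hold the plain routing-table result.
	 * With flags == 0 the call does not sleep; a non-zero flags value
	 * would let the resolver wait on km_waitq for a missing SA.
	 */
	return xfrm_lookup(dst_p, &fl, sk, 0);
}
#endif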
/* When skb is transformed back to its "native" form, we have to
 * check policy restrictions. At the moment we do this in a maximally
 * stupid way. Shame on me. :-) Of course, connected sockets must
 * have the policy cached at them.
 */
static inline int
xfrm_state_ok(struct xfrm_tmpl *tmpl, struct xfrm_state *x,
	      unsigned short family)
	if (xfrm_state_kern(x))
		return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, family);
	return x->id.proto == tmpl->id.proto &&
		(x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
		(x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
		x->props.mode == tmpl->mode &&
		(tmpl->aalgos & (1<<x->props.aalgo)) &&
		!(x->props.mode && xfrm_state_addr_cmp(tmpl, x, family));

static inline int
xfrm_policy_ok(struct xfrm_tmpl *tmpl, struct sec_path *sp, int start,
	       unsigned short family)
	if (tmpl->optional) {

	for (; idx < sp->len; idx++) {
		if (xfrm_state_ok(tmpl, sp->xvec[idx], family))
		if (sp->xvec[idx]->props.mode)

int
xfrm_decode_session(struct sk_buff *skb, struct flowi *fl, unsigned short family)
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);

	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	afinfo->decode_session(skb, fl);
	xfrm_policy_put_afinfo(afinfo);
EXPORT_SYMBOL(xfrm_decode_session);

static inline int secpath_has_tunnel(struct sec_path *sp, int k)
	for (; k < sp->len; k++) {
		if (sp->xvec[k]->props.mode)

int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
			unsigned short family)
	struct xfrm_policy *pol;

	u8 fl_dir = policy_to_flow_dir(dir);

	if (xfrm_decode_session(skb, &fl, family) < 0)
	nf_nat_decode_session(skb, &fl, family);

	sk_sid = security_sk_sid(sk, &fl, fl_dir);

	/* First, check used SAs against their selectors. */
		for (i=skb->sp->len-1; i>=0; i--) {
			struct xfrm_state *x = skb->sp->xvec[i];
			if (!xfrm_selector_match(&x->sel, &fl, family))

	if (sk && sk->sk_policy[dir])
		pol = xfrm_sk_policy_lookup(sk, dir, &fl, sk_sid);

		pol = flow_cache_lookup(&fl, sk_sid, family, fl_dir,
					xfrm_policy_lookup);

		return !skb->sp || !secpath_has_tunnel(skb->sp, 0);

	pol->curlft.use_time = (unsigned long)xtime.tv_sec;

	if (pol->action == XFRM_POLICY_ALLOW) {
		struct sec_path *sp;
		static struct sec_path dummy;

		if ((sp = skb->sp) == NULL)

		/* For each tunnel xfrm, find the first matching tmpl.
		 * For each tmpl before that, find the corresponding xfrm.
		 * Order is _important_. Later we will implement
		 * some barriers, but at the moment barriers
		 * are implied between each two transformations.
		 */
		for (i = pol->xfrm_nr-1, k = 0; i >= 0; i--) {
			k = xfrm_policy_ok(pol->xfrm_vec+i, sp, k, family);

		if (secpath_has_tunnel(sp, k))
EXPORT_SYMBOL(__xfrm_policy_check);
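
/*
 * Illustrative sketch (not part of the original file): how a receive path
 * would consult __xfrm_policy_check() after decapsulation.  In the real
 * kernel this is normally reached through per-family wrappers (e.g.
 * xfrm4_policy_check()); the direct call below is only meant to make the
 * argument roles explicit.
 */
#if 0
static int example_rcv_policy_gate(struct sock *sk, struct sk_buff *skb)
{
	/* Non-zero means inbound policy allows this (possibly untransformed)
	 * packet; zero means it must be dropped. */
	if (!__xfrm_policy_check(sk, XFRM_POLICY_IN, skb, AF_INET))
		return -EPERM;
	return 0;
}
#endif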
int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
	if (xfrm_decode_session(skb, &fl, family) < 0)

	return xfrm_lookup(&skb->dst, &fl, NULL, 0) == 0;
EXPORT_SYMBOL(__xfrm_route_forward);

static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
	/* If it is marked obsolete, which is how we even get here,
	 * then we have purged it from the policy bundle list and we
	 * did that for a good reason.
	 */

static int stale_bundle(struct dst_entry *dst)
	return !xfrm_bundle_ok((struct xfrm_dst *)dst, NULL, AF_UNSPEC);

void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
	while ((dst = dst->child) && dst->xfrm && dst->dev == dev) {
		dst->dev = &loopback_dev;
		dev_hold(&loopback_dev);
EXPORT_SYMBOL(xfrm_dst_ifdown);

static void xfrm_link_failure(struct sk_buff *skb)
	/* Impossible. Such a dst must be popped before it reaches the point of failure. */

static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
		if (dst->obsolete) {

static void xfrm_prune_bundles(int (*func)(struct dst_entry *))
	struct xfrm_policy *pol;
	struct dst_entry *dst, **dstp, *gc_list = NULL;

	read_lock_bh(&xfrm_policy_lock);
	for (i=0; i<2*XFRM_POLICY_MAX; i++) {
		for (pol = xfrm_policy_list[i]; pol; pol = pol->next) {
			write_lock(&pol->lock);
			dstp = &pol->bundles;
			while ((dst=*dstp) != NULL) {
					dst->next = gc_list;
			write_unlock(&pol->lock);

	read_unlock_bh(&xfrm_policy_lock);

		gc_list = dst->next;

static int unused_bundle(struct dst_entry *dst)
	return !atomic_read(&dst->__refcnt);

static void __xfrm_garbage_collect(void)
	xfrm_prune_bundles(unused_bundle);

int xfrm_flush_bundles(void)
	xfrm_prune_bundles(stale_bundle);

static int always_true(struct dst_entry *dst)

void xfrm_flush_all_bundles(void)
	xfrm_prune_bundles(always_true);

void xfrm_init_pmtu(struct dst_entry *dst)
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
		u32 pmtu, route_mtu_cached;

		pmtu = dst_mtu(dst->child);
		xdst->child_mtu_cached = pmtu;

		pmtu = xfrm_state_mtu(dst->xfrm, pmtu);

		route_mtu_cached = dst_mtu(xdst->route);
		xdst->route_mtu_cached = route_mtu_cached;

		if (pmtu > route_mtu_cached)
			pmtu = route_mtu_cached;

		dst->metrics[RTAX_MTU-1] = pmtu;
	} while ((dst = dst->next));
EXPORT_SYMBOL(xfrm_init_pmtu);
/* Check that the bundle accepts the flow and its components are
 * still valid.
 */
int xfrm_bundle_ok(struct xfrm_dst *first, struct flowi *fl, int family)
	struct dst_entry *dst = &first->u.dst;
	struct xfrm_dst *last;

	if (!dst_check(dst->path, ((struct xfrm_dst *)dst)->path_cookie) ||
	    (dst->dev && !netif_running(dst->dev)))

		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;

		if (fl && !xfrm_selector_match(&dst->xfrm->sel, fl, family))
		if (dst->xfrm->km.state != XFRM_STATE_VALID)

		mtu = dst_mtu(dst->child);
		if (xdst->child_mtu_cached != mtu) {
			xdst->child_mtu_cached = mtu;

		if (!dst_check(xdst->route, xdst->route_cookie))
		mtu = dst_mtu(xdst->route);
		if (xdst->route_mtu_cached != mtu) {
			xdst->route_mtu_cached = mtu;

	} while (dst->xfrm);

	mtu = last->child_mtu_cached;

		mtu = xfrm_state_mtu(dst->xfrm, mtu);
		if (mtu > last->route_mtu_cached)
			mtu = last->route_mtu_cached;
		dst->metrics[RTAX_MTU-1] = mtu;

		last = last->u.next;
		last->child_mtu_cached = mtu;
EXPORT_SYMBOL(xfrm_bundle_ok);
int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
	if (unlikely(afinfo == NULL))
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;
	write_lock_bh(&xfrm_policy_afinfo_lock);
	if (unlikely(xfrm_policy_afinfo[afinfo->family] != NULL))
		struct dst_ops *dst_ops = afinfo->dst_ops;
		if (likely(dst_ops->kmem_cachep == NULL))
			dst_ops->kmem_cachep = xfrm_dst_cache;
		if (likely(dst_ops->check == NULL))
			dst_ops->check = xfrm_dst_check;
		if (likely(dst_ops->negative_advice == NULL))
			dst_ops->negative_advice = xfrm_negative_advice;
		if (likely(dst_ops->link_failure == NULL))
			dst_ops->link_failure = xfrm_link_failure;
		if (likely(afinfo->garbage_collect == NULL))
			afinfo->garbage_collect = __xfrm_garbage_collect;
		xfrm_policy_afinfo[afinfo->family] = afinfo;
	write_unlock_bh(&xfrm_policy_afinfo_lock);
EXPORT_SYMBOL(xfrm_policy_register_afinfo);

int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo)
	if (unlikely(afinfo == NULL))
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;
	write_lock_bh(&xfrm_policy_afinfo_lock);
	if (likely(xfrm_policy_afinfo[afinfo->family] != NULL)) {
		if (unlikely(xfrm_policy_afinfo[afinfo->family] != afinfo))
			struct dst_ops *dst_ops = afinfo->dst_ops;
			xfrm_policy_afinfo[afinfo->family] = NULL;
			dst_ops->kmem_cachep = NULL;
			dst_ops->check = NULL;
			dst_ops->negative_advice = NULL;
			dst_ops->link_failure = NULL;
			afinfo->garbage_collect = NULL;
	write_unlock_bh(&xfrm_policy_afinfo_lock);
EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);
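
/*
 * Illustrative sketch (not part of the original file): the per-family glue
 * that xfrm_policy_register_afinfo() expects.  Only members this file
 * actually dereferences are shown (family, dst_ops and a lookup callback);
 * the callback body is a stub and the dst_ops instance is a placeholder,
 * not the real IPv4 one.
 */
#if 0
static int example_dst_lookup(struct xfrm_dst **dst, struct flowi *fl)
{
	return -EINVAL;		/* stub */
}

static struct dst_ops example_dst_ops = {
	.family		= AF_INET,
};

static struct xfrm_policy_afinfo example_policy_afinfo = {
	.family		= AF_INET,
	.dst_ops	= &example_dst_ops,
	.dst_lookup	= example_dst_lookup,
};

static int __init example_afinfo_init(void)
{
	/*
	 * Registration fills in the generic defaults left NULL here
	 * (kmem_cachep, check, negative_advice, link_failure,
	 * garbage_collect), exactly as done above.
	 */
	return xfrm_policy_register_afinfo(&example_policy_afinfo);
}

static void __exit example_afinfo_exit(void)
{
	xfrm_policy_unregister_afinfo(&example_policy_afinfo);
}
#endif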
static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
	struct xfrm_policy_afinfo *afinfo;
	if (unlikely(family >= NPROTO))
	read_lock(&xfrm_policy_afinfo_lock);
	afinfo = xfrm_policy_afinfo[family];
	if (unlikely(!afinfo))
		read_unlock(&xfrm_policy_afinfo_lock);

static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo)
	read_unlock(&xfrm_policy_afinfo_lock);

static struct xfrm_policy_afinfo *xfrm_policy_lock_afinfo(unsigned int family)
	struct xfrm_policy_afinfo *afinfo;
	if (unlikely(family >= NPROTO))
	write_lock_bh(&xfrm_policy_afinfo_lock);
	afinfo = xfrm_policy_afinfo[family];
	if (unlikely(!afinfo))
		write_unlock_bh(&xfrm_policy_afinfo_lock);

static void xfrm_policy_unlock_afinfo(struct xfrm_policy_afinfo *afinfo)
	write_unlock_bh(&xfrm_policy_afinfo_lock);

static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
		xfrm_flush_bundles();

static struct notifier_block xfrm_dev_notifier = {

static void __init xfrm_policy_init(void)
	xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
					   sizeof(struct xfrm_dst),
					   0, SLAB_HWCACHE_ALIGN,
	if (!xfrm_dst_cache)
		panic("XFRM: failed to allocate xfrm_dst_cache\n");

	INIT_WORK(&xfrm_policy_gc_work, xfrm_policy_gc_task, NULL);
	register_netdevice_notifier(&xfrm_dev_notifier);

void __init xfrm_init(void)