/*
 * xfrm_policy.c
 *
 * Changes:
 *	Kazunori MIYAZAWA @USAGI
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *		IPv6 support
 *	Kazunori MIYAZAWA @USAGI
 *		Split up af-specific portion
 *	Derek Atkins <derek@ihtfp.com>		Add the post_input processor
 *
 */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <net/dst.h>
#include <net/xfrm.h>
#include <net/ip.h>

#include "xfrm_hash.h"

int sysctl_xfrm_larval_drop __read_mostly;

DEFINE_MUTEX(xfrm_cfg_mutex);
EXPORT_SYMBOL(xfrm_cfg_mutex);

static DEFINE_RWLOCK(xfrm_policy_lock);

unsigned int xfrm_policy_count[XFRM_POLICY_MAX*2];
EXPORT_SYMBOL(xfrm_policy_count);

static DEFINE_RWLOCK(xfrm_policy_afinfo_lock);
static struct xfrm_policy_afinfo *xfrm_policy_afinfo[NPROTO];

static struct kmem_cache *xfrm_dst_cache __read_mostly;

static struct work_struct xfrm_policy_gc_work;
static HLIST_HEAD(xfrm_policy_gc_list);
static DEFINE_SPINLOCK(xfrm_policy_gc_lock);

static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family);
static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo);

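/* Selector matching: addresses are compared under the selector's prefix
 * lengths, ports under the port masks, and protocol/ifindex only when the
 * selector specifies them (zero acts as a wildcard).
 */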
static inline int
__xfrm4_selector_match(struct xfrm_selector *sel, struct flowi *fl)
{
	return  addr_match(&fl->fl4_dst, &sel->daddr, sel->prefixlen_d) &&
		addr_match(&fl->fl4_src, &sel->saddr, sel->prefixlen_s) &&
		!((xfrm_flowi_dport(fl) ^ sel->dport) & sel->dport_mask) &&
		!((xfrm_flowi_sport(fl) ^ sel->sport) & sel->sport_mask) &&
		(fl->proto == sel->proto || !sel->proto) &&
		(fl->oif == sel->ifindex || !sel->ifindex);
}

static inline int
__xfrm6_selector_match(struct xfrm_selector *sel, struct flowi *fl)
{
	return  addr_match(&fl->fl6_dst, &sel->daddr, sel->prefixlen_d) &&
		addr_match(&fl->fl6_src, &sel->saddr, sel->prefixlen_s) &&
		!((xfrm_flowi_dport(fl) ^ sel->dport) & sel->dport_mask) &&
		!((xfrm_flowi_sport(fl) ^ sel->sport) & sel->sport_mask) &&
		(fl->proto == sel->proto || !sel->proto) &&
		(fl->oif == sel->ifindex || !sel->ifindex);
}

int xfrm_selector_match(struct xfrm_selector *sel, struct flowi *fl,
			unsigned short family)
{
	switch (family) {
	case AF_INET:
		return __xfrm4_selector_match(sel, fl);
	case AF_INET6:
		return __xfrm6_selector_match(sel, fl);
	}
	return 0;
}

struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x, int tos)
{
	xfrm_address_t *saddr = &x->props.saddr;
	xfrm_address_t *daddr = &x->id.daddr;
	struct xfrm_policy_afinfo *afinfo;
	struct dst_entry *dst;

	/* Types with a care-of address route via that address instead. */
	if (x->type->flags & XFRM_TYPE_LOCAL_COADDR)
		saddr = x->coaddr;
	if (x->type->flags & XFRM_TYPE_REMOTE_COADDR)
		daddr = x->coaddr;

	afinfo = xfrm_policy_get_afinfo(x->props.family);
	if (unlikely(afinfo == NULL))
		return ERR_PTR(-EAFNOSUPPORT);

	dst = afinfo->dst_lookup(tos, saddr, daddr);
	xfrm_policy_put_afinfo(afinfo);

	return dst;
}
EXPORT_SYMBOL(xfrm_dst_lookup);

static inline unsigned long make_jiffies(long secs)
{
	if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
		return MAX_SCHEDULE_TIMEOUT-1;
	else
		return secs*HZ;
}

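/* Per-policy lifetime timer: enforces the soft (warn the key manager) and
 * hard (delete the policy) add/use expiry limits, re-arming itself for the
 * nearest remaining deadline.
 */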
static void xfrm_policy_timer(unsigned long data)
{
	struct xfrm_policy *xp = (struct xfrm_policy*)data;
	unsigned long now = get_seconds();
	long next = LONG_MAX;
	int warn = 0;
	int dir;

	read_lock(&xp->lock);

	if (xp->dead)
		goto out;

	dir = xfrm_policy_id2dir(xp->index);

	if (xp->lft.hard_add_expires_seconds) {
		long tmo = xp->lft.hard_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.hard_use_expires_seconds) {
		long tmo = xp->lft.hard_use_expires_seconds +
			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_add_expires_seconds) {
		long tmo = xp->lft.soft_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_use_expires_seconds) {
		long tmo = xp->lft.soft_use_expires_seconds +
			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}

	if (warn)
		km_policy_expired(xp, dir, 0, 0);
	if (next != LONG_MAX &&
	    !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
		xfrm_pol_hold(xp);

out:
	read_unlock(&xp->lock);
	xfrm_pol_put(xp);
	return;

expired:
	read_unlock(&xp->lock);
	if (!xfrm_policy_delete(xp, dir))
		km_policy_expired(xp, dir, 1, 0);
	xfrm_pol_put(xp);
}

/* Allocate xfrm_policy. Not used here, it is supposed to be used by pfkeyv2
 * SPD calls.
 */

struct xfrm_policy *xfrm_policy_alloc(gfp_t gfp)
{
	struct xfrm_policy *policy;

	policy = kzalloc(sizeof(struct xfrm_policy), gfp);

	if (policy) {
		INIT_HLIST_NODE(&policy->bydst);
		INIT_HLIST_NODE(&policy->byidx);
		rwlock_init(&policy->lock);
		atomic_set(&policy->refcnt, 1);
		setup_timer(&policy->timer, xfrm_policy_timer,
			    (unsigned long)policy);
	}
	return policy;
}
EXPORT_SYMBOL(xfrm_policy_alloc);

/* Destroy xfrm_policy: descendant resources must be released to this moment. */

void __xfrm_policy_destroy(struct xfrm_policy *policy)
{
	BUG_ON(!policy->dead);

	BUG_ON(policy->bundles);

	if (del_timer(&policy->timer))
		BUG();

	security_xfrm_policy_free(policy);
	kfree(policy);
}
EXPORT_SYMBOL(__xfrm_policy_destroy);

static void xfrm_policy_gc_kill(struct xfrm_policy *policy)
{
	struct dst_entry *dst;

	while ((dst = policy->bundles) != NULL) {
		policy->bundles = dst->next;
		dst_free(dst);
	}

	if (del_timer(&policy->timer))
		atomic_dec(&policy->refcnt);

	if (atomic_read(&policy->refcnt) > 1)
		flow_cache_flush();

	xfrm_pol_put(policy);
}

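/* Deferred garbage collection: dead policies are parked on
 * xfrm_policy_gc_list and reaped from workqueue context, where it is safe
 * to free their cached bundles.
 */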
static void xfrm_policy_gc_task(struct work_struct *work)
{
	struct xfrm_policy *policy;
	struct hlist_node *entry, *tmp;
	struct hlist_head gc_list;

	spin_lock_bh(&xfrm_policy_gc_lock);
	gc_list.first = xfrm_policy_gc_list.first;
	INIT_HLIST_HEAD(&xfrm_policy_gc_list);
	spin_unlock_bh(&xfrm_policy_gc_lock);

	hlist_for_each_entry_safe(policy, entry, tmp, &gc_list, bydst)
		xfrm_policy_gc_kill(policy);
}

/* Rule must be locked. Release descendant resources, announce the entry
 * dead. The rule must already be unlinked from the lists.
 */

static void xfrm_policy_kill(struct xfrm_policy *policy)
{
	int dead;

	write_lock_bh(&policy->lock);
	dead = policy->dead;
	policy->dead = 1;
	write_unlock_bh(&policy->lock);

	if (unlikely(dead)) {
		WARN_ON(1);
		return;
	}

	spin_lock(&xfrm_policy_gc_lock);
	hlist_add_head(&policy->bydst, &xfrm_policy_gc_list);
	spin_unlock(&xfrm_policy_gc_lock);

	schedule_work(&xfrm_policy_gc_work);
}

struct xfrm_policy_hash {
	struct hlist_head	*table;
	unsigned int		hmask;
};

static struct hlist_head xfrm_policy_inexact[XFRM_POLICY_MAX*2];
static struct xfrm_policy_hash xfrm_policy_bydst[XFRM_POLICY_MAX*2] __read_mostly;
static struct hlist_head *xfrm_policy_byidx __read_mostly;
static unsigned int xfrm_idx_hmask __read_mostly;
static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;

static inline unsigned int idx_hash(u32 index)
{
	return __idx_hash(index, xfrm_idx_hmask);
}

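/* Map a selector to its hash chain.  Only fully-specified selectors hash
 * into the per-direction bydst table; prefixed selectors (for which
 * __sel_hash() returns hmask + 1) live on the unhashed "inexact" list.
 */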
static struct hlist_head *policy_hash_bysel(struct xfrm_selector *sel, unsigned short family, int dir)
{
	unsigned int hmask = xfrm_policy_bydst[dir].hmask;
	unsigned int hash = __sel_hash(sel, family, hmask);

	return (hash == hmask + 1 ?
		&xfrm_policy_inexact[dir] :
		xfrm_policy_bydst[dir].table + hash);
}

static struct hlist_head *policy_hash_direct(xfrm_address_t *daddr, xfrm_address_t *saddr, unsigned short family, int dir)
{
	unsigned int hmask = xfrm_policy_bydst[dir].hmask;
	unsigned int hash = __addr_hash(daddr, saddr, family, hmask);

	return xfrm_policy_bydst[dir].table + hash;
}

static void xfrm_dst_hash_transfer(struct hlist_head *list,
				   struct hlist_head *ndsttable,
				   unsigned int nhashmask)
{
	struct hlist_node *entry, *tmp;
	struct xfrm_policy *pol;

	hlist_for_each_entry_safe(pol, entry, tmp, list, bydst) {
		unsigned int h;

		h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
				pol->family, nhashmask);
		hlist_add_head(&pol->bydst, ndsttable+h);
	}
}

static void xfrm_idx_hash_transfer(struct hlist_head *list,
				   struct hlist_head *nidxtable,
				   unsigned int nhashmask)
{
	struct hlist_node *entry, *tmp;
	struct xfrm_policy *pol;

	hlist_for_each_entry_safe(pol, entry, tmp, list, byidx) {
		unsigned int h;

		h = __idx_hash(pol->index, nhashmask);
		hlist_add_head(&pol->byidx, nidxtable+h);
	}
}

static unsigned long xfrm_new_hash_mask(unsigned int old_hmask)
{
	return ((old_hmask + 1) << 1) - 1;
}

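/* Double a policy hash table and rehash every entry under the policy
 * lock; the old table is freed afterwards.
 */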
static void xfrm_bydst_resize(int dir)
{
	unsigned int hmask = xfrm_policy_bydst[dir].hmask;
	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
	struct hlist_head *odst = xfrm_policy_bydst[dir].table;
	struct hlist_head *ndst = xfrm_hash_alloc(nsize);
	int i;

	if (!ndst)
		return;

	write_lock_bh(&xfrm_policy_lock);

	for (i = hmask; i >= 0; i--)
		xfrm_dst_hash_transfer(odst + i, ndst, nhashmask);

	xfrm_policy_bydst[dir].table = ndst;
	xfrm_policy_bydst[dir].hmask = nhashmask;

	write_unlock_bh(&xfrm_policy_lock);

	xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head));
}

static void xfrm_byidx_resize(int total)
{
	unsigned int hmask = xfrm_idx_hmask;
	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
	struct hlist_head *oidx = xfrm_policy_byidx;
	struct hlist_head *nidx = xfrm_hash_alloc(nsize);
	int i;

	if (!nidx)
		return;

	write_lock_bh(&xfrm_policy_lock);

	for (i = hmask; i >= 0; i--)
		xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask);

	xfrm_policy_byidx = nidx;
	xfrm_idx_hmask = nhashmask;

	write_unlock_bh(&xfrm_policy_lock);

	xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head));
}

static inline int xfrm_bydst_should_resize(int dir, int *total)
{
	unsigned int cnt = xfrm_policy_count[dir];
	unsigned int hmask = xfrm_policy_bydst[dir].hmask;

	if (total)
		*total += cnt;

	if ((hmask + 1) < xfrm_policy_hashmax &&
	    cnt > hmask)
		return 1;

	return 0;
}

static inline int xfrm_byidx_should_resize(int total)
{
	unsigned int hmask = xfrm_idx_hmask;

	if ((hmask + 1) < xfrm_policy_hashmax &&
	    total > hmask)
		return 1;

	return 0;
}

void xfrm_spd_getinfo(struct xfrmk_spdinfo *si)
{
	read_lock_bh(&xfrm_policy_lock);
	si->incnt = xfrm_policy_count[XFRM_POLICY_IN];
	si->outcnt = xfrm_policy_count[XFRM_POLICY_OUT];
	si->fwdcnt = xfrm_policy_count[XFRM_POLICY_FWD];
	si->inscnt = xfrm_policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX];
	si->outscnt = xfrm_policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX];
	si->fwdscnt = xfrm_policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
	si->spdhcnt = xfrm_idx_hmask;
	si->spdhmcnt = xfrm_policy_hashmax;
	read_unlock_bh(&xfrm_policy_lock);
}
EXPORT_SYMBOL(xfrm_spd_getinfo);

static DEFINE_MUTEX(hash_resize_mutex);

static void xfrm_hash_resize(struct work_struct *__unused)
{
	int dir, total;

	mutex_lock(&hash_resize_mutex);

	total = 0;
	for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
		if (xfrm_bydst_should_resize(dir, &total))
			xfrm_bydst_resize(dir);
	}
	if (xfrm_byidx_should_resize(total))
		xfrm_byidx_resize(total);

	mutex_unlock(&hash_resize_mutex);
}

static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize);

/* Generate a new index... KAME seems to generate them ordered by cost
 * at the price of completely unpredictable rule ordering. That will not
 * do here.
 */
static u32 xfrm_gen_index(u8 type, int dir)
{
	static u32 idx_generator;

	for (;;) {
		struct hlist_node *entry;
		struct hlist_head *list;
		struct xfrm_policy *p;
		u32 idx;
		int found;

		idx = (idx_generator | dir);
		idx_generator += 8;
		if (idx == 0)
			idx = 8;
		list = xfrm_policy_byidx + idx_hash(idx);
		found = 0;
		hlist_for_each_entry(p, entry, list, byidx) {
			if (p->index == idx) {
				found = 1;
				break;
			}
		}
		if (!found)
			return idx;
	}
}

static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s2)
{
	u32 *p1 = (u32 *) s1;
	u32 *p2 = (u32 *) s2;
	int len = sizeof(struct xfrm_selector) / sizeof(u32);
	int i;

	for (i = 0; i < len; i++) {
		if (p1[i] != p2[i])
			return 1;
	}

	return 0;
}

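/* Insert a policy into its hash chain, which is kept sorted by priority.
 * A policy with an identical (type, selector, security context) tuple
 * replaces the old one, or fails with -EEXIST for an exclusive insert,
 * and the cached bundles of every policy behind the new entry on the
 * chain are flushed.
 */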
int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
{
	struct xfrm_policy *pol;
	struct xfrm_policy *delpol;
	struct hlist_head *chain;
	struct hlist_node *entry, *newpos;
	struct dst_entry *gc_list;

	write_lock_bh(&xfrm_policy_lock);
	chain = policy_hash_bysel(&policy->selector, policy->family, dir);
	delpol = NULL;
	newpos = NULL;
	hlist_for_each_entry(pol, entry, chain, bydst) {
		if (pol->type == policy->type &&
		    !selector_cmp(&pol->selector, &policy->selector) &&
		    xfrm_sec_ctx_match(pol->security, policy->security) &&
		    !WARN_ON(delpol)) {
			if (excl) {
				write_unlock_bh(&xfrm_policy_lock);
				return -EEXIST;
			}
			delpol = pol;
			if (policy->priority > pol->priority)
				continue;
		} else if (policy->priority >= pol->priority) {
			newpos = &pol->bydst;
			continue;
		}
		if (delpol)
			break;
	}
	if (newpos)
		hlist_add_after(newpos, &policy->bydst);
	else
		hlist_add_head(&policy->bydst, chain);
	xfrm_pol_hold(policy);
	xfrm_policy_count[dir]++;
	atomic_inc(&flow_cache_genid);
	if (delpol) {
		hlist_del(&delpol->bydst);
		hlist_del(&delpol->byidx);
		xfrm_policy_count[dir]--;
	}
	policy->index = delpol ? delpol->index : xfrm_gen_index(policy->type, dir);
	hlist_add_head(&policy->byidx, xfrm_policy_byidx+idx_hash(policy->index));
	policy->curlft.add_time = get_seconds();
	policy->curlft.use_time = 0;
	if (!mod_timer(&policy->timer, jiffies + HZ))
		xfrm_pol_hold(policy);
	write_unlock_bh(&xfrm_policy_lock);

	if (delpol)
		xfrm_policy_kill(delpol);
	else if (xfrm_bydst_should_resize(dir, NULL))
		schedule_work(&xfrm_hash_work);

	read_lock_bh(&xfrm_policy_lock);
	gc_list = NULL;
	entry = &policy->bydst;
	hlist_for_each_entry_continue(policy, entry, bydst) {
		struct dst_entry *dst;

		write_lock(&policy->lock);
		dst = policy->bundles;
		if (dst) {
			struct dst_entry *tail = dst;
			while (tail->next)
				tail = tail->next;
			tail->next = gc_list;
			gc_list = dst;

			policy->bundles = NULL;
		}
		write_unlock(&policy->lock);
	}
	read_unlock_bh(&xfrm_policy_lock);

	while (gc_list) {
		struct dst_entry *dst = gc_list;

		gc_list = dst->next;
		dst_free(dst);
	}

	return 0;
}
EXPORT_SYMBOL(xfrm_policy_insert);

struct xfrm_policy *xfrm_policy_bysel_ctx(u8 type, int dir,
					  struct xfrm_selector *sel,
					  struct xfrm_sec_ctx *ctx, int delete,
					  int *err)
{
	struct xfrm_policy *pol, *ret;
	struct hlist_head *chain;
	struct hlist_node *entry;

	*err = 0;
	write_lock_bh(&xfrm_policy_lock);
	chain = policy_hash_bysel(sel, sel->family, dir);
	ret = NULL;
	hlist_for_each_entry(pol, entry, chain, bydst) {
		if (pol->type == type &&
		    !selector_cmp(sel, &pol->selector) &&
		    xfrm_sec_ctx_match(ctx, pol->security)) {
			xfrm_pol_hold(pol);
			if (delete) {
				*err = security_xfrm_policy_delete(pol);
				if (*err) {
					write_unlock_bh(&xfrm_policy_lock);
					return pol;
				}
				hlist_del(&pol->bydst);
				hlist_del(&pol->byidx);
				xfrm_policy_count[dir]--;
			}
			ret = pol;
			break;
		}
	}
	write_unlock_bh(&xfrm_policy_lock);

	if (ret && delete) {
		atomic_inc(&flow_cache_genid);
		xfrm_policy_kill(ret);
	}
	return ret;
}
EXPORT_SYMBOL(xfrm_policy_bysel_ctx);

struct xfrm_policy *xfrm_policy_byid(u8 type, int dir, u32 id, int delete,
				     int *err)
{
	struct xfrm_policy *pol, *ret;
	struct hlist_head *chain;
	struct hlist_node *entry;

	*err = -ENOENT;
	if (xfrm_policy_id2dir(id) != dir)
		return NULL;

	*err = 0;
	write_lock_bh(&xfrm_policy_lock);
	chain = xfrm_policy_byidx + idx_hash(id);
	ret = NULL;
	hlist_for_each_entry(pol, entry, chain, byidx) {
		if (pol->type == type && pol->index == id) {
			xfrm_pol_hold(pol);
			if (delete) {
				*err = security_xfrm_policy_delete(pol);
				if (*err) {
					write_unlock_bh(&xfrm_policy_lock);
					return pol;
				}
				hlist_del(&pol->bydst);
				hlist_del(&pol->byidx);
				xfrm_policy_count[dir]--;
			}
			ret = pol;
			break;
		}
	}
	write_unlock_bh(&xfrm_policy_lock);

	if (ret && delete) {
		atomic_inc(&flow_cache_genid);
		xfrm_policy_kill(ret);
	}
	return ret;
}
EXPORT_SYMBOL(xfrm_policy_byid);

#ifdef CONFIG_SECURITY_NETWORK_XFRM
static inline int
xfrm_policy_flush_secctx_check(u8 type, struct xfrm_audit *audit_info)
{
	int dir, err = 0;

	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		struct xfrm_policy *pol;
		struct hlist_node *entry;
		int i;

		hlist_for_each_entry(pol, entry,
				     &xfrm_policy_inexact[dir], bydst) {
			if (pol->type != type)
				continue;
			err = security_xfrm_policy_delete(pol);
			if (err) {
				xfrm_audit_policy_delete(pol, 0,
							 audit_info->loginuid,
							 audit_info->secid);
				return err;
			}
		}
		for (i = xfrm_policy_bydst[dir].hmask; i >= 0; i--) {
			hlist_for_each_entry(pol, entry,
					     xfrm_policy_bydst[dir].table + i,
					     bydst) {
				if (pol->type != type)
					continue;
				err = security_xfrm_policy_delete(pol);
				if (err) {
					xfrm_audit_policy_delete(pol, 0,
							audit_info->loginuid,
							audit_info->secid);
					return err;
				}
			}
		}
	}
	return err;
}
#else
static inline int
xfrm_policy_flush_secctx_check(u8 type, struct xfrm_audit *audit_info)
{
	return 0;
}
#endif

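/* Flush every policy of the given type from the SPD.  The policy lock is
 * dropped and re-taken around each kill so auditing and destruction run
 * without it, and each hash-chain walk restarts after an unlink.
 */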
int xfrm_policy_flush(u8 type, struct xfrm_audit *audit_info)
{
	int dir, err = 0;

	write_lock_bh(&xfrm_policy_lock);

	err = xfrm_policy_flush_secctx_check(type, audit_info);
	if (err)
		goto out;

	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		struct xfrm_policy *pol;
		struct hlist_node *entry;
		int i, killed;

		killed = 0;
	again1:
		hlist_for_each_entry(pol, entry,
				     &xfrm_policy_inexact[dir], bydst) {
			if (pol->type != type)
				continue;
			hlist_del(&pol->bydst);
			hlist_del(&pol->byidx);
			write_unlock_bh(&xfrm_policy_lock);

			xfrm_audit_policy_delete(pol, 1, audit_info->loginuid,
						 audit_info->secid);

			xfrm_policy_kill(pol);
			killed++;

			write_lock_bh(&xfrm_policy_lock);
			goto again1;
		}

		for (i = xfrm_policy_bydst[dir].hmask; i >= 0; i--) {
	again2:
			hlist_for_each_entry(pol, entry,
					     xfrm_policy_bydst[dir].table + i,
					     bydst) {
				if (pol->type != type)
					continue;
				hlist_del(&pol->bydst);
				hlist_del(&pol->byidx);
				write_unlock_bh(&xfrm_policy_lock);

				xfrm_audit_policy_delete(pol, 1,
							 audit_info->loginuid,
							 audit_info->secid);
				xfrm_policy_kill(pol);
				killed++;

				write_lock_bh(&xfrm_policy_lock);
				goto again2;
			}
		}

		xfrm_policy_count[dir] -= killed;
	}
	atomic_inc(&flow_cache_genid);
out:
	write_unlock_bh(&xfrm_policy_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_policy_flush);

int xfrm_policy_walk(u8 type, int (*func)(struct xfrm_policy *, int, int, void*),
		     void *data)
{
	struct xfrm_policy *pol, *last = NULL;
	struct hlist_node *entry;
	int dir, last_dir = 0, count, error;

	read_lock_bh(&xfrm_policy_lock);
	count = 0;

	for (dir = 0; dir < 2*XFRM_POLICY_MAX; dir++) {
		struct hlist_head *table = xfrm_policy_bydst[dir].table;
		int i;

		hlist_for_each_entry(pol, entry,
				     &xfrm_policy_inexact[dir], bydst) {
			if (pol->type != type)
				continue;
			if (last) {
				error = func(last, last_dir % XFRM_POLICY_MAX,
					     count, data);
				if (error)
					goto out;
			}
			last = pol;
			last_dir = dir;
			count++;
		}
		for (i = xfrm_policy_bydst[dir].hmask; i >= 0; i--) {
			hlist_for_each_entry(pol, entry, table + i, bydst) {
				if (pol->type != type)
					continue;
				if (last) {
					error = func(last, last_dir % XFRM_POLICY_MAX,
						     count, data);
					if (error)
						goto out;
				}
				last = pol;
				last_dir = dir;
				count++;
			}
		}
	}
	if (count == 0) {
		error = -ENOENT;
		goto out;
	}
	error = func(last, last_dir % XFRM_POLICY_MAX, 0, data);
out:
	read_unlock_bh(&xfrm_policy_lock);
	return error;
}
EXPORT_SYMBOL(xfrm_policy_walk);

/*
 * Find policy to apply to this flow.
 *
 * Returns 0 if policy found, else an -errno.
 */
static int xfrm_policy_match(struct xfrm_policy *pol, struct flowi *fl,
			     u8 type, u16 family, int dir)
{
	struct xfrm_selector *sel = &pol->selector;
	int match, ret = -ESRCH;

	if (pol->family != family ||
	    pol->type != type)
		return ret;

	match = xfrm_selector_match(sel, fl, family);
	if (match)
		ret = security_xfrm_policy_lookup(pol, fl->secid, dir);

	return ret;
}

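/* Find the best-matching policy for a flow: scan the exact bydst chain
 * first, then the inexact list, preferring the match with the lowest
 * priority value.
 */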
static struct xfrm_policy *xfrm_policy_lookup_bytype(u8 type, struct flowi *fl,
						     u16 family, u8 dir)
{
	int err;
	struct xfrm_policy *pol, *ret;
	xfrm_address_t *daddr, *saddr;
	struct hlist_node *entry;
	struct hlist_head *chain;
	u32 priority = ~0U;

	daddr = xfrm_flowi_daddr(fl, family);
	saddr = xfrm_flowi_saddr(fl, family);
	if (unlikely(!daddr || !saddr))
		return NULL;

	read_lock_bh(&xfrm_policy_lock);
	chain = policy_hash_direct(daddr, saddr, family, dir);
	ret = NULL;
	hlist_for_each_entry(pol, entry, chain, bydst) {
		err = xfrm_policy_match(pol, fl, type, family, dir);
		if (err) {
			if (err == -ESRCH)
				continue;
			else {
				ret = ERR_PTR(err);
				goto fail;
			}
		} else {
			ret = pol;
			priority = ret->priority;
			break;
		}
	}
	chain = &xfrm_policy_inexact[dir];
	hlist_for_each_entry(pol, entry, chain, bydst) {
		err = xfrm_policy_match(pol, fl, type, family, dir);
		if (err) {
			if (err == -ESRCH)
				continue;
			else {
				ret = ERR_PTR(err);
				goto fail;
			}
		} else if (pol->priority < priority) {
			ret = pol;
			break;
		}
	}
	if (ret)
		xfrm_pol_hold(ret);
fail:
	read_unlock_bh(&xfrm_policy_lock);

	return ret;
}

static int xfrm_policy_lookup(struct flowi *fl, u16 family, u8 dir,
			      void **objp, atomic_t **obj_refp)
{
	struct xfrm_policy *pol;
	int err = 0;

#ifdef CONFIG_XFRM_SUB_POLICY
	pol = xfrm_policy_lookup_bytype(XFRM_POLICY_TYPE_SUB, fl, family, dir);
	if (IS_ERR(pol)) {
		err = PTR_ERR(pol);
		pol = NULL;
	}
	if (pol || err)
		goto end;
#endif
	pol = xfrm_policy_lookup_bytype(XFRM_POLICY_TYPE_MAIN, fl, family, dir);
	if (IS_ERR(pol)) {
		err = PTR_ERR(pol);
		pol = NULL;
	}
#ifdef CONFIG_XFRM_SUB_POLICY
end:
#endif
	if ((*objp = (void *) pol) != NULL)
		*obj_refp = &pol->refcnt;
	return err;
}

static inline int policy_to_flow_dir(int dir)
{
	if (XFRM_POLICY_IN == FLOW_DIR_IN &&
	    XFRM_POLICY_OUT == FLOW_DIR_OUT &&
	    XFRM_POLICY_FWD == FLOW_DIR_FWD)
		return dir;
	switch (dir) {
	default:
	case XFRM_POLICY_IN:
		return FLOW_DIR_IN;
	case XFRM_POLICY_OUT:
		return FLOW_DIR_OUT;
	case XFRM_POLICY_FWD:
		return FLOW_DIR_FWD;
	}
}

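/* Per-socket policies (installed via xfrm_sk_policy_insert()) take
 * precedence over the SPD; look one up and check it against the flow and
 * the LSM.
 */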
static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir, struct flowi *fl)
{
	struct xfrm_policy *pol;

	read_lock_bh(&xfrm_policy_lock);
	if ((pol = sk->sk_policy[dir]) != NULL) {
		int match = xfrm_selector_match(&pol->selector, fl,
						sk->sk_family);
		int err = 0;

		if (match) {
			err = security_xfrm_policy_lookup(pol, fl->secid,
					policy_to_flow_dir(dir));
			if (!err)
				xfrm_pol_hold(pol);
			else if (err == -ESRCH)
				pol = NULL;
			else
				pol = ERR_PTR(err);
		} else
			pol = NULL;
	}
	read_unlock_bh(&xfrm_policy_lock);

	return pol;
}

static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
{
	struct hlist_head *chain = policy_hash_bysel(&pol->selector,
						     pol->family, dir);

	hlist_add_head(&pol->bydst, chain);
	hlist_add_head(&pol->byidx, xfrm_policy_byidx+idx_hash(pol->index));
	xfrm_policy_count[dir]++;
	xfrm_pol_hold(pol);

	if (xfrm_bydst_should_resize(dir, NULL))
		schedule_work(&xfrm_hash_work);
}

static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
						int dir)
{
	if (hlist_unhashed(&pol->bydst))
		return NULL;

	hlist_del(&pol->bydst);
	hlist_del(&pol->byidx);
	xfrm_policy_count[dir]--;

	return pol;
}

int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
{
	write_lock_bh(&xfrm_policy_lock);
	pol = __xfrm_policy_unlink(pol, dir);
	write_unlock_bh(&xfrm_policy_lock);
	if (pol) {
		if (dir < XFRM_POLICY_MAX)
			atomic_inc(&flow_cache_genid);
		xfrm_policy_kill(pol);
		return 0;
	}
	return -ENOENT;
}
EXPORT_SYMBOL(xfrm_policy_delete);

int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
{
	struct xfrm_policy *old_pol;

#ifdef CONFIG_XFRM_SUB_POLICY
	if (pol && pol->type != XFRM_POLICY_TYPE_MAIN)
		return -EINVAL;
#endif

	write_lock_bh(&xfrm_policy_lock);
	old_pol = sk->sk_policy[dir];
	sk->sk_policy[dir] = pol;
	if (pol) {
		pol->curlft.add_time = get_seconds();
		pol->index = xfrm_gen_index(pol->type, XFRM_POLICY_MAX+dir);
		__xfrm_policy_link(pol, XFRM_POLICY_MAX+dir);
	}
	if (old_pol)
		__xfrm_policy_unlink(old_pol, XFRM_POLICY_MAX+dir);
	write_unlock_bh(&xfrm_policy_lock);

	if (old_pol) {
		xfrm_policy_kill(old_pol);
	}
	return 0;
}

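/* Duplicate a socket policy for a child socket.  The clone gets its own
 * security context and is linked into the per-socket part of the SPD.
 */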
static struct xfrm_policy *clone_policy(struct xfrm_policy *old, int dir)
{
	struct xfrm_policy *newp = xfrm_policy_alloc(GFP_ATOMIC);

	if (newp) {
		newp->selector = old->selector;
		if (security_xfrm_policy_clone(old, newp)) {
			kfree(newp);
			return NULL;	/* ENOMEM */
		}
		newp->lft = old->lft;
		newp->curlft = old->curlft;
		newp->action = old->action;
		newp->flags = old->flags;
		newp->xfrm_nr = old->xfrm_nr;
		newp->index = old->index;
		newp->type = old->type;
		memcpy(newp->xfrm_vec, old->xfrm_vec,
		       newp->xfrm_nr*sizeof(struct xfrm_tmpl));
		write_lock_bh(&xfrm_policy_lock);
		__xfrm_policy_link(newp, XFRM_POLICY_MAX+dir);
		write_unlock_bh(&xfrm_policy_lock);
		xfrm_pol_put(newp);
	}
	return newp;
}

int __xfrm_sk_clone_policy(struct sock *sk)
{
	struct xfrm_policy *p0 = sk->sk_policy[0],
			   *p1 = sk->sk_policy[1];

	sk->sk_policy[0] = sk->sk_policy[1] = NULL;
	if (p0 && (sk->sk_policy[0] = clone_policy(p0, 0)) == NULL)
		return -ENOMEM;
	if (p1 && (sk->sk_policy[1] = clone_policy(p1, 1)) == NULL)
		return -ENOMEM;
	return 0;
}

static int
xfrm_get_saddr(xfrm_address_t *local, xfrm_address_t *remote,
	       unsigned short family)
{
	int err;
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);

	if (unlikely(afinfo == NULL))
		return -EINVAL;
	err = afinfo->get_saddr(local, remote);
	xfrm_policy_put_afinfo(afinfo);
	return err;
}

/* Resolve list of templates for the flow, given policy. */

static int
xfrm_tmpl_resolve_one(struct xfrm_policy *policy, struct flowi *fl,
		      struct xfrm_state **xfrm,
		      unsigned short family)
{
	int nx;
	int i, error;
	xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
	xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);
	xfrm_address_t tmp;

	for (nx=0, i = 0; i < policy->xfrm_nr; i++) {
		struct xfrm_state *x;
		xfrm_address_t *remote = daddr;
		xfrm_address_t *local  = saddr;
		struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];

		if (tmpl->mode == XFRM_MODE_TUNNEL ||
		    tmpl->mode == XFRM_MODE_BEET) {
			remote = &tmpl->id.daddr;
			local = &tmpl->saddr;
			family = tmpl->encap_family;
			if (xfrm_addr_any(local, family)) {
				error = xfrm_get_saddr(&tmp, remote, family);
				if (error)
					goto fail;
				local = &tmp;
			}
		}

		x = xfrm_state_find(remote, local, fl, tmpl, policy, &error, family);

		if (x && x->km.state == XFRM_STATE_VALID) {
			xfrm[nx++] = x;
			daddr = remote;
			saddr = local;
			continue;
		}
		if (x) {
			error = (x->km.state == XFRM_STATE_ERROR ?
				 -EINVAL : -EAGAIN);
			xfrm_state_put(x);
		}

		if (!tmpl->optional)
			goto fail;
	}
	return nx;

fail:
	for (nx--; nx>=0; nx--)
		xfrm_state_put(xfrm[nx]);
	return error;
}

static int
xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, struct flowi *fl,
		  struct xfrm_state **xfrm,
		  unsigned short family)
{
	struct xfrm_state *tp[XFRM_MAX_DEPTH];
	struct xfrm_state **tpp = (npols > 1) ? tp : xfrm;
	int cnx = 0;
	int error;
	int ret;
	int i;

	for (i = 0; i < npols; i++) {
		if (cnx + pols[i]->xfrm_nr >= XFRM_MAX_DEPTH) {
			error = -ENOBUFS;
			goto fail;
		}

		ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family);
		if (ret < 0) {
			error = ret;
			goto fail;
		} else
			cnx += ret;
	}

	/* found states are sorted for outbound processing */
	if (npols > 1)
		xfrm_state_sort(xfrm, tpp, cnx, family);

	return cnx;

fail:
	for (cnx--; cnx>=0; cnx--)
		xfrm_state_put(tpp[cnx]);
	return error;
}

/* Check that the bundle accepts the flow and its components are
 * still valid.
 */

static struct dst_entry *
xfrm_find_bundle(struct flowi *fl, struct xfrm_policy *policy, unsigned short family)
{
	struct dst_entry *x;
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return ERR_PTR(-EINVAL);
	x = afinfo->find_bundle(fl, policy);
	xfrm_policy_put_afinfo(afinfo);
	return x;
}

/* Allocate a chain of dst_entry's, attach known xfrm's, calculate
 * all the metrics... In short, bundle a bundle.
 */

static int
xfrm_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int nx,
		   struct flowi *fl, struct dst_entry **dst_p,
		   unsigned short family)
{
	int err;
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return -EINVAL;
	err = afinfo->bundle_create(policy, xfrm, nx, fl, dst_p);
	xfrm_policy_put_afinfo(afinfo);
	return err;
}

static inline int
xfrm_dst_alloc_copy(void **target, void *src, int size)
{
	if (!*target) {
		*target = kmalloc(size, GFP_ATOMIC);
		if (!*target)
			return -ENOMEM;
	}
	memcpy(*target, src, size);
	return 0;
}

static inline int
xfrm_dst_update_parent(struct dst_entry *dst, struct xfrm_selector *sel)
{
#ifdef CONFIG_XFRM_SUB_POLICY
	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
	return xfrm_dst_alloc_copy((void **)&(xdst->partner),
				   sel, sizeof(*sel));
#else
	return 0;
#endif
}

static inline int
xfrm_dst_update_origin(struct dst_entry *dst, struct flowi *fl)
{
#ifdef CONFIG_XFRM_SUB_POLICY
	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
	return xfrm_dst_alloc_copy((void **)&(xdst->origin), fl, sizeof(*fl));
#else
	return 0;
#endif
}

static int stale_bundle(struct dst_entry *dst);

/* Main function: finds/creates a bundle for given flow.
 *
 * At the moment we eat a raw IP route. Mostly to speed up lookups
 * on interfaces with disabled IPsec.
 */
int __xfrm_lookup(struct dst_entry **dst_p, struct flowi *fl,
		  struct sock *sk, int flags)
{
	struct xfrm_policy *policy;
	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
	int npols;
	int pol_dead;
	int xfrm_nr;
	int pi;
	struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
	struct dst_entry *dst, *dst_orig = *dst_p;
	int nx = 0;
	int err;
	u32 genid;
	u16 family;
	u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT);

restart:
	genid = atomic_read(&flow_cache_genid);
	policy = NULL;
	for (pi = 0; pi < ARRAY_SIZE(pols); pi++)
		pols[pi] = NULL;
	npols = 0;
	pol_dead = 0;
	xfrm_nr = 0;

	if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
		policy = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl);
		err = PTR_ERR(policy);
		if (IS_ERR(policy))
			goto dropdst;
	}

	if (!policy) {
		/* To accelerate a bit... */
		if ((dst_orig->flags & DST_NOXFRM) ||
		    !xfrm_policy_count[XFRM_POLICY_OUT])
			return 0;

		policy = flow_cache_lookup(fl, dst_orig->ops->family,
					   dir, xfrm_policy_lookup);
		err = PTR_ERR(policy);
		if (IS_ERR(policy))
			goto dropdst;
	}

	if (!policy)
		return 0;

	family = dst_orig->ops->family;
	policy->curlft.use_time = get_seconds();
	pols[0] = policy;
	npols ++;
	xfrm_nr += pols[0]->xfrm_nr;

	switch (policy->action) {
	default:
	case XFRM_POLICY_BLOCK:
		/* Prohibit the flow */
		err = -EPERM;
		goto error;

	case XFRM_POLICY_ALLOW:
#ifndef CONFIG_XFRM_SUB_POLICY
		if (policy->xfrm_nr == 0) {
			/* Flow passes not transformed. */
			xfrm_pol_put(policy);
			return 0;
		}
#endif

		/* Try to find matching bundle.
		 *
		 * LATER: help from flow cache. It is optional, this
		 * is required only for output policy.
		 */
		dst = xfrm_find_bundle(fl, policy, family);
		if (IS_ERR(dst)) {
			err = PTR_ERR(dst);
			goto error;
		}

		if (dst)
			break;

#ifdef CONFIG_XFRM_SUB_POLICY
		if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
			pols[1] = xfrm_policy_lookup_bytype(XFRM_POLICY_TYPE_MAIN,
							    fl, family,
							    XFRM_POLICY_OUT);
			if (pols[1]) {
				if (IS_ERR(pols[1])) {
					err = PTR_ERR(pols[1]);
					goto error;
				}
				if (pols[1]->action == XFRM_POLICY_BLOCK) {
					err = -EPERM;
					goto error;
				}
				npols ++;
				xfrm_nr += pols[1]->xfrm_nr;
			}
		}

		/*
		 * Neither flowi nor the bundle information knows the
		 * transformation template count.  When more than one policy
		 * is in use we can tell whether all of them are bypasses
		 * only after they have all been looked up; note that the
		 * not-transformed bypass above is likewise guarded by the
		 * non-sub-policy configuration.
		 */
		if (xfrm_nr == 0) {
			/* Flow passes not transformed. */
			xfrm_pols_put(pols, npols);
			return 0;
		}

#endif
		nx = xfrm_tmpl_resolve(pols, npols, fl, xfrm, family);

		if (unlikely(nx<0)) {
			err = nx;
			if (err == -EAGAIN && sysctl_xfrm_larval_drop) {
				/* EREMOTE tells the caller to generate
				 * a one-shot blackhole route.
				 */
				xfrm_pol_put(policy);
				return -EREMOTE;
			}
			if (err == -EAGAIN && flags) {
				DECLARE_WAITQUEUE(wait, current);

				add_wait_queue(&km_waitq, &wait);
				set_current_state(TASK_INTERRUPTIBLE);
				schedule();
				set_current_state(TASK_RUNNING);
				remove_wait_queue(&km_waitq, &wait);

				nx = xfrm_tmpl_resolve(pols, npols, fl, xfrm, family);

				if (nx == -EAGAIN && signal_pending(current)) {
					err = -ERESTART;
					goto error;
				}
				if (nx == -EAGAIN ||
				    genid != atomic_read(&flow_cache_genid)) {
					xfrm_pols_put(pols, npols);
					goto restart;
				}
				err = nx;
			}
			if (err < 0)
				goto error;
		}
		if (nx == 0) {
			/* Flow passes not transformed. */
			xfrm_pols_put(pols, npols);
			return 0;
		}

		dst = dst_orig;
		err = xfrm_bundle_create(policy, xfrm, nx, fl, &dst, family);

		if (unlikely(err)) {
			int i;
			for (i=0; i<nx; i++)
				xfrm_state_put(xfrm[i]);
			goto error;
		}

		for (pi = 0; pi < npols; pi++) {
			read_lock_bh(&pols[pi]->lock);
			pol_dead |= pols[pi]->dead;
			read_unlock_bh(&pols[pi]->lock);
		}

		write_lock_bh(&policy->lock);
		if (unlikely(pol_dead || stale_bundle(dst))) {
			/* Wow! While we worked on resolving, this
			 * policy has gone. Retry. It is not paranoia,
			 * we just cannot enlist new bundle to dead object.
			 * We can't enlist stale bundles either.
			 */
			write_unlock_bh(&policy->lock);
			if (dst)
				dst_free(dst);

			err = -EHOSTUNREACH;
			goto error;
		}

		if (npols > 1)
			err = xfrm_dst_update_parent(dst, &pols[1]->selector);
		else
			err = xfrm_dst_update_origin(dst, fl);
		if (unlikely(err)) {
			write_unlock_bh(&policy->lock);
			if (dst)
				dst_free(dst);
			goto error;
		}

		dst->next = policy->bundles;
		policy->bundles = dst;
		dst_hold(dst);
		write_unlock_bh(&policy->lock);
	}
	*dst_p = dst;
	dst_release(dst_orig);
	xfrm_pols_put(pols, npols);
	return 0;

error:
	xfrm_pols_put(pols, npols);
dropdst:
	dst_release(dst_orig);
	*dst_p = NULL;
	return err;
}
EXPORT_SYMBOL(__xfrm_lookup);

int xfrm_lookup(struct dst_entry **dst_p, struct flowi *fl,
		struct sock *sk, int flags)
{
	int err = __xfrm_lookup(dst_p, fl, sk, flags);

	if (err == -EREMOTE) {
		dst_release(*dst_p);
		*dst_p = NULL;
		err = -EAGAIN;
	}

	return err;
}
EXPORT_SYMBOL(xfrm_lookup);

static inline int
xfrm_secpath_reject(int idx, struct sk_buff *skb, struct flowi *fl)
{
	struct xfrm_state *x;

	if (!skb->sp || idx < 0 || idx >= skb->sp->len)
		return 0;
	x = skb->sp->xvec[idx];
	if (!x->type->reject)
		return 0;
	return x->type->reject(x, skb, fl);
}

/* When skb is transformed back to its "native" form, we have to
 * check policy restrictions. At the moment we do this in a maximally
 * stupid way. Shame on me. :-) Of course, connected sockets must
 * have their policy cached.
 */

static inline int
xfrm_state_ok(struct xfrm_tmpl *tmpl, struct xfrm_state *x,
	      unsigned short family)
{
	if (xfrm_state_kern(x))
		return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family);
	return	x->id.proto == tmpl->id.proto &&
		(x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
		(x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
		x->props.mode == tmpl->mode &&
		((tmpl->aalgos & (1<<x->props.aalgo)) ||
		 !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
		!(x->props.mode != XFRM_MODE_TRANSPORT &&
		  xfrm_state_addr_cmp(tmpl, x, family));
}

/*
 * 0 or more than 0 is returned when validation succeeds (either bypass
 * because of optional transport mode, or the next index of the matched
 * secpath state with the template).
 * -1 is returned when no matching template is found.
 * Otherwise "-2 - errored_index" is returned.
 */
static inline int
xfrm_policy_ok(struct xfrm_tmpl *tmpl, struct sec_path *sp, int start,
	       unsigned short family)
{
	int idx = start;

	if (tmpl->optional) {
		if (tmpl->mode == XFRM_MODE_TRANSPORT)
			return start;
	} else
		start = -1;
	for (; idx < sp->len; idx++) {
		if (xfrm_state_ok(tmpl, sp->xvec[idx], family))
			return ++idx;
		if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) {
			if (start == -1)
				start = -2-(int)idx;
			break;
		}
	}
	return start;
}

int
xfrm_decode_session(struct sk_buff *skb, struct flowi *fl, unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	int err;

	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	afinfo->decode_session(skb, fl);
	err = security_xfrm_decode_session(skb, &fl->secid);
	xfrm_policy_put_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_decode_session);

static inline int secpath_has_nontransport(struct sec_path *sp, int k, int *idxp)
{
	for (; k < sp->len; k++) {
		if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) {
			*idxp = k;
			return 1;
		}
	}

	return 0;
}

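/* Inbound policy check: decode the flow from the skb, verify every SA on
 * the secpath against its selector, then require that the sec_path
 * satisfies the template list of each applicable ALLOW policy.
 */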
int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
			unsigned short family)
{
	struct xfrm_policy *pol;
	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
	int npols = 0;
	int xfrm_nr;
	int pi;
	struct flowi fl;
	u8 fl_dir = policy_to_flow_dir(dir);
	int xerr_idx = -1;

	if (xfrm_decode_session(skb, &fl, family) < 0)
		return 0;
	nf_nat_decode_session(skb, &fl, family);

	/* First, check used SA against their selectors. */
	if (skb->sp) {
		int i;

		for (i=skb->sp->len-1; i>=0; i--) {
			struct xfrm_state *x = skb->sp->xvec[i];
			if (!xfrm_selector_match(&x->sel, &fl, family))
				return 0;
		}
	}

	pol = NULL;
	if (sk && sk->sk_policy[dir]) {
		pol = xfrm_sk_policy_lookup(sk, dir, &fl);
		if (IS_ERR(pol))
			return 0;
	}

	if (!pol)
		pol = flow_cache_lookup(&fl, family, fl_dir,
					xfrm_policy_lookup);

	if (IS_ERR(pol))
		return 0;

	if (!pol) {
		if (skb->sp && secpath_has_nontransport(skb->sp, 0, &xerr_idx)) {
			xfrm_secpath_reject(xerr_idx, skb, &fl);
			return 0;
		}
		return 1;
	}

	pol->curlft.use_time = get_seconds();

	pols[0] = pol;
	npols ++;
#ifdef CONFIG_XFRM_SUB_POLICY
	if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
		pols[1] = xfrm_policy_lookup_bytype(XFRM_POLICY_TYPE_MAIN,
						    &fl, family,
						    XFRM_POLICY_IN);
		if (pols[1]) {
			if (IS_ERR(pols[1]))
				return 0;
			pols[1]->curlft.use_time = get_seconds();
			npols ++;
		}
	}
#endif

	if (pol->action == XFRM_POLICY_ALLOW) {
		struct sec_path *sp;
		static struct sec_path dummy;
		struct xfrm_tmpl *tp[XFRM_MAX_DEPTH];
		struct xfrm_tmpl *stp[XFRM_MAX_DEPTH];
		struct xfrm_tmpl **tpp = tp;
		int ti = 0;
		int i, k;

		if ((sp = skb->sp) == NULL)
			sp = &dummy;

		for (pi = 0; pi < npols; pi++) {
			if (pols[pi] != pol &&
			    pols[pi]->action != XFRM_POLICY_ALLOW)
				goto reject;
			if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH)
				goto reject_error;
			for (i = 0; i < pols[pi]->xfrm_nr; i++)
				tpp[ti++] = &pols[pi]->xfrm_vec[i];
		}
		xfrm_nr = ti;
		if (npols > 1) {
			xfrm_tmpl_sort(stp, tpp, xfrm_nr, family);
			tpp = stp;
		}

		/* For each tunnel xfrm, find the first matching tmpl.
		 * For each tmpl before that, find corresponding xfrm.
		 * Order is _important_. Later we will implement
		 * some barriers, but at the moment barriers
		 * are implied between each two transformations.
		 */
		for (i = xfrm_nr-1, k = 0; i >= 0; i--) {
			k = xfrm_policy_ok(tpp[i], sp, k, family);
			if (k < 0) {
				if (k < -1)
					/* "-2 - errored_index" returned */
					xerr_idx = -(2+k);
				goto reject;
			}
		}

		if (secpath_has_nontransport(sp, k, &xerr_idx))
			goto reject;

		xfrm_pols_put(pols, npols);
		return 1;
	}

reject:
	xfrm_secpath_reject(xerr_idx, skb, &fl);
reject_error:
	xfrm_pols_put(pols, npols);
	return 0;
}
EXPORT_SYMBOL(__xfrm_policy_check);

int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
{
	struct flowi fl;

	if (xfrm_decode_session(skb, &fl, family) < 0)
		return 0;

	return xfrm_lookup(&skb->dst, &fl, NULL, 0) == 0;
}
EXPORT_SYMBOL(__xfrm_route_forward);

/* Optimize later using cookies and generation ids. */

static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
{
	/* Code (such as __xfrm4_bundle_create()) sets dst->obsolete
	 * to "-1" to force all XFRM destinations to get validated by
	 * dst_ops->check on every use.  We do this because when a
	 * normal route referenced by an XFRM dst is obsoleted we do
	 * not go looking around for all parent referencing XFRM dsts
	 * so that we can invalidate them.  It is just too much work.
	 * Instead we make the checks here on every use.  For example:
	 *
	 *	XFRM dst A --> IPv4 dst X
	 *
	 * X is the "xdst->route" of A (X is also the "dst->path" of A
	 * in this example).  If X is marked obsolete, "A" will not
	 * notice.  That's what we are validating here via the
	 * stale_bundle() check.
	 *
	 * When a policy's bundle is pruned, we dst_free() the XFRM
	 * dst which causes its ->obsolete field to be set to a
	 * positive non-zero integer.  If an XFRM dst has been pruned
	 * like this, we want to force a new route lookup.
	 */
	if (dst->obsolete < 0 && !stale_bundle(dst))
		return dst;

	return NULL;
}

static int stale_bundle(struct dst_entry *dst)
{
	return !xfrm_bundle_ok(NULL, (struct xfrm_dst *)dst, NULL, AF_UNSPEC, 0);
}

void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
{
	while ((dst = dst->child) && dst->xfrm && dst->dev == dev) {
		dst->dev = init_net.loopback_dev;
		dev_hold(dst->dev);
		dev_put(dev);
	}
}
EXPORT_SYMBOL(xfrm_dst_ifdown);

static void xfrm_link_failure(struct sk_buff *skb)
{
	/* Impossible. Such dst must be popped before it reaches the
	 * point of failure. */
	return;
}

static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
{
	if (dst) {
		if (dst->obsolete) {
			dst_release(dst);
			dst = NULL;
		}
	}
	return dst;
}

static void prune_one_bundle(struct xfrm_policy *pol, int (*func)(struct dst_entry *), struct dst_entry **gc_list_p)
{
	struct dst_entry *dst, **dstp;

	write_lock(&pol->lock);
	dstp = &pol->bundles;
	while ((dst=*dstp) != NULL) {
		if (func(dst)) {
			*dstp = dst->next;
			dst->next = *gc_list_p;
			*gc_list_p = dst;
		} else {
			dstp = &dst->next;
		}
	}
	write_unlock(&pol->lock);
}

static void xfrm_prune_bundles(int (*func)(struct dst_entry *))
{
	struct dst_entry *gc_list = NULL;
	int dir;

	read_lock_bh(&xfrm_policy_lock);
	for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
		struct xfrm_policy *pol;
		struct hlist_node *entry;
		struct hlist_head *table;
		int i;

		hlist_for_each_entry(pol, entry,
				     &xfrm_policy_inexact[dir], bydst)
			prune_one_bundle(pol, func, &gc_list);

		table = xfrm_policy_bydst[dir].table;
		for (i = xfrm_policy_bydst[dir].hmask; i >= 0; i--) {
			hlist_for_each_entry(pol, entry, table + i, bydst)
				prune_one_bundle(pol, func, &gc_list);
		}
	}
	read_unlock_bh(&xfrm_policy_lock);

	while (gc_list) {
		struct dst_entry *dst = gc_list;
		gc_list = dst->next;
		dst_free(dst);
	}
}

static int unused_bundle(struct dst_entry *dst)
{
	return !atomic_read(&dst->__refcnt);
}

static void __xfrm_garbage_collect(void)
{
	xfrm_prune_bundles(unused_bundle);
}

static int xfrm_flush_bundles(void)
{
	xfrm_prune_bundles(stale_bundle);
	return 0;
}

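/* Walk every xfrm_dst of a freshly built bundle, caching the child and
 * route MTUs and setting each level's MTU metric to the smaller of the
 * state-adjusted child MTU and the route MTU.
 */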
void xfrm_init_pmtu(struct dst_entry *dst)
{
	do {
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
		u32 pmtu, route_mtu_cached;

		pmtu = dst_mtu(dst->child);
		xdst->child_mtu_cached = pmtu;

		pmtu = xfrm_state_mtu(dst->xfrm, pmtu);

		route_mtu_cached = dst_mtu(xdst->route);
		xdst->route_mtu_cached = route_mtu_cached;

		if (pmtu > route_mtu_cached)
			pmtu = route_mtu_cached;

		dst->metrics[RTAX_MTU-1] = pmtu;
	} while ((dst = dst->next));
}

EXPORT_SYMBOL(xfrm_init_pmtu);

/* Check that the bundle accepts the flow and its components are
 * still valid.
 */

int xfrm_bundle_ok(struct xfrm_policy *pol, struct xfrm_dst *first,
		   struct flowi *fl, int family, int strict)
{
	struct dst_entry *dst = &first->u.dst;
	struct xfrm_dst *last;
	u32 mtu;

	if (!dst_check(dst->path, ((struct xfrm_dst *)dst)->path_cookie) ||
	    (dst->dev && !netif_running(dst->dev)))
		return 0;
#ifdef CONFIG_XFRM_SUB_POLICY
	if (fl) {
		if (first->origin && !flow_cache_uli_match(first->origin, fl))
			return 0;
		if (first->partner &&
		    !xfrm_selector_match(first->partner, fl, family))
			return 0;
	}
#endif

	last = NULL;

	do {
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;

		if (fl && !xfrm_selector_match(&dst->xfrm->sel, fl, family))
			return 0;
		if (fl && pol &&
		    !security_xfrm_state_pol_flow_match(dst->xfrm, pol, fl))
			return 0;
		if (dst->xfrm->km.state != XFRM_STATE_VALID)
			return 0;
		if (xdst->genid != dst->xfrm->genid)
			return 0;

		if (strict && fl &&
		    !(dst->xfrm->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL) &&
		    !xfrm_state_addr_flow_check(dst->xfrm, fl, family))
			return 0;

		mtu = dst_mtu(dst->child);
		if (xdst->child_mtu_cached != mtu) {
			last = xdst;
			xdst->child_mtu_cached = mtu;
		}

		if (!dst_check(xdst->route, xdst->route_cookie))
			return 0;
		mtu = dst_mtu(xdst->route);
		if (xdst->route_mtu_cached != mtu) {
			last = xdst;
			xdst->route_mtu_cached = mtu;
		}

		dst = dst->child;
	} while (dst->xfrm);

	if (likely(!last))
		return 1;

	mtu = last->child_mtu_cached;
	for (;;) {
		dst = &last->u.dst;

		mtu = xfrm_state_mtu(dst->xfrm, mtu);
		if (mtu > last->route_mtu_cached)
			mtu = last->route_mtu_cached;
		dst->metrics[RTAX_MTU-1] = mtu;

		if (last == first)
			break;

		last = (struct xfrm_dst *)last->u.dst.next;
		last->child_mtu_cached = mtu;
	}

	return 1;
}

EXPORT_SYMBOL(xfrm_bundle_ok);

int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	int err = 0;
	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;
	write_lock_bh(&xfrm_policy_afinfo_lock);
	if (unlikely(xfrm_policy_afinfo[afinfo->family] != NULL))
		err = -ENOBUFS;
	else {
		struct dst_ops *dst_ops = afinfo->dst_ops;
		if (likely(dst_ops->kmem_cachep == NULL))
			dst_ops->kmem_cachep = xfrm_dst_cache;
		if (likely(dst_ops->check == NULL))
			dst_ops->check = xfrm_dst_check;
		if (likely(dst_ops->negative_advice == NULL))
			dst_ops->negative_advice = xfrm_negative_advice;
		if (likely(dst_ops->link_failure == NULL))
			dst_ops->link_failure = xfrm_link_failure;
		if (likely(afinfo->garbage_collect == NULL))
			afinfo->garbage_collect = __xfrm_garbage_collect;
		xfrm_policy_afinfo[afinfo->family] = afinfo;
	}
	write_unlock_bh(&xfrm_policy_afinfo_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_policy_register_afinfo);

int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	int err = 0;
	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;
	write_lock_bh(&xfrm_policy_afinfo_lock);
	if (likely(xfrm_policy_afinfo[afinfo->family] != NULL)) {
		if (unlikely(xfrm_policy_afinfo[afinfo->family] != afinfo))
			err = -EINVAL;
		else {
			struct dst_ops *dst_ops = afinfo->dst_ops;
			xfrm_policy_afinfo[afinfo->family] = NULL;
			dst_ops->kmem_cachep = NULL;
			dst_ops->check = NULL;
			dst_ops->negative_advice = NULL;
			dst_ops->link_failure = NULL;
			afinfo->garbage_collect = NULL;
		}
	}
	write_unlock_bh(&xfrm_policy_afinfo_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);

static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo;
	if (unlikely(family >= NPROTO))
		return NULL;
	read_lock(&xfrm_policy_afinfo_lock);
	afinfo = xfrm_policy_afinfo[family];
	if (unlikely(!afinfo))
		read_unlock(&xfrm_policy_afinfo_lock);
	return afinfo;
}

/* The matching read_lock is taken in xfrm_policy_get_afinfo(). */
static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	read_unlock(&xfrm_policy_afinfo_lock);
}

static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	if (dev->nd_net != &init_net)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_DOWN:
		xfrm_flush_bundles();
	}
	return NOTIFY_DONE;
}

static struct notifier_block xfrm_dev_notifier = {
	.notifier_call	= xfrm_dev_event,
};

static void __init xfrm_policy_init(void)
{
	unsigned int hmask, sz;
	int dir;

	xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
					   sizeof(struct xfrm_dst),
					   0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
					   NULL);

	hmask = 8 - 1;
	sz = (hmask+1) * sizeof(struct hlist_head);

	xfrm_policy_byidx = xfrm_hash_alloc(sz);
	xfrm_idx_hmask = hmask;
	if (!xfrm_policy_byidx)
		panic("XFRM: failed to allocate byidx hash\n");

	for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
		struct xfrm_policy_hash *htab;

		INIT_HLIST_HEAD(&xfrm_policy_inexact[dir]);

		htab = &xfrm_policy_bydst[dir];
		htab->table = xfrm_hash_alloc(sz);
		htab->hmask = hmask;
		if (!htab->table)
			panic("XFRM: failed to allocate bydst hash\n");
	}

	INIT_WORK(&xfrm_policy_gc_work, xfrm_policy_gc_task);
	register_netdevice_notifier(&xfrm_dev_notifier);
}

void __init xfrm_init(void)
{
	xfrm_state_init();
	xfrm_policy_init();
	xfrm_input_init();
}

#ifdef CONFIG_AUDITSYSCALL
static inline void xfrm_audit_common_policyinfo(struct xfrm_policy *xp,
						struct audit_buffer *audit_buf)
{
	if (xp->security)
		audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
				 xp->security->ctx_alg, xp->security->ctx_doi,
				 xp->security->ctx_str);

	switch(xp->selector.family) {
	case AF_INET:
		audit_log_format(audit_buf, " src=%u.%u.%u.%u dst=%u.%u.%u.%u",
				 NIPQUAD(xp->selector.saddr.a4),
				 NIPQUAD(xp->selector.daddr.a4));
		break;
	case AF_INET6:
		{
			struct in6_addr saddr6, daddr6;

			memcpy(&saddr6, xp->selector.saddr.a6,
				sizeof(struct in6_addr));
			memcpy(&daddr6, xp->selector.daddr.a6,
				sizeof(struct in6_addr));
			audit_log_format(audit_buf,
					 " src=" NIP6_FMT " dst=" NIP6_FMT,
					 NIP6(saddr6), NIP6(daddr6));
		}
		break;
	}
}

void
xfrm_audit_policy_add(struct xfrm_policy *xp, int result, u32 auid, u32 sid)
{
	struct audit_buffer *audit_buf;
	extern int audit_enabled;

	if (audit_enabled == 0)
		return;
	audit_buf = xfrm_audit_start(auid, sid);
	if (audit_buf == NULL)
		return;
	audit_log_format(audit_buf, " op=SPD-add res=%u", result);
	xfrm_audit_common_policyinfo(xp, audit_buf);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_policy_add);

void
xfrm_audit_policy_delete(struct xfrm_policy *xp, int result, u32 auid, u32 sid)
{
	struct audit_buffer *audit_buf;
	extern int audit_enabled;

	if (audit_enabled == 0)
		return;
	audit_buf = xfrm_audit_start(auid, sid);
	if (audit_buf == NULL)
		return;
	audit_log_format(audit_buf, " op=SPD-delete res=%u", result);
	xfrm_audit_common_policyinfo(xp, audit_buf);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete);
#endif /* CONFIG_AUDITSYSCALL */

#ifdef CONFIG_XFRM_MIGRATE
static int xfrm_migrate_selector_match(struct xfrm_selector *sel_cmp,
				       struct xfrm_selector *sel_tgt)
{
	if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
		if (sel_tgt->family == sel_cmp->family &&
		    xfrm_addr_cmp(&sel_tgt->daddr, &sel_cmp->daddr,
				  sel_cmp->family) == 0 &&
		    xfrm_addr_cmp(&sel_tgt->saddr, &sel_cmp->saddr,
				  sel_cmp->family) == 0 &&
		    sel_tgt->prefixlen_d == sel_cmp->prefixlen_d &&
		    sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) {
			return 1;
		}
	} else {
		if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) {
			return 1;
		}
	}
	return 0;
}

static struct xfrm_policy * xfrm_migrate_policy_find(struct xfrm_selector *sel,
						     u8 dir, u8 type)
{
	struct xfrm_policy *pol, *ret = NULL;
	struct hlist_node *entry;
	struct hlist_head *chain;
	u32 priority = ~0U;

	read_lock_bh(&xfrm_policy_lock);
	chain = policy_hash_direct(&sel->daddr, &sel->saddr, sel->family, dir);
	hlist_for_each_entry(pol, entry, chain, bydst) {
		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
		    pol->type == type) {
			ret = pol;
			priority = ret->priority;
			break;
		}
	}
	chain = &xfrm_policy_inexact[dir];
	hlist_for_each_entry(pol, entry, chain, bydst) {
		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
		    pol->type == type &&
		    pol->priority < priority) {
			ret = pol;
			break;
		}
	}

	if (ret)
		xfrm_pol_hold(ret);

	read_unlock_bh(&xfrm_policy_lock);

	return ret;
}

static int migrate_tmpl_match(struct xfrm_migrate *m, struct xfrm_tmpl *t)
{
	int match = 0;

	if (t->mode == m->mode && t->id.proto == m->proto &&
	    (m->reqid == 0 || t->reqid == m->reqid)) {
		switch (t->mode) {
		case XFRM_MODE_TUNNEL:
		case XFRM_MODE_BEET:
			if (xfrm_addr_cmp(&t->id.daddr, &m->old_daddr,
					  m->old_family) == 0 &&
			    xfrm_addr_cmp(&t->saddr, &m->old_saddr,
					  m->old_family) == 0) {
				match = 1;
			}
			break;
		case XFRM_MODE_TRANSPORT:
			/* in case of transport mode, template does not store
			   any IP addresses, hence we just compare mode and
			   protocol */
			match = 1;
			break;
		default:
			break;
		}
	}
	return match;
}

/* update endpoint address(es) of template(s) */
static int xfrm_policy_migrate(struct xfrm_policy *pol,
			       struct xfrm_migrate *m, int num_migrate)
{
	struct xfrm_migrate *mp;
	struct dst_entry *dst;
	int i, j, n = 0;

	write_lock_bh(&pol->lock);
	if (unlikely(pol->dead)) {
		/* target policy has been deleted */
		write_unlock_bh(&pol->lock);
		return -ENOENT;
	}

	for (i = 0; i < pol->xfrm_nr; i++) {
		for (j = 0, mp = m; j < num_migrate; j++, mp++) {
			if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i]))
				continue;
			n++;
			if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL &&
			    pol->xfrm_vec[i].mode != XFRM_MODE_BEET)
				continue;
			/* update endpoints */
			memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr,
			       sizeof(pol->xfrm_vec[i].id.daddr));
			memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr,
			       sizeof(pol->xfrm_vec[i].saddr));
			pol->xfrm_vec[i].encap_family = mp->new_family;
			/* flush bundles */
			while ((dst = pol->bundles) != NULL) {
				pol->bundles = dst->next;
				dst_free(dst);
			}
		}
	}

	write_unlock_bh(&pol->lock);

	if (!n)
		return -ENODATA;

	return 0;
}

static int xfrm_migrate_check(struct xfrm_migrate *m, int num_migrate)
{
	int i, j;

	if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH)
		return -EINVAL;

	for (i = 0; i < num_migrate; i++) {
		if ((xfrm_addr_cmp(&m[i].old_daddr, &m[i].new_daddr,
				   m[i].old_family) == 0) &&
		    (xfrm_addr_cmp(&m[i].old_saddr, &m[i].new_saddr,
				   m[i].old_family) == 0))
			return -EINVAL;
		if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) ||
		    xfrm_addr_any(&m[i].new_saddr, m[i].new_family))
			return -EINVAL;

		/* check if there is any duplicated entry */
		for (j = i + 1; j < num_migrate; j++) {
			if (!memcmp(&m[i].old_daddr, &m[j].old_daddr,
				    sizeof(m[i].old_daddr)) &&
			    !memcmp(&m[i].old_saddr, &m[j].old_saddr,
				    sizeof(m[i].old_saddr)) &&
			    m[i].proto == m[j].proto &&
			    m[i].mode == m[j].mode &&
			    m[i].reqid == m[j].reqid &&
			    m[i].old_family == m[j].old_family)
				return -EINVAL;
		}
	}

	return 0;
}

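/* Migrate a policy and its states to new endpoint addresses (apparently
 * in support of MIPv6 MIGRATE): validate the request, find the policy,
 * clone-and-update each matching state, rewrite the policy templates,
 * delete the old states and announce the result to the key managers.
 */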
int xfrm_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
		 struct xfrm_migrate *m, int num_migrate)
{
	int i, err, nx_cur = 0, nx_new = 0;
	struct xfrm_policy *pol = NULL;
	struct xfrm_state *x, *xc;
	struct xfrm_state *x_cur[XFRM_MAX_DEPTH];
	struct xfrm_state *x_new[XFRM_MAX_DEPTH];
	struct xfrm_migrate *mp;

	if ((err = xfrm_migrate_check(m, num_migrate)) < 0)
		goto out;

	/* Stage 1 - find policy */
	if ((pol = xfrm_migrate_policy_find(sel, dir, type)) == NULL) {
		err = -ENOENT;
		goto out;
	}

	/* Stage 2 - find and update state(s) */
	for (i = 0, mp = m; i < num_migrate; i++, mp++) {
		if ((x = xfrm_migrate_state_find(mp))) {
			x_cur[nx_cur] = x;
			nx_cur++;
			if ((xc = xfrm_state_migrate(x, mp))) {
				x_new[nx_new] = xc;
				nx_new++;
			} else {
				err = -ENODATA;
				goto restore_state;
			}
		}
	}

	/* Stage 3 - update policy */
	if ((err = xfrm_policy_migrate(pol, m, num_migrate)) < 0)
		goto restore_state;

	/* Stage 4 - delete old state(s) */
	if (nx_cur) {
		xfrm_states_put(x_cur, nx_cur);
		xfrm_states_delete(x_cur, nx_cur);
	}

	/* Stage 5 - announce */
	km_migrate(sel, dir, type, m, num_migrate);

	xfrm_pol_put(pol);

	return 0;
out:
	return err;

restore_state:
	if (pol)
		xfrm_pol_put(pol);
	if (nx_cur)
		xfrm_states_put(x_cur, nx_cur);
	if (nx_new)
		xfrm_states_delete(x_new, nx_new);

	return err;
}
EXPORT_SYMBOL(xfrm_migrate);
#endif /* CONFIG_XFRM_MIGRATE */