/*
 * xfrm_policy.c
 *
 * Changes:
 *	Mitsuru KANDA @USAGI
 *	Kazunori MIYAZAWA @USAGI
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *		IPv6 support
 *	Kazunori MIYAZAWA @USAGI
 *	YOSHIFUJI Hideaki
 *		Split up af-specific portion
 *	Derek Atkins <derek@ihtfp.com>		Add the post_input processor
 *
 */
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <net/xfrm.h>
#include <net/ip.h>

#include "xfrm_hash.h"
int sysctl_xfrm_larval_drop __read_mostly;

DEFINE_MUTEX(xfrm_cfg_mutex);
EXPORT_SYMBOL(xfrm_cfg_mutex);

static DEFINE_RWLOCK(xfrm_policy_lock);

unsigned int xfrm_policy_count[XFRM_POLICY_MAX*2];
EXPORT_SYMBOL(xfrm_policy_count);

static DEFINE_RWLOCK(xfrm_policy_afinfo_lock);
static struct xfrm_policy_afinfo *xfrm_policy_afinfo[NPROTO];

static struct kmem_cache *xfrm_dst_cache __read_mostly;

static struct work_struct xfrm_policy_gc_work;
static HLIST_HEAD(xfrm_policy_gc_list);
static DEFINE_SPINLOCK(xfrm_policy_gc_lock);

static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family);
static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo);
static inline int
__xfrm4_selector_match(struct xfrm_selector *sel, struct flowi *fl)
{
	return  addr_match(&fl->fl4_dst, &sel->daddr, sel->prefixlen_d) &&
		addr_match(&fl->fl4_src, &sel->saddr, sel->prefixlen_s) &&
		!((xfrm_flowi_dport(fl) ^ sel->dport) & sel->dport_mask) &&
		!((xfrm_flowi_sport(fl) ^ sel->sport) & sel->sport_mask) &&
		(fl->proto == sel->proto || !sel->proto) &&
		(fl->oif == sel->ifindex || !sel->ifindex);
}
static inline int
__xfrm6_selector_match(struct xfrm_selector *sel, struct flowi *fl)
{
	return  addr_match(&fl->fl6_dst, &sel->daddr, sel->prefixlen_d) &&
		addr_match(&fl->fl6_src, &sel->saddr, sel->prefixlen_s) &&
		!((xfrm_flowi_dport(fl) ^ sel->dport) & sel->dport_mask) &&
		!((xfrm_flowi_sport(fl) ^ sel->sport) & sel->sport_mask) &&
		(fl->proto == sel->proto || !sel->proto) &&
		(fl->oif == sel->ifindex || !sel->ifindex);
}
int xfrm_selector_match(struct xfrm_selector *sel, struct flowi *fl,
			unsigned short family)
{
	switch (family) {
	case AF_INET:
		return __xfrm4_selector_match(sel, fl);
	case AF_INET6:
		return __xfrm6_selector_match(sel, fl);
	}
	return 0;
}
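
/* Usage sketch (hypothetical helper, not part of the original SPD paths;
 * the name is made up): the match above treats zeroed fields as
 * wildcards, so a selector built like this matches any TCP flow between
 * the configured prefixes, on any interface and any port pair.
 */
static inline void example_any_tcp_selector(struct xfrm_selector *sel)
{
	memset(sel, 0, sizeof(*sel));
	/* daddr/saddr plus prefixlen_d/prefixlen_s pick the subnets */
	sel->proto = IPPROTO_TCP;	/* 0 would wildcard the protocol */
	sel->dport_mask = 0;		/* zero mask: any destination port */
	sel->sport_mask = 0;		/* zero mask: any source port */
	sel->ifindex = 0;		/* zero: any interface */
}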
int xfrm_dst_lookup(struct xfrm_dst **dst, struct flowi *fl,
		    unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	int err = 0;

	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	if (likely(afinfo->dst_lookup != NULL))
		err = afinfo->dst_lookup(dst, fl);
	else
		err = -EINVAL;
	xfrm_policy_put_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_dst_lookup);
static inline unsigned long make_jiffies(long secs)
{
	if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
		return MAX_SCHEDULE_TIMEOUT-1;
	else
		return secs*HZ;
}
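
/* Example (hypothetical caller): make_jiffies() clamps the conversion so
 * that secs*HZ cannot overflow; very long lifetimes simply saturate at
 * MAX_SCHEDULE_TIMEOUT-1 jiffies instead of wrapping around.
 */
static inline unsigned long example_policy_deadline(long lifetime_secs)
{
	return jiffies + make_jiffies(lifetime_secs);
}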
static void xfrm_policy_timer(unsigned long data)
{
	struct xfrm_policy *xp = (struct xfrm_policy*)data;
	unsigned long now = get_seconds();
	long next = LONG_MAX;
	int warn = 0;
	int dir;

	read_lock(&xp->lock);

	if (xp->dead)
		goto out;

	dir = xfrm_policy_id2dir(xp->index);

	if (xp->lft.hard_add_expires_seconds) {
		long tmo = xp->lft.hard_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.hard_use_expires_seconds) {
		long tmo = xp->lft.hard_use_expires_seconds +
			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_add_expires_seconds) {
		long tmo = xp->lft.soft_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_use_expires_seconds) {
		long tmo = xp->lft.soft_use_expires_seconds +
			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}

	if (warn)
		km_policy_expired(xp, dir, 0, 0);
	if (next != LONG_MAX &&
	    !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
		xfrm_pol_hold(xp);

out:
	read_unlock(&xp->lock);
	xfrm_pol_put(xp);
	return;

expired:
	read_unlock(&xp->lock);
	if (!xfrm_policy_delete(xp, dir))
		km_policy_expired(xp, dir, 1, 0);
	xfrm_pol_put(xp);
}
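
/* Worked example of the timer logic above: with
 * lft.soft_add_expires_seconds == 3600 and
 * lft.hard_add_expires_seconds == 3900, the soft event
 * km_policy_expired(xp, dir, 0, 0) fires one hour after add_time and the
 * timer is rearmed with min(XFRM_KM_TIMEOUT, time left to the hard
 * limit); at add_time + 3900 seconds the policy is deleted and
 * km_policy_expired(xp, dir, 1, 0) announces the hard expiry.
 */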
/* Allocate xfrm_policy. Not used here, it is supposed to be used by pfkeyv2
 * SPD calls.
 */

struct xfrm_policy *xfrm_policy_alloc(gfp_t gfp)
{
	struct xfrm_policy *policy;

	policy = kzalloc(sizeof(struct xfrm_policy), gfp);

	if (policy) {
		INIT_HLIST_NODE(&policy->bydst);
		INIT_HLIST_NODE(&policy->byidx);
		rwlock_init(&policy->lock);
		atomic_set(&policy->refcnt, 1);
		init_timer(&policy->timer);
		policy->timer.data = (unsigned long)policy;
		policy->timer.function = xfrm_policy_timer;
	}
	return policy;
}
EXPORT_SYMBOL(xfrm_policy_alloc);
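
/* Usage sketch (hypothetical, loosely modeled on the pfkeyv2/netlink SPD
 * add paths; the helper name and field values are illustrative only):
 * allocate a policy, fill in the minimum fields, and insert it.
 */
static inline int example_install_policy(void)
{
	struct xfrm_policy *xp = xfrm_policy_alloc(GFP_KERNEL);

	if (!xp)
		return -ENOMEM;
	xp->family = AF_INET;
	xp->action = XFRM_POLICY_ALLOW;
	/* xp->selector, xp->lft and xp->xfrm_vec[] would come from
	 * userspace; kzalloc()ed zero lifetimes mean "no expiry".
	 */
	return xfrm_policy_insert(XFRM_POLICY_OUT, xp, 1 /* excl */);
}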
/* Destroy xfrm_policy: descendant resources must be released to this moment. */

void __xfrm_policy_destroy(struct xfrm_policy *policy)
{
	BUG_ON(!policy->dead);

	BUG_ON(policy->bundles);

	if (del_timer(&policy->timer))
		BUG();

	security_xfrm_policy_free(policy);
	kfree(policy);
}
EXPORT_SYMBOL(__xfrm_policy_destroy);
static void xfrm_policy_gc_kill(struct xfrm_policy *policy)
{
	struct dst_entry *dst;

	while ((dst = policy->bundles) != NULL) {
		policy->bundles = dst->next;
		dst_free(dst);
	}

	if (del_timer(&policy->timer))
		atomic_dec(&policy->refcnt);

	if (atomic_read(&policy->refcnt) > 1)
		flow_cache_flush();

	xfrm_pol_put(policy);
}
static void xfrm_policy_gc_task(struct work_struct *work)
{
	struct xfrm_policy *policy;
	struct hlist_node *entry, *tmp;
	struct hlist_head gc_list;

	spin_lock_bh(&xfrm_policy_gc_lock);
	gc_list.first = xfrm_policy_gc_list.first;
	INIT_HLIST_HEAD(&xfrm_policy_gc_list);
	spin_unlock_bh(&xfrm_policy_gc_lock);

	hlist_for_each_entry_safe(policy, entry, tmp, &gc_list, bydst)
		xfrm_policy_gc_kill(policy);
}
/* Rule must be locked. Release descendant resources, announce
 * entry dead. The rule must already be unlinked from the lists.
 */

static void xfrm_policy_kill(struct xfrm_policy *policy)
{
	int dead;

	write_lock_bh(&policy->lock);
	dead = policy->dead;
	policy->dead = 1;
	write_unlock_bh(&policy->lock);

	if (unlikely(dead)) {
		WARN_ON(1);
		return;
	}

	spin_lock(&xfrm_policy_gc_lock);
	hlist_add_head(&policy->bydst, &xfrm_policy_gc_list);
	spin_unlock(&xfrm_policy_gc_lock);

	schedule_work(&xfrm_policy_gc_work);
}
struct xfrm_policy_hash {
	struct hlist_head	*table;
	unsigned int		hmask;
};

static struct hlist_head xfrm_policy_inexact[XFRM_POLICY_MAX*2];
static struct xfrm_policy_hash xfrm_policy_bydst[XFRM_POLICY_MAX*2] __read_mostly;
static struct hlist_head *xfrm_policy_byidx __read_mostly;
static unsigned int xfrm_idx_hmask __read_mostly;
static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;
static inline unsigned int idx_hash(u32 index)
{
	return __idx_hash(index, xfrm_idx_hmask);
}

static struct hlist_head *policy_hash_bysel(struct xfrm_selector *sel, unsigned short family, int dir)
{
	unsigned int hmask = xfrm_policy_bydst[dir].hmask;
	unsigned int hash = __sel_hash(sel, family, hmask);

	return (hash == hmask + 1 ?
		&xfrm_policy_inexact[dir] :
		xfrm_policy_bydst[dir].table + hash);
}

static struct hlist_head *policy_hash_direct(xfrm_address_t *daddr, xfrm_address_t *saddr, unsigned short family, int dir)
{
	unsigned int hmask = xfrm_policy_bydst[dir].hmask;
	unsigned int hash = __addr_hash(daddr, saddr, family, hmask);

	return xfrm_policy_bydst[dir].table + hash;
}
static void xfrm_dst_hash_transfer(struct hlist_head *list,
				   struct hlist_head *ndsttable,
				   unsigned int nhashmask)
{
	struct hlist_node *entry, *tmp;
	struct xfrm_policy *pol;

	hlist_for_each_entry_safe(pol, entry, tmp, list, bydst) {
		unsigned int h;

		h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
				pol->family, nhashmask);
		hlist_add_head(&pol->bydst, ndsttable+h);
	}
}

static void xfrm_idx_hash_transfer(struct hlist_head *list,
				   struct hlist_head *nidxtable,
				   unsigned int nhashmask)
{
	struct hlist_node *entry, *tmp;
	struct xfrm_policy *pol;

	hlist_for_each_entry_safe(pol, entry, tmp, list, byidx) {
		unsigned int h;

		h = __idx_hash(pol->index, nhashmask);
		hlist_add_head(&pol->byidx, nidxtable+h);
	}
}
static unsigned long xfrm_new_hash_mask(unsigned int old_hmask)
{
	return ((old_hmask + 1) << 1) - 1;
}
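
/* Example: each resize doubles the bucket count, so the mask progresses
 * 7 -> 15 -> 31 -> ... (8, 16, 32, ... buckets);
 * e.g. xfrm_new_hash_mask(7) == 15.
 */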
static void xfrm_bydst_resize(int dir)
{
	unsigned int hmask = xfrm_policy_bydst[dir].hmask;
	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
	struct hlist_head *odst = xfrm_policy_bydst[dir].table;
	struct hlist_head *ndst = xfrm_hash_alloc(nsize);
	int i;

	if (!ndst)
		return;

	write_lock_bh(&xfrm_policy_lock);

	for (i = hmask; i >= 0; i--)
		xfrm_dst_hash_transfer(odst + i, ndst, nhashmask);

	xfrm_policy_bydst[dir].table = ndst;
	xfrm_policy_bydst[dir].hmask = nhashmask;

	write_unlock_bh(&xfrm_policy_lock);

	xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head));
}
static void xfrm_byidx_resize(int total)
{
	unsigned int hmask = xfrm_idx_hmask;
	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
	struct hlist_head *oidx = xfrm_policy_byidx;
	struct hlist_head *nidx = xfrm_hash_alloc(nsize);
	int i;

	if (!nidx)
		return;

	write_lock_bh(&xfrm_policy_lock);

	for (i = hmask; i >= 0; i--)
		xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask);

	xfrm_policy_byidx = nidx;
	xfrm_idx_hmask = nhashmask;

	write_unlock_bh(&xfrm_policy_lock);

	xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head));
}
static inline int xfrm_bydst_should_resize(int dir, int *total)
{
	unsigned int cnt = xfrm_policy_count[dir];
	unsigned int hmask = xfrm_policy_bydst[dir].hmask;

	if (total)
		*total += cnt;

	if ((hmask + 1) < xfrm_policy_hashmax &&
	    cnt > hmask)
		return 1;

	return 0;
}

static inline int xfrm_byidx_should_resize(int total)
{
	unsigned int hmask = xfrm_idx_hmask;

	if ((hmask + 1) < xfrm_policy_hashmax &&
	    total > hmask)
		return 1;

	return 0;
}
void xfrm_spd_getinfo(struct xfrmk_spdinfo *si)
{
	read_lock_bh(&xfrm_policy_lock);
	si->incnt = xfrm_policy_count[XFRM_POLICY_IN];
	si->outcnt = xfrm_policy_count[XFRM_POLICY_OUT];
	si->fwdcnt = xfrm_policy_count[XFRM_POLICY_FWD];
	si->inscnt = xfrm_policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX];
	si->outscnt = xfrm_policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX];
	si->fwdscnt = xfrm_policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
	si->spdhcnt = xfrm_idx_hmask;
	si->spdhmcnt = xfrm_policy_hashmax;
	read_unlock_bh(&xfrm_policy_lock);
}
EXPORT_SYMBOL(xfrm_spd_getinfo);
static DEFINE_MUTEX(hash_resize_mutex);

static void xfrm_hash_resize(struct work_struct *__unused)
{
	int dir, total;

	mutex_lock(&hash_resize_mutex);

	total = 0;
	for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
		if (xfrm_bydst_should_resize(dir, &total))
			xfrm_bydst_resize(dir);
	}
	if (xfrm_byidx_should_resize(total))
		xfrm_byidx_resize(total);

	mutex_unlock(&hash_resize_mutex);
}

static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize);
/* Generate new index... KAME seems to generate them ordered by cost
 * of an absolute unpredictability of ordering of rules. This will not pass. */
static u32 xfrm_gen_index(u8 type, int dir)
{
	static u32 idx_generator;

	for (;;) {
		struct hlist_node *entry;
		struct hlist_head *list;
		struct xfrm_policy *p;
		u32 idx;
		int found;

		idx = (idx_generator | dir);
		idx_generator += 8;
		if (idx == 0)
			idx = 8;
		list = xfrm_policy_byidx + idx_hash(idx);
		found = 0;
		hlist_for_each_entry(p, entry, list, byidx) {
			if (p->index == idx) {
				found = 1;
				break;
			}
		}
		if (!found)
			return idx;
	}
}
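
/* Example: indices advance in steps of 8 with the direction encoded in
 * the low bits, so for dir == XFRM_POLICY_OUT (1) the generator yields
 * 1, 9, 17, ... and xfrm_policy_id2dir() can recover the direction from
 * the index alone, without any table lookup.
 */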
static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s2)
{
	u32 *p1 = (u32 *) s1;
	u32 *p2 = (u32 *) s2;
	int len = sizeof(struct xfrm_selector) / sizeof(u32);
	int i;

	for (i = 0; i < len; i++) {
		if (p1[i] != p2[i])
			return 1;
	}

	return 0;
}
int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
{
	struct xfrm_policy *pol;
	struct xfrm_policy *delpol;
	struct hlist_head *chain;
	struct hlist_node *entry, *newpos;
	struct dst_entry *gc_list;

	write_lock_bh(&xfrm_policy_lock);
	chain = policy_hash_bysel(&policy->selector, policy->family, dir);
	delpol = NULL;
	newpos = NULL;
	hlist_for_each_entry(pol, entry, chain, bydst) {
		if (pol->type == policy->type &&
		    !selector_cmp(&pol->selector, &policy->selector) &&
		    xfrm_sec_ctx_match(pol->security, policy->security) &&
		    !WARN_ON(delpol)) {
			if (excl) {
				write_unlock_bh(&xfrm_policy_lock);
				return -EEXIST;
			}
			delpol = pol;
			if (policy->priority > pol->priority)
				continue;
		} else if (policy->priority >= pol->priority) {
			newpos = &pol->bydst;
			continue;
		}
		if (delpol)
			break;
	}
	if (newpos)
		hlist_add_after(newpos, &policy->bydst);
	else
		hlist_add_head(&policy->bydst, chain);
	xfrm_pol_hold(policy);
	xfrm_policy_count[dir]++;
	atomic_inc(&flow_cache_genid);
	if (delpol) {
		hlist_del(&delpol->bydst);
		hlist_del(&delpol->byidx);
		xfrm_policy_count[dir]--;
	}
	policy->index = delpol ? delpol->index : xfrm_gen_index(policy->type, dir);
	hlist_add_head(&policy->byidx, xfrm_policy_byidx+idx_hash(policy->index));
	policy->curlft.add_time = get_seconds();
	policy->curlft.use_time = 0;
	if (!mod_timer(&policy->timer, jiffies + HZ))
		xfrm_pol_hold(policy);
	write_unlock_bh(&xfrm_policy_lock);

	if (delpol)
		xfrm_policy_kill(delpol);
	else if (xfrm_bydst_should_resize(dir, NULL))
		schedule_work(&xfrm_hash_work);

	read_lock_bh(&xfrm_policy_lock);
	gc_list = NULL;
	entry = &policy->bydst;
	hlist_for_each_entry_continue(policy, entry, bydst) {
		struct dst_entry *dst;

		write_lock(&policy->lock);
		dst = policy->bundles;
		if (dst) {
			struct dst_entry *tail = dst;
			while (tail->next)
				tail = tail->next;
			tail->next = gc_list;
			gc_list = dst;

			policy->bundles = NULL;
		}
		write_unlock(&policy->lock);
	}
	read_unlock_bh(&xfrm_policy_lock);

	while (gc_list) {
		struct dst_entry *dst = gc_list;

		gc_list = dst->next;
		dst_free(dst);
	}

	return 0;
}
EXPORT_SYMBOL(xfrm_policy_insert);
struct xfrm_policy *xfrm_policy_bysel_ctx(u8 type, int dir,
					  struct xfrm_selector *sel,
					  struct xfrm_sec_ctx *ctx, int delete,
					  int *err)
{
	struct xfrm_policy *pol, *ret;
	struct hlist_head *chain;
	struct hlist_node *entry;

	*err = 0;
	write_lock_bh(&xfrm_policy_lock);
	chain = policy_hash_bysel(sel, sel->family, dir);
	ret = NULL;
	hlist_for_each_entry(pol, entry, chain, bydst) {
		if (pol->type == type &&
		    !selector_cmp(sel, &pol->selector) &&
		    xfrm_sec_ctx_match(ctx, pol->security)) {
			xfrm_pol_hold(pol);
			if (delete) {
				*err = security_xfrm_policy_delete(pol);
				if (*err) {
					write_unlock_bh(&xfrm_policy_lock);
					return pol;
				}
				hlist_del(&pol->bydst);
				hlist_del(&pol->byidx);
				xfrm_policy_count[dir]--;
			}
			ret = pol;
			break;
		}
	}
	write_unlock_bh(&xfrm_policy_lock);

	if (ret && delete) {
		atomic_inc(&flow_cache_genid);
		xfrm_policy_kill(ret);
	}
	return ret;
}
EXPORT_SYMBOL(xfrm_policy_bysel_ctx);
struct xfrm_policy *xfrm_policy_byid(u8 type, int dir, u32 id, int delete,
				     int *err)
{
	struct xfrm_policy *pol, *ret;
	struct hlist_head *chain;
	struct hlist_node *entry;

	*err = -ENOENT;
	if (xfrm_policy_id2dir(id) != dir)
		return NULL;

	*err = 0;
	write_lock_bh(&xfrm_policy_lock);
	chain = xfrm_policy_byidx + idx_hash(id);
	ret = NULL;
	hlist_for_each_entry(pol, entry, chain, byidx) {
		if (pol->type == type && pol->index == id) {
			xfrm_pol_hold(pol);
			if (delete) {
				*err = security_xfrm_policy_delete(pol);
				if (*err) {
					write_unlock_bh(&xfrm_policy_lock);
					return pol;
				}
				hlist_del(&pol->bydst);
				hlist_del(&pol->byidx);
				xfrm_policy_count[dir]--;
			}
			ret = pol;
			break;
		}
	}
	write_unlock_bh(&xfrm_policy_lock);

	if (ret && delete) {
		atomic_inc(&flow_cache_genid);
		xfrm_policy_kill(ret);
	}
	return ret;
}
EXPORT_SYMBOL(xfrm_policy_byid);
#ifdef CONFIG_SECURITY_NETWORK_XFRM
static inline int
xfrm_policy_flush_secctx_check(u8 type, struct xfrm_audit *audit_info)
{
	int dir, err = 0;

	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		struct xfrm_policy *pol;
		struct hlist_node *entry;
		int i;

		hlist_for_each_entry(pol, entry,
				     &xfrm_policy_inexact[dir], bydst) {
			if (pol->type != type)
				continue;
			err = security_xfrm_policy_delete(pol);
			if (err) {
				xfrm_audit_policy_delete(pol, 0,
							 audit_info->loginuid,
							 audit_info->secid);
				return err;
			}
		}
		for (i = xfrm_policy_bydst[dir].hmask; i >= 0; i--) {
			hlist_for_each_entry(pol, entry,
					     xfrm_policy_bydst[dir].table + i,
					     bydst) {
				if (pol->type != type)
					continue;
				err = security_xfrm_policy_delete(pol);
				if (err) {
					xfrm_audit_policy_delete(pol, 0,
								 audit_info->loginuid,
								 audit_info->secid);
					return err;
				}
			}
		}
	}
	return err;
}
#else
static inline int
xfrm_policy_flush_secctx_check(u8 type, struct xfrm_audit *audit_info)
{
	return 0;
}
#endif
int xfrm_policy_flush(u8 type, struct xfrm_audit *audit_info)
{
	int dir, err = 0;

	write_lock_bh(&xfrm_policy_lock);

	err = xfrm_policy_flush_secctx_check(type, audit_info);
	if (err)
		goto out;

	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		struct xfrm_policy *pol;
		struct hlist_node *entry;
		int i, killed;

		killed = 0;
	again1:
		hlist_for_each_entry(pol, entry,
				     &xfrm_policy_inexact[dir], bydst) {
			if (pol->type != type)
				continue;
			hlist_del(&pol->bydst);
			hlist_del(&pol->byidx);
			write_unlock_bh(&xfrm_policy_lock);

			xfrm_audit_policy_delete(pol, 1, audit_info->loginuid,
						 audit_info->secid);

			xfrm_policy_kill(pol);
			killed++;

			write_lock_bh(&xfrm_policy_lock);
			goto again1;
		}

		for (i = xfrm_policy_bydst[dir].hmask; i >= 0; i--) {
	again2:
			hlist_for_each_entry(pol, entry,
					     xfrm_policy_bydst[dir].table + i,
					     bydst) {
				if (pol->type != type)
					continue;
				hlist_del(&pol->bydst);
				hlist_del(&pol->byidx);
				write_unlock_bh(&xfrm_policy_lock);

				xfrm_audit_policy_delete(pol, 1,
							 audit_info->loginuid,
							 audit_info->secid);
				xfrm_policy_kill(pol);
				killed++;

				write_lock_bh(&xfrm_policy_lock);
				goto again2;
			}
		}

		xfrm_policy_count[dir] -= killed;
	}
	atomic_inc(&flow_cache_genid);
out:
	write_unlock_bh(&xfrm_policy_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_policy_flush);
int xfrm_policy_walk(u8 type, int (*func)(struct xfrm_policy *, int, int, void*),
		     void *data)
{
	struct xfrm_policy *pol, *last = NULL;
	struct hlist_node *entry;
	int dir, last_dir = 0, count, error;

	read_lock_bh(&xfrm_policy_lock);
	count = 0;

	for (dir = 0; dir < 2*XFRM_POLICY_MAX; dir++) {
		struct hlist_head *table = xfrm_policy_bydst[dir].table;
		int i;

		hlist_for_each_entry(pol, entry,
				     &xfrm_policy_inexact[dir], bydst) {
			if (pol->type != type)
				continue;
			if (last) {
				error = func(last, last_dir % XFRM_POLICY_MAX,
					     count, data);
				if (error)
					goto out;
			}
			last = pol;
			last_dir = dir;
			count++;
		}
		for (i = xfrm_policy_bydst[dir].hmask; i >= 0; i--) {
			hlist_for_each_entry(pol, entry, table + i, bydst) {
				if (pol->type != type)
					continue;
				if (last) {
					error = func(last, last_dir % XFRM_POLICY_MAX,
						     count, data);
					if (error)
						goto out;
				}
				last = pol;
				last_dir = dir;
				count++;
			}
		}
	}
	if (count == 0) {
		error = -ENOENT;
		goto out;
	}
	error = func(last, last_dir % XFRM_POLICY_MAX, 0, data);
out:
	read_unlock_bh(&xfrm_policy_lock);
	return error;
}
EXPORT_SYMBOL(xfrm_policy_walk);
/*
 * Find policy to apply to this flow.
 *
 * Returns 0 if policy found, else an -errno.
 */
static int xfrm_policy_match(struct xfrm_policy *pol, struct flowi *fl,
			     u8 type, u16 family, int dir)
{
	struct xfrm_selector *sel = &pol->selector;
	int match, ret = -ESRCH;

	if (pol->family != family ||
	    pol->type != type)
		return ret;

	match = xfrm_selector_match(sel, fl, family);
	if (match)
		ret = security_xfrm_policy_lookup(pol, fl->secid, dir);

	return ret;
}
static struct xfrm_policy *xfrm_policy_lookup_bytype(u8 type, struct flowi *fl,
						     u16 family, u8 dir)
{
	int err;
	struct xfrm_policy *pol, *ret;
	xfrm_address_t *daddr, *saddr;
	struct hlist_node *entry;
	struct hlist_head *chain;
	u32 priority = ~0U;

	daddr = xfrm_flowi_daddr(fl, family);
	saddr = xfrm_flowi_saddr(fl, family);
	if (unlikely(!daddr || !saddr))
		return NULL;

	read_lock_bh(&xfrm_policy_lock);
	chain = policy_hash_direct(daddr, saddr, family, dir);
	ret = NULL;
	hlist_for_each_entry(pol, entry, chain, bydst) {
		err = xfrm_policy_match(pol, fl, type, family, dir);
		if (err) {
			if (err == -ESRCH)
				continue;
			else {
				ret = ERR_PTR(err);
				goto fail;
			}
		} else {
			ret = pol;
			priority = ret->priority;
			break;
		}
	}
	chain = &xfrm_policy_inexact[dir];
	hlist_for_each_entry(pol, entry, chain, bydst) {
		err = xfrm_policy_match(pol, fl, type, family, dir);
		if (err) {
			if (err == -ESRCH)
				continue;
			else {
				ret = ERR_PTR(err);
				goto fail;
			}
		} else if (pol->priority < priority) {
			ret = pol;
			break;
		}
	}
	if (ret)
		xfrm_pol_hold(ret);
fail:
	read_unlock_bh(&xfrm_policy_lock);

	return ret;
}
static int xfrm_policy_lookup(struct flowi *fl, u16 family, u8 dir,
			      void **objp, atomic_t **obj_refp)
{
	struct xfrm_policy *pol;
	int err = 0;

#ifdef CONFIG_XFRM_SUB_POLICY
	pol = xfrm_policy_lookup_bytype(XFRM_POLICY_TYPE_SUB, fl, family, dir);
	if (IS_ERR(pol)) {
		err = PTR_ERR(pol);
		pol = NULL;
	}
	if (pol || err)
		goto end;
#endif
	pol = xfrm_policy_lookup_bytype(XFRM_POLICY_TYPE_MAIN, fl, family, dir);
	if (IS_ERR(pol)) {
		err = PTR_ERR(pol);
		pol = NULL;
	}
#ifdef CONFIG_XFRM_SUB_POLICY
end:
#endif
	if ((*objp = (void *) pol) != NULL)
		*obj_refp = &pol->refcnt;
	return err;
}
static inline int policy_to_flow_dir(int dir)
{
	if (XFRM_POLICY_IN == FLOW_DIR_IN &&
	    XFRM_POLICY_OUT == FLOW_DIR_OUT &&
	    XFRM_POLICY_FWD == FLOW_DIR_FWD)
		return dir;
	switch (dir) {
	default:
	case XFRM_POLICY_IN:
		return FLOW_DIR_IN;
	case XFRM_POLICY_OUT:
		return FLOW_DIR_OUT;
	case XFRM_POLICY_FWD:
		return FLOW_DIR_FWD;
	}
}
static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir, struct flowi *fl)
{
	struct xfrm_policy *pol;

	read_lock_bh(&xfrm_policy_lock);
	if ((pol = sk->sk_policy[dir]) != NULL) {
		int match = xfrm_selector_match(&pol->selector, fl,
						sk->sk_family);
		int err = 0;

		if (match) {
			err = security_xfrm_policy_lookup(pol, fl->secid,
					policy_to_flow_dir(dir));
			if (!err)
				xfrm_pol_hold(pol);
			else if (err == -ESRCH)
				pol = NULL;
			else
				pol = ERR_PTR(err);
		} else
			pol = NULL;
	}
	read_unlock_bh(&xfrm_policy_lock);
	return pol;
}
static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
{
	struct hlist_head *chain = policy_hash_bysel(&pol->selector,
						     pol->family, dir);

	hlist_add_head(&pol->bydst, chain);
	hlist_add_head(&pol->byidx, xfrm_policy_byidx+idx_hash(pol->index));
	xfrm_policy_count[dir]++;
	xfrm_pol_hold(pol);

	if (xfrm_bydst_should_resize(dir, NULL))
		schedule_work(&xfrm_hash_work);
}
static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
						int dir)
{
	if (hlist_unhashed(&pol->bydst))
		return NULL;

	hlist_del(&pol->bydst);
	hlist_del(&pol->byidx);
	xfrm_policy_count[dir]--;

	return pol;
}
int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
{
	write_lock_bh(&xfrm_policy_lock);
	pol = __xfrm_policy_unlink(pol, dir);
	write_unlock_bh(&xfrm_policy_lock);
	if (pol) {
		if (dir < XFRM_POLICY_MAX)
			atomic_inc(&flow_cache_genid);
		xfrm_policy_kill(pol);
		return 0;
	}
	return -ENOENT;
}
EXPORT_SYMBOL(xfrm_policy_delete);
int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
{
	struct xfrm_policy *old_pol;

#ifdef CONFIG_XFRM_SUB_POLICY
	if (pol && pol->type != XFRM_POLICY_TYPE_MAIN)
		return -EINVAL;
#endif

	write_lock_bh(&xfrm_policy_lock);
	old_pol = sk->sk_policy[dir];
	sk->sk_policy[dir] = pol;
	if (pol) {
		pol->curlft.add_time = get_seconds();
		pol->index = xfrm_gen_index(pol->type, XFRM_POLICY_MAX+dir);
		__xfrm_policy_link(pol, XFRM_POLICY_MAX+dir);
	}
	if (old_pol)
		__xfrm_policy_unlink(old_pol, XFRM_POLICY_MAX+dir);
	write_unlock_bh(&xfrm_policy_lock);

	if (old_pol) {
		xfrm_policy_kill(old_pol);
	}
	return 0;
}
static struct xfrm_policy *clone_policy(struct xfrm_policy *old, int dir)
{
	struct xfrm_policy *newp = xfrm_policy_alloc(GFP_ATOMIC);

	if (newp) {
		newp->selector = old->selector;
		if (security_xfrm_policy_clone(old, newp)) {
			kfree(newp);
			return NULL;	/* ENOMEM */
		}
		newp->lft = old->lft;
		newp->curlft = old->curlft;
		newp->action = old->action;
		newp->flags = old->flags;
		newp->xfrm_nr = old->xfrm_nr;
		newp->index = old->index;
		newp->type = old->type;
		memcpy(newp->xfrm_vec, old->xfrm_vec,
		       newp->xfrm_nr*sizeof(struct xfrm_tmpl));
		write_lock_bh(&xfrm_policy_lock);
		__xfrm_policy_link(newp, XFRM_POLICY_MAX+dir);
		write_unlock_bh(&xfrm_policy_lock);
	}
	return newp;
}
int __xfrm_sk_clone_policy(struct sock *sk)
{
	struct xfrm_policy *p0 = sk->sk_policy[0],
			   *p1 = sk->sk_policy[1];

	sk->sk_policy[0] = sk->sk_policy[1] = NULL;
	if (p0 && (sk->sk_policy[0] = clone_policy(p0, 0)) == NULL)
		return -ENOMEM;
	if (p1 && (sk->sk_policy[1] = clone_policy(p1, 1)) == NULL)
		return -ENOMEM;
	return 0;
}
static int
xfrm_get_saddr(xfrm_address_t *local, xfrm_address_t *remote,
	       unsigned short family)
{
	int err;
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);

	if (unlikely(afinfo == NULL))
		return -EINVAL;
	err = afinfo->get_saddr(local, remote);
	xfrm_policy_put_afinfo(afinfo);
	return err;
}
/* Resolve list of templates for the flow, given policy. */

static int
xfrm_tmpl_resolve_one(struct xfrm_policy *policy, struct flowi *fl,
		      struct xfrm_state **xfrm,
		      unsigned short family)
{
	int nx;
	int i, error;
	xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
	xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);
	xfrm_address_t tmp;

	for (nx=0, i = 0; i < policy->xfrm_nr; i++) {
		struct xfrm_state *x;
		xfrm_address_t *remote = daddr;
		xfrm_address_t *local  = saddr;
		struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];

		if (tmpl->mode == XFRM_MODE_TUNNEL ||
		    tmpl->mode == XFRM_MODE_BEET) {
			remote = &tmpl->id.daddr;
			local = &tmpl->saddr;
			family = tmpl->encap_family;
			if (xfrm_addr_any(local, family)) {
				error = xfrm_get_saddr(&tmp, remote, family);
				if (error)
					goto fail;
				local = &tmp;
			}
		}

		x = xfrm_state_find(remote, local, fl, tmpl, policy, &error, family);

		if (x && x->km.state == XFRM_STATE_VALID) {
			xfrm[nx++] = x;
			daddr = remote;
			saddr = local;
			continue;
		}
		if (x) {
			error = (x->km.state == XFRM_STATE_ERROR ?
				 -EINVAL : -EAGAIN);
			xfrm_state_put(x);
		}

		if (!tmpl->optional)
			goto fail;
	}
	return nx;

fail:
	for (nx--; nx>=0; nx--)
		xfrm_state_put(xfrm[nx]);
	return error;
}
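
/* Example of the loop above: for a template chain
 * { ESP tunnel to GW, IPcomp transport }, the ESP state is looked up
 * with remote == GW (and the local tunnel address resolved via
 * xfrm_get_saddr() when the template leaves it unspecified); once it is
 * found, daddr/saddr are rewritten to the tunnel endpoints, so the
 * following transport-mode IPcomp state is resolved between the
 * gateways rather than between the original flow addresses.
 */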
static int
xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, struct flowi *fl,
		  struct xfrm_state **xfrm,
		  unsigned short family)
{
	struct xfrm_state *tp[XFRM_MAX_DEPTH];
	struct xfrm_state **tpp = (npols > 1) ? tp : xfrm;
	int cnx = 0;
	int error;
	int ret;
	int i;

	for (i = 0; i < npols; i++) {
		if (cnx + pols[i]->xfrm_nr >= XFRM_MAX_DEPTH) {
			error = -ENOBUFS;
			goto fail;
		}

		ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family);
		if (ret < 0) {
			error = ret;
			goto fail;
		} else
			cnx += ret;
	}

	/* found states are sorted for outbound processing */
	if (npols > 1)
		xfrm_state_sort(xfrm, tpp, cnx, family);

	return cnx;

fail:
	for (cnx--; cnx>=0; cnx--)
		xfrm_state_put(tpp[cnx]);
	return error;
}
/* Check that the bundle accepts the flow and its components are
 * still valid.
 */

static struct dst_entry *
xfrm_find_bundle(struct flowi *fl, struct xfrm_policy *policy, unsigned short family)
{
	struct dst_entry *x;
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return ERR_PTR(-EINVAL);
	x = afinfo->find_bundle(fl, policy);
	xfrm_policy_put_afinfo(afinfo);
	return x;
}
/* Allocate chain of dst_entry's, attach known xfrm's, calculate
 * all the metrics... Shortly, bundle a bundle.
 */

static int
xfrm_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int nx,
		   struct flowi *fl, struct dst_entry **dst_p,
		   unsigned short family)
{
	int err;
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return -EINVAL;
	err = afinfo->bundle_create(policy, xfrm, nx, fl, dst_p);
	xfrm_policy_put_afinfo(afinfo);
	return err;
}
static inline int
xfrm_dst_alloc_copy(void **target, void *src, int size)
{
	if (!*target) {
		*target = kmalloc(size, GFP_ATOMIC);
		if (!*target)
			return -ENOMEM;
	}
	memcpy(*target, src, size);
	return 0;
}
static inline int
xfrm_dst_update_parent(struct dst_entry *dst, struct xfrm_selector *sel)
{
#ifdef CONFIG_XFRM_SUB_POLICY
	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
	return xfrm_dst_alloc_copy((void **)&(xdst->partner),
				   sel, sizeof(*sel));
#else
	return 0;
#endif
}

static inline int
xfrm_dst_update_origin(struct dst_entry *dst, struct flowi *fl)
{
#ifdef CONFIG_XFRM_SUB_POLICY
	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
	return xfrm_dst_alloc_copy((void **)&(xdst->origin), fl, sizeof(*fl));
#else
	return 0;
#endif
}
static int stale_bundle(struct dst_entry *dst);

/* Main function: finds/creates a bundle for given flow.
 *
 * At the moment we eat a raw IP route. Mostly to speed up lookups
 * on interfaces with disabled IPsec.
 */
int __xfrm_lookup(struct dst_entry **dst_p, struct flowi *fl,
		  struct sock *sk, int flags)
{
	struct xfrm_policy *policy;
	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
	int npols;
	int pol_dead;
	int xfrm_nr;
	int pi;
	struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
	struct dst_entry *dst, *dst_orig = *dst_p;
	int nx = 0;
	int err;
	u32 genid;
	u16 family;
	u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT);

restart:
	genid = atomic_read(&flow_cache_genid);
	policy = NULL;
	for (pi = 0; pi < ARRAY_SIZE(pols); pi++)
		pols[pi] = NULL;
	npols = 0;
	pol_dead = 0;
	xfrm_nr = 0;

	if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
		policy = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl);
		if (IS_ERR(policy))
			return PTR_ERR(policy);
	}

	if (!policy) {
		/* To accelerate a bit...  */
		if ((dst_orig->flags & DST_NOXFRM) ||
		    !xfrm_policy_count[XFRM_POLICY_OUT])
			return 0;

		policy = flow_cache_lookup(fl, dst_orig->ops->family,
					   dir, xfrm_policy_lookup);
		if (IS_ERR(policy))
			return PTR_ERR(policy);
	}

	if (!policy)
		return 0;

	family = dst_orig->ops->family;
	policy->curlft.use_time = get_seconds();
	pols[0] = policy;
	npols ++;
	xfrm_nr += pols[0]->xfrm_nr;

	switch (policy->action) {
	default:
	case XFRM_POLICY_BLOCK:
		/* Prohibit the flow */
		err = -EPERM;
		goto error;

	case XFRM_POLICY_ALLOW:
#ifndef CONFIG_XFRM_SUB_POLICY
		if (policy->xfrm_nr == 0) {
			/* Flow passes not transformed. */
			xfrm_pol_put(policy);
			return 0;
		}
#endif

		/* Try to find matching bundle.
		 *
		 * LATER: help from flow cache. It is optional, this
		 * is required only for output policy.
		 */
		dst = xfrm_find_bundle(fl, policy, family);
		if (IS_ERR(dst)) {
			err = PTR_ERR(dst);
			goto error;
		}

		if (dst)
			break;

#ifdef CONFIG_XFRM_SUB_POLICY
		if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
			pols[1] = xfrm_policy_lookup_bytype(XFRM_POLICY_TYPE_MAIN,
							    fl, family,
							    XFRM_POLICY_OUT);
			if (pols[1]) {
				if (IS_ERR(pols[1])) {
					err = PTR_ERR(pols[1]);
					goto error;
				}
				if (pols[1]->action == XFRM_POLICY_BLOCK) {
					err = -EPERM;
					goto error;
				}
				npols ++;
				xfrm_nr += pols[1]->xfrm_nr;
			}
		}

		/*
		 * Because neither flowi nor the bundle information knows the
		 * transformation template size, only after all policies have
		 * been searched can we tell whether every one of them is a
		 * bypass. Note that the not-transformed bypass above is
		 * likewise guarded by the non-sub-policy configuration.
		 */
		if (xfrm_nr == 0) {
			/* Flow passes not transformed. */
			xfrm_pols_put(pols, npols);
			return 0;
		}

#endif
		nx = xfrm_tmpl_resolve(pols, npols, fl, xfrm, family);

		if (unlikely(nx<0)) {
			err = nx;
			if (err == -EAGAIN && sysctl_xfrm_larval_drop) {
				/* EREMOTE tells the caller to generate
				 * a one-shot blackhole route.
				 */
				xfrm_pol_put(policy);
				return -EREMOTE;
			}
			if (err == -EAGAIN && flags) {
				DECLARE_WAITQUEUE(wait, current);

				add_wait_queue(&km_waitq, &wait);
				set_current_state(TASK_INTERRUPTIBLE);
				schedule();
				set_current_state(TASK_RUNNING);
				remove_wait_queue(&km_waitq, &wait);

				nx = xfrm_tmpl_resolve(pols, npols, fl, xfrm, family);

				if (nx == -EAGAIN && signal_pending(current)) {
					err = -ERESTART;
					goto error;
				}
				if (nx == -EAGAIN ||
				    genid != atomic_read(&flow_cache_genid)) {
					xfrm_pols_put(pols, npols);
					goto restart;
				}
				err = nx;
			}
			if (err < 0)
				goto error;
		}
		if (nx == 0) {
			/* Flow passes not transformed. */
			xfrm_pols_put(pols, npols);
			return 0;
		}

		dst = dst_orig;
		err = xfrm_bundle_create(policy, xfrm, nx, fl, &dst, family);

		if (unlikely(err)) {
			int i;
			for (i=0; i<nx; i++)
				xfrm_state_put(xfrm[i]);
			goto error;
		}

		for (pi = 0; pi < npols; pi++) {
			read_lock_bh(&pols[pi]->lock);
			pol_dead |= pols[pi]->dead;
			read_unlock_bh(&pols[pi]->lock);
		}

		write_lock_bh(&policy->lock);
		if (unlikely(pol_dead || stale_bundle(dst))) {
			/* Wow! While we worked on resolving, this
			 * policy has gone. Retry. It is not paranoia,
			 * we just cannot enlist new bundle to dead object.
			 * We can't enlist stale bundles either.
			 */
			write_unlock_bh(&policy->lock);
			if (dst)
				dst_free(dst);

			err = -EHOSTUNREACH;
			goto error;
		}

		if (npols > 1)
			err = xfrm_dst_update_parent(dst, &pols[1]->selector);
		else
			err = xfrm_dst_update_origin(dst, fl);
		if (unlikely(err)) {
			write_unlock_bh(&policy->lock);
			if (dst)
				dst_free(dst);
			goto error;
		}

		dst->next = policy->bundles;
		policy->bundles = dst;
		dst_hold(dst);
		write_unlock_bh(&policy->lock);
	}
	*dst_p = dst;
	dst_release(dst_orig);
	xfrm_pols_put(pols, npols);
	return 0;

error:
	dst_release(dst_orig);
	xfrm_pols_put(pols, npols);
	*dst_p = NULL;
	return err;
}
EXPORT_SYMBOL(__xfrm_lookup);
int xfrm_lookup(struct dst_entry **dst_p, struct flowi *fl,
		struct sock *sk, int flags)
{
	int err = __xfrm_lookup(dst_p, fl, sk, flags);

	if (err == -EREMOTE) {
		dst_release(*dst_p);
		*dst_p = NULL;
		err = -EAGAIN;
	}

	return err;
}
EXPORT_SYMBOL(xfrm_lookup);
static inline int
xfrm_secpath_reject(int idx, struct sk_buff *skb, struct flowi *fl)
{
	struct xfrm_state *x;

	if (!skb->sp || idx < 0 || idx >= skb->sp->len)
		return 0;
	x = skb->sp->xvec[idx];
	if (!x->type->reject)
		return 0;
	return x->type->reject(x, skb, fl);
}
/* When skb is transformed back to its "native" form, we have to
 * check policy restrictions. At the moment we make this in a maximally
 * stupid way. Shame on me. :-) Of course, connected sockets must
 * have policy cached at them.
 */

static inline int
xfrm_state_ok(struct xfrm_tmpl *tmpl, struct xfrm_state *x,
	      unsigned short family)
{
	if (xfrm_state_kern(x))
		return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family);
	return	x->id.proto == tmpl->id.proto &&
		(x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
		(x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
		x->props.mode == tmpl->mode &&
		((tmpl->aalgos & (1<<x->props.aalgo)) ||
		 !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
		!(x->props.mode != XFRM_MODE_TRANSPORT &&
		  xfrm_state_addr_cmp(tmpl, x, family));
}
/*
 * Returns 0 or a positive index when validation succeeds (either a bypass
 * because of an optional transport-mode template, or the next index after
 * the secpath state matched against the template).
 * Returns -1 when no matching template is found.
 * Otherwise "-2 - errored_index" is returned.
 */
static inline int
xfrm_policy_ok(struct xfrm_tmpl *tmpl, struct sec_path *sp, int start,
	       unsigned short family)
{
	int idx = start;

	if (tmpl->optional) {
		if (tmpl->mode == XFRM_MODE_TRANSPORT)
			return start;
	} else
		start = -1;
	for (; idx < sp->len; idx++) {
		if (xfrm_state_ok(tmpl, sp->xvec[idx], family))
			return ++idx;
		if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) {
			if (start == -1)
				start = -2-idx;
			break;
		}
	}
	return start;
}
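
/* Worked example: with sp->xvec == { AH transport, ESP tunnel } and a
 * required ESP tunnel template, start == 0 first skips the
 * transport-mode AH state (no match, but transport mode does not
 * terminate the scan), matches the ESP state at idx == 1 and returns 2;
 * an optional transport-mode template returns start unchanged, i.e. it
 * is simply bypassed.
 */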
int
xfrm_decode_session(struct sk_buff *skb, struct flowi *fl, unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	int err;

	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	afinfo->decode_session(skb, fl);
	err = security_xfrm_decode_session(skb, &fl->secid);
	xfrm_policy_put_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_decode_session);
static inline int secpath_has_nontransport(struct sec_path *sp, int k, int *idxp)
{
	for (; k < sp->len; k++) {
		if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) {
			*idxp = k;
			return 1;
		}
	}

	return 0;
}
int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
			unsigned short family)
{
	struct xfrm_policy *pol;
	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
	int npols = 0;
	int xfrm_nr;
	int pi;
	struct flowi fl;
	u8 fl_dir = policy_to_flow_dir(dir);
	int xerr_idx = -1;

	if (xfrm_decode_session(skb, &fl, family) < 0)
		return 0;
	nf_nat_decode_session(skb, &fl, family);

	/* First, check used SA against their selectors. */
	if (skb->sp) {
		int i;

		for (i=skb->sp->len-1; i>=0; i--) {
			struct xfrm_state *x = skb->sp->xvec[i];
			if (!xfrm_selector_match(&x->sel, &fl, family))
				return 0;
		}
	}

	pol = NULL;
	if (sk && sk->sk_policy[dir]) {
		pol = xfrm_sk_policy_lookup(sk, dir, &fl);
		if (IS_ERR(pol))
			return 0;
	}

	if (!pol)
		pol = flow_cache_lookup(&fl, family, fl_dir,
					xfrm_policy_lookup);

	if (IS_ERR(pol))
		return 0;

	if (!pol) {
		if (skb->sp && secpath_has_nontransport(skb->sp, 0, &xerr_idx)) {
			xfrm_secpath_reject(xerr_idx, skb, &fl);
			return 0;
		}
		return 1;
	}

	pol->curlft.use_time = get_seconds();

	pols[0] = pol;
	npols ++;
#ifdef CONFIG_XFRM_SUB_POLICY
	if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
		pols[1] = xfrm_policy_lookup_bytype(XFRM_POLICY_TYPE_MAIN,
						    &fl, family,
						    XFRM_POLICY_IN);
		if (pols[1]) {
			if (IS_ERR(pols[1]))
				return 0;
			pols[1]->curlft.use_time = get_seconds();
			npols ++;
		}
	}
#endif

	if (pol->action == XFRM_POLICY_ALLOW) {
		struct sec_path *sp;
		static struct sec_path dummy;
		struct xfrm_tmpl *tp[XFRM_MAX_DEPTH];
		struct xfrm_tmpl *stp[XFRM_MAX_DEPTH];
		struct xfrm_tmpl **tpp = tp;
		int ti = 0;
		int i, k;

		if ((sp = skb->sp) == NULL)
			sp = &dummy;

		for (pi = 0; pi < npols; pi++) {
			if (pols[pi] != pol &&
			    pols[pi]->action != XFRM_POLICY_ALLOW)
				goto reject;
			if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH)
				goto reject_error;
			for (i = 0; i < pols[pi]->xfrm_nr; i++)
				tpp[ti++] = &pols[pi]->xfrm_vec[i];
		}
		xfrm_nr = ti;
		if (npols > 1) {
			xfrm_tmpl_sort(stp, tpp, xfrm_nr, family);
			tpp = stp;
		}

		/* For each tunnel xfrm, find the first matching tmpl.
		 * For each tmpl before that, find corresponding xfrm.
		 * Order is _important_. Later we will implement
		 * some barriers, but at the moment barriers
		 * are implied between each two transformations.
		 */
		for (i = xfrm_nr-1, k = 0; i >= 0; i--) {
			k = xfrm_policy_ok(tpp[i], sp, k, family);
			if (k < 0) {
				if (k < -1)
					/* "-2 - errored_index" returned */
					xerr_idx = -(2+k);
				goto reject;
			}
		}

		if (secpath_has_nontransport(sp, k, &xerr_idx))
			goto reject;

		xfrm_pols_put(pols, npols);
		return 1;
	}

reject:
	xfrm_secpath_reject(xerr_idx, skb, &fl);
reject_error:
	xfrm_pols_put(pols, npols);
	return 0;
}
EXPORT_SYMBOL(__xfrm_policy_check);
int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
{
	struct flowi fl;

	if (xfrm_decode_session(skb, &fl, family) < 0)
		return 0;

	return xfrm_lookup(&skb->dst, &fl, NULL, 0) == 0;
}
EXPORT_SYMBOL(__xfrm_route_forward);
/* Optimize later using cookies and generation ids. */

static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
{
	/* Code (such as __xfrm4_bundle_create()) sets dst->obsolete
	 * to "-1" to force all XFRM destinations to get validated by
	 * dst_ops->check on every use.  We do this because when a
	 * normal route referenced by an XFRM dst is obsoleted we do
	 * not go looking around for all parent referencing XFRM dsts
	 * so that we can invalidate them.  It is just too much work.
	 * Instead we make the checks here on every use.  For example:
	 *
	 *	XFRM dst A --> IPv4 dst X
	 *
	 * X is the "xdst->route" of A (X is also the "dst->path" of A
	 * in this example).  If X is marked obsolete, "A" will not
	 * notice.  That's what we are validating here via the
	 * stale_bundle() check.
	 *
	 * When a policy's bundle is pruned, we dst_free() the XFRM
	 * dst which causes its ->obsolete field to be set to a
	 * positive non-zero integer.  If an XFRM dst has been pruned
	 * like this, we want to force a new route lookup.
	 */
	if (dst->obsolete < 0 && !stale_bundle(dst))
		return dst;

	return NULL;
}
static int stale_bundle(struct dst_entry *dst)
{
	return !xfrm_bundle_ok(NULL, (struct xfrm_dst *)dst, NULL, AF_UNSPEC, 0);
}
void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
{
	while ((dst = dst->child) && dst->xfrm && dst->dev == dev) {
		dst->dev = init_net.loopback_dev;
		dev_hold(dst->dev);
		dev_put(dev);
	}
}
EXPORT_SYMBOL(xfrm_dst_ifdown);
static void xfrm_link_failure(struct sk_buff *skb)
{
	/* Impossible. Such dst must be popped before it reaches the
	 * point of failure. */
	return;
}
static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
{
	if (dst) {
		if (dst->obsolete) {
			dst_release(dst);
			dst = NULL;
		}
	}
	return dst;
}
static void prune_one_bundle(struct xfrm_policy *pol, int (*func)(struct dst_entry *), struct dst_entry **gc_list_p)
{
	struct dst_entry *dst, **dstp;

	write_lock(&pol->lock);
	dstp = &pol->bundles;
	while ((dst=*dstp) != NULL) {
		if (func(dst)) {
			*dstp = dst->next;
			dst->next = *gc_list_p;
			*gc_list_p = dst;
		} else {
			dstp = &dst->next;
		}
	}
	write_unlock(&pol->lock);
}
static void xfrm_prune_bundles(int (*func)(struct dst_entry *))
{
	struct dst_entry *gc_list = NULL;
	int dir;

	read_lock_bh(&xfrm_policy_lock);
	for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
		struct xfrm_policy *pol;
		struct hlist_node *entry;
		struct hlist_head *table;
		int i;

		hlist_for_each_entry(pol, entry,
				     &xfrm_policy_inexact[dir], bydst)
			prune_one_bundle(pol, func, &gc_list);

		table = xfrm_policy_bydst[dir].table;
		for (i = xfrm_policy_bydst[dir].hmask; i >= 0; i--) {
			hlist_for_each_entry(pol, entry, table + i, bydst)
				prune_one_bundle(pol, func, &gc_list);
		}
	}
	read_unlock_bh(&xfrm_policy_lock);

	while (gc_list) {
		struct dst_entry *dst = gc_list;
		gc_list = dst->next;
		dst_free(dst);
	}
}
static int unused_bundle(struct dst_entry *dst)
{
	return !atomic_read(&dst->__refcnt);
}

static void __xfrm_garbage_collect(void)
{
	xfrm_prune_bundles(unused_bundle);
}

static int xfrm_flush_bundles(void)
{
	xfrm_prune_bundles(stale_bundle);
	return 0;
}
void xfrm_init_pmtu(struct dst_entry *dst)
{
	do {
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
		u32 pmtu, route_mtu_cached;

		pmtu = dst_mtu(dst->child);
		xdst->child_mtu_cached = pmtu;

		pmtu = xfrm_state_mtu(dst->xfrm, pmtu);

		route_mtu_cached = dst_mtu(xdst->route);
		xdst->route_mtu_cached = route_mtu_cached;

		if (pmtu > route_mtu_cached)
			pmtu = route_mtu_cached;

		dst->metrics[RTAX_MTU-1] = pmtu;
	} while ((dst = dst->next));
}

EXPORT_SYMBOL(xfrm_init_pmtu);
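
/* Example: for a single ESP tunnel over a 1500 byte route, the loop
 * above caches the child and route MTUs and stores the smaller of the
 * two, so the bundle's MTU becomes 1500 minus whatever ESP
 * header/trailer overhead xfrm_state_mtu() computes for the state.
 */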
/* Check that the bundle accepts the flow and its components are
 * still valid.
 */

int xfrm_bundle_ok(struct xfrm_policy *pol, struct xfrm_dst *first,
		   struct flowi *fl, int family, int strict)
{
	struct dst_entry *dst = &first->u.dst;
	struct xfrm_dst *last;
	u32 mtu;

	if (!dst_check(dst->path, ((struct xfrm_dst *)dst)->path_cookie) ||
	    (dst->dev && !netif_running(dst->dev)))
		return 0;
#ifdef CONFIG_XFRM_SUB_POLICY
	if (fl) {
		if (first->origin && !flow_cache_uli_match(first->origin, fl))
			return 0;
		if (first->partner &&
		    !xfrm_selector_match(first->partner, fl, family))
			return 0;
	}
#endif

	last = NULL;

	do {
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;

		if (fl && !xfrm_selector_match(&dst->xfrm->sel, fl, family))
			return 0;
		if (fl && pol &&
		    !security_xfrm_state_pol_flow_match(dst->xfrm, pol, fl))
			return 0;
		if (dst->xfrm->km.state != XFRM_STATE_VALID)
			return 0;
		if (xdst->genid != dst->xfrm->genid)
			return 0;

		if (strict && fl &&
		    !(dst->xfrm->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL) &&
		    !xfrm_state_addr_flow_check(dst->xfrm, fl, family))
			return 0;

		mtu = dst_mtu(dst->child);
		if (xdst->child_mtu_cached != mtu) {
			last = xdst;
			xdst->child_mtu_cached = mtu;
		}

		if (!dst_check(xdst->route, xdst->route_cookie))
			return 0;
		mtu = dst_mtu(xdst->route);
		if (xdst->route_mtu_cached != mtu) {
			last = xdst;
			xdst->route_mtu_cached = mtu;
		}

		dst = dst->child;
	} while (dst->xfrm);

	if (likely(!last))
		return 1;

	mtu = last->child_mtu_cached;
	for (;;) {
		dst = &last->u.dst;

		mtu = xfrm_state_mtu(dst->xfrm, mtu);
		if (mtu > last->route_mtu_cached)
			mtu = last->route_mtu_cached;
		dst->metrics[RTAX_MTU-1] = mtu;

		if (last == first)
			break;

		last = (struct xfrm_dst *)last->u.dst.next;
		last->child_mtu_cached = mtu;
	}

	return 1;
}

EXPORT_SYMBOL(xfrm_bundle_ok);
int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	int err = 0;
	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;
	write_lock_bh(&xfrm_policy_afinfo_lock);
	if (unlikely(xfrm_policy_afinfo[afinfo->family] != NULL))
		err = -ENOBUFS;
	else {
		struct dst_ops *dst_ops = afinfo->dst_ops;
		if (likely(dst_ops->kmem_cachep == NULL))
			dst_ops->kmem_cachep = xfrm_dst_cache;
		if (likely(dst_ops->check == NULL))
			dst_ops->check = xfrm_dst_check;
		if (likely(dst_ops->negative_advice == NULL))
			dst_ops->negative_advice = xfrm_negative_advice;
		if (likely(dst_ops->link_failure == NULL))
			dst_ops->link_failure = xfrm_link_failure;
		if (likely(afinfo->garbage_collect == NULL))
			afinfo->garbage_collect = __xfrm_garbage_collect;
		xfrm_policy_afinfo[afinfo->family] = afinfo;
	}
	write_unlock_bh(&xfrm_policy_afinfo_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_policy_register_afinfo);
int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	int err = 0;
	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;
	write_lock_bh(&xfrm_policy_afinfo_lock);
	if (likely(xfrm_policy_afinfo[afinfo->family] != NULL)) {
		if (unlikely(xfrm_policy_afinfo[afinfo->family] != afinfo))
			err = -EINVAL;
		else {
			struct dst_ops *dst_ops = afinfo->dst_ops;
			xfrm_policy_afinfo[afinfo->family] = NULL;
			dst_ops->kmem_cachep = NULL;
			dst_ops->check = NULL;
			dst_ops->negative_advice = NULL;
			dst_ops->link_failure = NULL;
			afinfo->garbage_collect = NULL;
		}
	}
	write_unlock_bh(&xfrm_policy_afinfo_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);
static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo;
	if (unlikely(family >= NPROTO))
		return NULL;
	read_lock(&xfrm_policy_afinfo_lock);
	afinfo = xfrm_policy_afinfo[family];
	if (unlikely(!afinfo))
		read_unlock(&xfrm_policy_afinfo_lock);
	return afinfo;
}

static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	read_unlock(&xfrm_policy_afinfo_lock);
}
static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	if (dev->nd_net != &init_net)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_DOWN:
		xfrm_flush_bundles();
	}
	return NOTIFY_DONE;
}

static struct notifier_block xfrm_dev_notifier = {
	.notifier_call	= xfrm_dev_event,
};
static void __init xfrm_policy_init(void)
{
	unsigned int hmask, sz;
	int dir;

	xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
					   sizeof(struct xfrm_dst),
					   0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
					   NULL);

	hmask = 8 - 1;
	sz = (hmask+1) * sizeof(struct hlist_head);

	xfrm_policy_byidx = xfrm_hash_alloc(sz);
	xfrm_idx_hmask = hmask;
	if (!xfrm_policy_byidx)
		panic("XFRM: failed to allocate byidx hash\n");

	for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
		struct xfrm_policy_hash *htab;

		INIT_HLIST_HEAD(&xfrm_policy_inexact[dir]);

		htab = &xfrm_policy_bydst[dir];
		htab->table = xfrm_hash_alloc(sz);
		htab->hmask = hmask;
		if (!htab->table)
			panic("XFRM: failed to allocate bydst hash\n");
	}

	INIT_WORK(&xfrm_policy_gc_work, xfrm_policy_gc_task);
	register_netdevice_notifier(&xfrm_dev_notifier);
}
void __init xfrm_init(void)
{
	xfrm_state_init();
	xfrm_policy_init();
	xfrm_input_init();
}
#ifdef CONFIG_AUDITSYSCALL
static inline void xfrm_audit_common_policyinfo(struct xfrm_policy *xp,
						struct audit_buffer *audit_buf)
{
	if (xp->security)
		audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
				 xp->security->ctx_alg, xp->security->ctx_doi,
				 xp->security->ctx_str);

	switch(xp->selector.family) {
	case AF_INET:
		audit_log_format(audit_buf, " src=%u.%u.%u.%u dst=%u.%u.%u.%u",
				 NIPQUAD(xp->selector.saddr.a4),
				 NIPQUAD(xp->selector.daddr.a4));
		break;
	case AF_INET6:
		{
			struct in6_addr saddr6, daddr6;

			memcpy(&saddr6, xp->selector.saddr.a6,
			       sizeof(struct in6_addr));
			memcpy(&daddr6, xp->selector.daddr.a6,
			       sizeof(struct in6_addr));
			audit_log_format(audit_buf,
					 " src=" NIP6_FMT " dst=" NIP6_FMT,
					 NIP6(saddr6), NIP6(daddr6));
		}
		break;
	}
}
void
xfrm_audit_policy_add(struct xfrm_policy *xp, int result, u32 auid, u32 sid)
{
	struct audit_buffer *audit_buf;
	extern int audit_enabled;

	if (audit_enabled == 0)
		return;
	audit_buf = xfrm_audit_start(sid, auid);
	if (audit_buf == NULL)
		return;
	audit_log_format(audit_buf, " op=SPD-add res=%u", result);
	xfrm_audit_common_policyinfo(xp, audit_buf);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_policy_add);

void
xfrm_audit_policy_delete(struct xfrm_policy *xp, int result, u32 auid, u32 sid)
{
	struct audit_buffer *audit_buf;
	extern int audit_enabled;

	if (audit_enabled == 0)
		return;
	audit_buf = xfrm_audit_start(sid, auid);
	if (audit_buf == NULL)
		return;
	audit_log_format(audit_buf, " op=SPD-delete res=%u", result);
	xfrm_audit_common_policyinfo(xp, audit_buf);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete);
#endif
#ifdef CONFIG_XFRM_MIGRATE
static int xfrm_migrate_selector_match(struct xfrm_selector *sel_cmp,
				       struct xfrm_selector *sel_tgt)
{
	if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
		if (sel_tgt->family == sel_cmp->family &&
		    xfrm_addr_cmp(&sel_tgt->daddr, &sel_cmp->daddr,
				  sel_cmp->family) == 0 &&
		    xfrm_addr_cmp(&sel_tgt->saddr, &sel_cmp->saddr,
				  sel_cmp->family) == 0 &&
		    sel_tgt->prefixlen_d == sel_cmp->prefixlen_d &&
		    sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) {
			return 1;
		}
	} else {
		if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) {
			return 1;
		}
	}
	return 0;
}
static struct xfrm_policy * xfrm_migrate_policy_find(struct xfrm_selector *sel,
						     u8 dir, u8 type)
{
	struct xfrm_policy *pol, *ret = NULL;
	struct hlist_node *entry;
	struct hlist_head *chain;
	u32 priority = ~0U;

	read_lock_bh(&xfrm_policy_lock);
	chain = policy_hash_direct(&sel->daddr, &sel->saddr, sel->family, dir);
	hlist_for_each_entry(pol, entry, chain, bydst) {
		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
		    pol->type == type) {
			ret = pol;
			priority = ret->priority;
			break;
		}
	}
	chain = &xfrm_policy_inexact[dir];
	hlist_for_each_entry(pol, entry, chain, bydst) {
		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
		    pol->type == type &&
		    pol->priority < priority) {
			ret = pol;
			break;
		}
	}

	if (ret)
		xfrm_pol_hold(ret);

	read_unlock_bh(&xfrm_policy_lock);

	return ret;
}
static int migrate_tmpl_match(struct xfrm_migrate *m, struct xfrm_tmpl *t)
{
	int match = 0;

	if (t->mode == m->mode && t->id.proto == m->proto &&
	    (m->reqid == 0 || t->reqid == m->reqid)) {
		switch (t->mode) {
		case XFRM_MODE_TUNNEL:
		case XFRM_MODE_BEET:
			if (xfrm_addr_cmp(&t->id.daddr, &m->old_daddr,
					  m->old_family) == 0 &&
			    xfrm_addr_cmp(&t->saddr, &m->old_saddr,
					  m->old_family) == 0) {
				match = 1;
			}
			break;
		case XFRM_MODE_TRANSPORT:
			/* in case of transport mode, the template does not
			 * store any IP addresses, hence we just compare
			 * mode and protocol */
			match = 1;
			break;
		default:
			break;
		}
	}
	return match;
}
2277 static int xfrm_policy_migrate(struct xfrm_policy *pol,
2278 struct xfrm_migrate *m, int num_migrate)
2280 struct xfrm_migrate *mp;
2281 struct dst_entry *dst;
2284 write_lock_bh(&pol->lock);
2285 if (unlikely(pol->dead)) {
2286 /* target policy has been deleted */
2287 write_unlock_bh(&pol->lock);
2291 for (i = 0; i < pol->xfrm_nr; i++) {
2292 for (j = 0, mp = m; j < num_migrate; j++, mp++) {
2293 if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i]))
2296 if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL &&
2297 pol->xfrm_vec[i].mode != XFRM_MODE_BEET)
2299 /* update endpoints */
2300 memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr,
2301 sizeof(pol->xfrm_vec[i].id.daddr));
2302 memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr,
2303 sizeof(pol->xfrm_vec[i].saddr));
2304 pol->xfrm_vec[i].encap_family = mp->new_family;
2306 while ((dst = pol->bundles) != NULL) {
2307 pol->bundles = dst->next;
2313 write_unlock_bh(&pol->lock);
static int xfrm_migrate_check(struct xfrm_migrate *m, int num_migrate)
{
	int i, j;

	if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH)
		return -EINVAL;

	for (i = 0; i < num_migrate; i++) {
		if ((xfrm_addr_cmp(&m[i].old_daddr, &m[i].new_daddr,
				   m[i].old_family) == 0) &&
		    (xfrm_addr_cmp(&m[i].old_saddr, &m[i].new_saddr,
				   m[i].old_family) == 0))
			return -EINVAL;
		if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) ||
		    xfrm_addr_any(&m[i].new_saddr, m[i].new_family))
			return -EINVAL;

		/* check if there is any duplicated entry */
		for (j = i + 1; j < num_migrate; j++) {
			if (!memcmp(&m[i].old_daddr, &m[j].old_daddr,
				    sizeof(m[i].old_daddr)) &&
			    !memcmp(&m[i].old_saddr, &m[j].old_saddr,
				    sizeof(m[i].old_saddr)) &&
			    m[i].proto == m[j].proto &&
			    m[i].mode == m[j].mode &&
			    m[i].reqid == m[j].reqid &&
			    m[i].old_family == m[j].old_family)
				return -EINVAL;
		}
	}

	return 0;
}
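
/* Example: a valid migrate entry must actually move an endpoint and name
 * concrete new addresses; { old == new } pairs, wildcard (any) new
 * addresses, and duplicated (old_daddr, old_saddr, proto, mode, reqid,
 * old_family) tuples are all rejected with -EINVAL.
 */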
int xfrm_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
		 struct xfrm_migrate *m, int num_migrate)
{
	int i, err, nx_cur = 0, nx_new = 0;
	struct xfrm_policy *pol = NULL;
	struct xfrm_state *x, *xc;
	struct xfrm_state *x_cur[XFRM_MAX_DEPTH];
	struct xfrm_state *x_new[XFRM_MAX_DEPTH];
	struct xfrm_migrate *mp;

	if ((err = xfrm_migrate_check(m, num_migrate)) < 0)
		goto out;

	/* Stage 1 - find policy */
	if ((pol = xfrm_migrate_policy_find(sel, dir, type)) == NULL) {
		err = -ENOENT;
		goto out;
	}

	/* Stage 2 - find and update state(s) */
	for (i = 0, mp = m; i < num_migrate; i++, mp++) {
		if ((x = xfrm_migrate_state_find(mp))) {
			x_cur[nx_cur] = x;
			nx_cur++;
			if ((xc = xfrm_state_migrate(x, mp))) {
				x_new[nx_new] = xc;
				nx_new++;
			} else {
				err = -ENODATA;
				goto restore_state;
			}
		}
	}

	/* Stage 3 - update policy */
	if ((err = xfrm_policy_migrate(pol, m, num_migrate)) < 0)
		goto restore_state;

	/* Stage 4 - delete old state(s) */
	if (nx_cur) {
		xfrm_states_put(x_cur, nx_cur);
		xfrm_states_delete(x_cur, nx_cur);
	}

	/* Stage 5 - announce */
	km_migrate(sel, dir, type, m, num_migrate);

	xfrm_pol_put(pol);

	return 0;

out:
	return err;

restore_state:
	if (pol)
		xfrm_pol_put(pol);
	if (nx_cur)
		xfrm_states_put(x_cur, nx_cur);
	if (nx_new)
		xfrm_states_delete(x_new, nx_new);

	return err;
}
EXPORT_SYMBOL(xfrm_migrate);
#endif