6 * Kazunori MIYAZAWA @USAGI
7 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
9 * YOSHIFUJI Hideaki @USAGI
10 * Split up af-specific functions
11 * Derek Atkins <derek@ihtfp.com>
12 * Add UDP Encapsulation
16 #include <linux/workqueue.h>
18 #include <linux/pfkeyv2.h>
19 #include <linux/ipsec.h>
20 #include <linux/module.h>
21 #include <linux/cache.h>
22 #include <linux/audit.h>
23 #include <asm/uaccess.h>
25 #include "xfrm_hash.h"
/*
 * Global state for the xfrm (IPsec transform) SA database.
 *
 * NOTE(review): the embedded source numbering below is discontinuous, so
 * this excerpt omits lines from the original file; fragments are kept
 * byte-identical and only comments are added.
 */
28 EXPORT_SYMBOL(xfrm_nl);
/* Async-event sysctls: notification interval and replay-seq threshold. */
30 u32 sysctl_xfrm_aevent_etime __read_mostly = XFRM_AE_ETIME;
31 EXPORT_SYMBOL(sysctl_xfrm_aevent_etime);
33 u32 sysctl_xfrm_aevent_rseqth __read_mostly = XFRM_AE_SEQT_SIZE;
34 EXPORT_SYMBOL(sysctl_xfrm_aevent_rseqth);
/* Lifetime in seconds of ACQUIRE (larval) states before they expire. */
36 u32 sysctl_xfrm_acq_expires __read_mostly = 30;
38 /* Each xfrm_state may be linked to two tables:
40 1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl)
41 2. Hash table by (daddr,family,reqid) to find what SAs exist for given
42 destination/tunnel endpoint. (output)
/* Protects the hash tables, xfrm_state_all list, counters and genid. */
45 static DEFINE_SPINLOCK(xfrm_state_lock);
47 /* Hash table to find appropriate SA towards given target (endpoint
48 * of tunnel or destination of transport mode) allowed by selector.
50 * Main use is finding SA after policy selected tunnel or transport mode.
51 * Also, it can be used by ah/esp icmp error handler to find offending SA.
/* Flat list of all states plus the three hash tables (bydst/bysrc/byspi). */
53 static LIST_HEAD(xfrm_state_all);
54 static struct hlist_head *xfrm_state_bydst __read_mostly;
55 static struct hlist_head *xfrm_state_bysrc __read_mostly;
56 static struct hlist_head *xfrm_state_byspi __read_mostly;
/* Current hash mask (table size - 1) and the maximum table size. */
57 static unsigned int xfrm_state_hmask __read_mostly;
58 static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;
/* Count of installed states; generation id bumped on inserts. */
59 static unsigned int xfrm_state_num;
60 static unsigned int xfrm_state_genid;
62 static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family);
63 static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);
/* Audit hook: real function with CONFIG_AUDITSYSCALL, no-op macro without. */
65 #ifdef CONFIG_AUDITSYSCALL
66 static void xfrm_audit_state_replay(struct xfrm_state *x,
67 struct sk_buff *skb, __be32 net_seq);
69 #define xfrm_audit_state_replay(x, s, sq) do { ; } while (0)
70 #endif /* CONFIG_AUDITSYSCALL */
/* Hash a (daddr, saddr, reqid, family) tuple into the bydst table. */
72 static inline unsigned int xfrm_dst_hash(xfrm_address_t *daddr,
73 xfrm_address_t *saddr,
75 unsigned short family)
77 return __xfrm_dst_hash(daddr, saddr, reqid, family, xfrm_state_hmask);
/* Hash a (daddr, saddr, family) tuple into the bysrc table. */
80 static inline unsigned int xfrm_src_hash(xfrm_address_t *daddr,
81 xfrm_address_t *saddr,
82 unsigned short family)
84 return __xfrm_src_hash(daddr, saddr, family, xfrm_state_hmask);
/* Hash a (daddr, spi, proto, family) tuple into the byspi table. */
87 static inline unsigned int
88 xfrm_spi_hash(xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family)
90 return __xfrm_spi_hash(daddr, spi, proto, family, xfrm_state_hmask);
/*
 * Move every state on one old bydst chain into the three new tables,
 * rehashing with the new mask.  Uses the _safe iterator because each
 * entry's bydst link is relinked while walking.
 */
93 static void xfrm_hash_transfer(struct hlist_head *list,
94 struct hlist_head *ndsttable,
95 struct hlist_head *nsrctable,
96 struct hlist_head *nspitable,
97 unsigned int nhashmask)
99 struct hlist_node *entry, *tmp;
100 struct xfrm_state *x;
102 hlist_for_each_entry_safe(x, entry, tmp, list, bydst) {
105 h = __xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
106 x->props.reqid, x->props.family,
108 hlist_add_head(&x->bydst, ndsttable+h);
110 h = __xfrm_src_hash(&x->id.daddr, &x->props.saddr,
113 hlist_add_head(&x->bysrc, nsrctable+h);
116 h = __xfrm_spi_hash(&x->id.daddr, x->id.spi,
117 x->id.proto, x->props.family,
119 hlist_add_head(&x->byspi, nspitable+h);
/* New table byte size: double the current bucket count. */
124 static unsigned long xfrm_hash_new_size(void)
126 return ((xfrm_state_hmask + 1) << 1) *
127 sizeof(struct hlist_head);
/* Serializes concurrent resize work items. */
130 static DEFINE_MUTEX(hash_resize_mutex);
/*
 * Workqueue handler: grow all three state hash tables.  Allocates the new
 * tables first (freeing what was allocated on partial failure), then takes
 * xfrm_state_lock only for the transfer and pointer swap, and finally frees
 * the old tables outside the lock.
 */
132 static void xfrm_hash_resize(struct work_struct *__unused)
134 struct hlist_head *ndst, *nsrc, *nspi, *odst, *osrc, *ospi;
135 unsigned long nsize, osize;
136 unsigned int nhashmask, ohashmask;
139 mutex_lock(&hash_resize_mutex);
141 nsize = xfrm_hash_new_size();
142 ndst = xfrm_hash_alloc(nsize);
145 nsrc = xfrm_hash_alloc(nsize);
/* nsrc allocation failed: release ndst and bail (error path elided). */
147 xfrm_hash_free(ndst, nsize);
150 nspi = xfrm_hash_alloc(nsize);
/* nspi allocation failed: release both earlier tables. */
152 xfrm_hash_free(ndst, nsize);
153 xfrm_hash_free(nsrc, nsize);
157 spin_lock_bh(&xfrm_state_lock);
159 nhashmask = (nsize / sizeof(struct hlist_head)) - 1U;
160 for (i = xfrm_state_hmask; i >= 0; i--)
161 xfrm_hash_transfer(xfrm_state_bydst+i, ndst, nsrc, nspi,
/* Swap in the new tables; remember the old ones for freeing below. */
164 odst = xfrm_state_bydst;
165 osrc = xfrm_state_bysrc;
166 ospi = xfrm_state_byspi;
167 ohashmask = xfrm_state_hmask;
169 xfrm_state_bydst = ndst;
170 xfrm_state_bysrc = nsrc;
171 xfrm_state_byspi = nspi;
172 xfrm_state_hmask = nhashmask;
174 spin_unlock_bh(&xfrm_state_lock);
176 osize = (ohashmask + 1) * sizeof(struct hlist_head);
177 xfrm_hash_free(odst, osize);
178 xfrm_hash_free(osrc, osize);
179 xfrm_hash_free(ospi, osize);
182 mutex_unlock(&hash_resize_mutex);
/* Deferred-resize work item, scheduled by xfrm_hash_grow_check(). */
185 static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize);
/* Wait queue used by key managers (exported for their use). */
187 DECLARE_WAIT_QUEUE_HEAD(km_waitq);
188 EXPORT_SYMBOL(km_waitq);
/* Per-family afinfo registry, guarded by its own rwlock. */
190 static DEFINE_RWLOCK(xfrm_state_afinfo_lock);
191 static struct xfrm_state_afinfo *xfrm_state_afinfo[NPROTO];
/* Garbage-collection: dead states queue here and are destroyed in a work item. */
193 static struct work_struct xfrm_state_gc_work;
194 static HLIST_HEAD(xfrm_state_gc_list);
195 static DEFINE_SPINLOCK(xfrm_state_gc_lock);
197 int __xfrm_state_delete(struct xfrm_state *x);
199 int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
200 void km_state_expired(struct xfrm_state *x, int hard, u32 pid);
/*
 * Look up the afinfo for @family and return it with the afinfo write lock
 * held; drops the lock (and presumably returns NULL — elided) when the
 * family is out of range or unregistered.
 */
202 static struct xfrm_state_afinfo *xfrm_state_lock_afinfo(unsigned int family)
204 struct xfrm_state_afinfo *afinfo;
205 if (unlikely(family >= NPROTO))
207 write_lock_bh(&xfrm_state_afinfo_lock);
208 afinfo = xfrm_state_afinfo[family];
209 if (unlikely(!afinfo))
210 write_unlock_bh(&xfrm_state_afinfo_lock);
/* Release the write lock taken by xfrm_state_lock_afinfo(). */
214 static void xfrm_state_unlock_afinfo(struct xfrm_state_afinfo *afinfo)
215 __releases(xfrm_state_afinfo_lock)
217 write_unlock_bh(&xfrm_state_afinfo_lock);
/*
 * Register an xfrm protocol handler in the per-family type map.
 * Only installs when the proto slot is empty (the -EEXIST path is elided
 * in this excerpt).  Returns -EAFNOSUPPORT for unknown families.
 */
220 int xfrm_register_type(const struct xfrm_type *type, unsigned short family)
222 struct xfrm_state_afinfo *afinfo = xfrm_state_lock_afinfo(family);
223 const struct xfrm_type **typemap;
226 if (unlikely(afinfo == NULL))
227 return -EAFNOSUPPORT;
228 typemap = afinfo->type_map;
230 if (likely(typemap[type->proto] == NULL))
231 typemap[type->proto] = type;
234 xfrm_state_unlock_afinfo(afinfo);
237 EXPORT_SYMBOL(xfrm_register_type);
/*
 * Remove a previously registered type; clears the slot only when it still
 * points at @type (mismatch error path elided).
 */
239 int xfrm_unregister_type(const struct xfrm_type *type, unsigned short family)
241 struct xfrm_state_afinfo *afinfo = xfrm_state_lock_afinfo(family);
242 const struct xfrm_type **typemap;
245 if (unlikely(afinfo == NULL))
246 return -EAFNOSUPPORT;
247 typemap = afinfo->type_map;
249 if (unlikely(typemap[type->proto] != type))
252 typemap[type->proto] = NULL;
253 xfrm_state_unlock_afinfo(afinfo);
256 EXPORT_SYMBOL(xfrm_unregister_type);
/*
 * Resolve (proto, family) to its xfrm_type, taking a module reference.
 * On a miss, tries exactly one request_module("xfrm-type-%d-%d") before
 * retrying (the retry loop back-edge is elided in this excerpt).
 */
258 static const struct xfrm_type *xfrm_get_type(u8 proto, unsigned short family)
260 struct xfrm_state_afinfo *afinfo;
261 const struct xfrm_type **typemap;
262 const struct xfrm_type *type;
263 int modload_attempted = 0;
266 afinfo = xfrm_state_get_afinfo(family);
267 if (unlikely(afinfo == NULL))
269 typemap = afinfo->type_map;
271 type = typemap[proto];
/* Found a type whose module is going away: treat as not found. */
272 if (unlikely(type && !try_module_get(type->owner)))
274 if (!type && !modload_attempted) {
275 xfrm_state_put_afinfo(afinfo);
276 request_module("xfrm-type-%d-%d", family, proto);
277 modload_attempted = 1;
281 xfrm_state_put_afinfo(afinfo);
/* Drop the module reference taken in xfrm_get_type(). */
285 static void xfrm_put_type(const struct xfrm_type *type)
287 module_put(type->owner);
/*
 * Register an encapsulation mode for @family.  Validates mode->encap
 * against XFRM_MODE_MAX, rejects duplicate slots, and pins the afinfo
 * owner's module while the mode is installed (some error paths elided).
 */
290 int xfrm_register_mode(struct xfrm_mode *mode, int family)
292 struct xfrm_state_afinfo *afinfo;
293 struct xfrm_mode **modemap;
296 if (unlikely(mode->encap >= XFRM_MODE_MAX))
299 afinfo = xfrm_state_lock_afinfo(family);
300 if (unlikely(afinfo == NULL))
301 return -EAFNOSUPPORT;
304 modemap = afinfo->mode_map;
305 if (modemap[mode->encap])
309 if (!try_module_get(afinfo->owner))
312 mode->afinfo = afinfo;
313 modemap[mode->encap] = mode;
317 xfrm_state_unlock_afinfo(afinfo);
320 EXPORT_SYMBOL(xfrm_register_mode);
/*
 * Unregister a mode: clears the slot and releases the afinfo module
 * reference only when the slot still points at @mode.
 */
322 int xfrm_unregister_mode(struct xfrm_mode *mode, int family)
324 struct xfrm_state_afinfo *afinfo;
325 struct xfrm_mode **modemap;
328 if (unlikely(mode->encap >= XFRM_MODE_MAX))
331 afinfo = xfrm_state_lock_afinfo(family);
332 if (unlikely(afinfo == NULL))
333 return -EAFNOSUPPORT;
336 modemap = afinfo->mode_map;
337 if (likely(modemap[mode->encap] == mode)) {
338 modemap[mode->encap] = NULL;
339 module_put(mode->afinfo->owner);
343 xfrm_state_unlock_afinfo(afinfo);
346 EXPORT_SYMBOL(xfrm_unregister_mode);
/*
 * Resolve (encap, family) to its xfrm_mode with a module reference held,
 * auto-loading "xfrm-mode-%d-%d" once on a miss — mirrors xfrm_get_type()
 * (the retry back-edge is elided in this excerpt).
 */
348 static struct xfrm_mode *xfrm_get_mode(unsigned int encap, int family)
350 struct xfrm_state_afinfo *afinfo;
351 struct xfrm_mode *mode;
352 int modload_attempted = 0;
354 if (unlikely(encap >= XFRM_MODE_MAX))
358 afinfo = xfrm_state_get_afinfo(family);
359 if (unlikely(afinfo == NULL))
362 mode = afinfo->mode_map[encap];
/* Mode exists but its module is unloading: treat as not found. */
363 if (unlikely(mode && !try_module_get(mode->owner)))
365 if (!mode && !modload_attempted) {
366 xfrm_state_put_afinfo(afinfo);
367 request_module("xfrm-mode-%d-%d", family, encap);
368 modload_attempted = 1;
372 xfrm_state_put_afinfo(afinfo);
/* Drop the module reference taken in xfrm_get_mode(). */
376 static void xfrm_put_mode(struct xfrm_mode *mode)
378 module_put(mode->owner);
/*
 * Final teardown of a dead state: stop both timers synchronously, release
 * mode/type/security resources (kfree of attached data elided), letting
 * the type run its destructor first.
 */
381 static void xfrm_state_gc_destroy(struct xfrm_state *x)
383 del_timer_sync(&x->timer);
384 del_timer_sync(&x->rtimer);
391 xfrm_put_mode(x->inner_mode);
393 xfrm_put_mode(x->outer_mode);
395 x->type->destructor(x);
396 xfrm_put_type(x->type);
398 security_xfrm_state_free(x);
/*
 * GC work item: atomically detach the pending list under the gc lock,
 * then destroy each entry outside the lock.  Dead states are chained on
 * their (now unused) bydst link.
 */
402 static void xfrm_state_gc_task(struct work_struct *data)
404 struct xfrm_state *x;
405 struct hlist_node *entry, *tmp;
406 struct hlist_head gc_list;
408 spin_lock_bh(&xfrm_state_gc_lock);
409 gc_list.first = xfrm_state_gc_list.first;
410 INIT_HLIST_HEAD(&xfrm_state_gc_list);
411 spin_unlock_bh(&xfrm_state_gc_lock);
413 hlist_for_each_entry_safe(x, entry, tmp, &gc_list, bydst)
414 xfrm_state_gc_destroy(x);
/*
 * Convert seconds to jiffies, clamped so the result never exceeds
 * MAX_SCHEDULE_TIMEOUT-1 (the non-clamped conversion line is elided).
 */
419 static inline unsigned long make_jiffies(long secs)
421 if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
422 return MAX_SCHEDULE_TIMEOUT-1;
/*
 * Per-state lifetime timer.  Computes the nearest hard/soft add/use
 * expiry deadline, fires soft expiry notifications via km_state_expired,
 * re-arms the timer for the next deadline, and deletes the state on hard
 * expiry (several branch targets elided in this excerpt).
 */
427 static void xfrm_timer_handler(unsigned long data)
429 struct xfrm_state *x = (struct xfrm_state*)data;
430 unsigned long now = get_seconds();
431 long next = LONG_MAX;
436 if (x->km.state == XFRM_STATE_DEAD)
438 if (x->km.state == XFRM_STATE_EXPIRED)
/* Hard lifetime limits: absolute time since add / since first use. */
440 if (x->lft.hard_add_expires_seconds) {
441 long tmo = x->lft.hard_add_expires_seconds +
442 x->curlft.add_time - now;
448 if (x->lft.hard_use_expires_seconds) {
449 long tmo = x->lft.hard_use_expires_seconds +
/* use_time of 0 means "not used yet"; fall back to now. */
450 (x->curlft.use_time ? : now) - now;
/* Soft limits: same computation, but trigger a warning, not deletion. */
458 if (x->lft.soft_add_expires_seconds) {
459 long tmo = x->lft.soft_add_expires_seconds +
460 x->curlft.add_time - now;
466 if (x->lft.soft_use_expires_seconds) {
467 long tmo = x->lft.soft_use_expires_seconds +
468 (x->curlft.use_time ? : now) - now;
/* Soft expiry: notify key manager (hard=0) but keep the state alive. */
477 km_state_expired(x, 0, 0);
479 if (next != LONG_MAX)
480 mod_timer(&x->timer, jiffies + make_jiffies(next));
/* Hard expiry of a larval (ACQ, no SPI) state: mark expired. */
485 if (x->km.state == XFRM_STATE_ACQ && x->id.spi == 0) {
486 x->km.state = XFRM_STATE_EXPIRED;
492 err = __xfrm_state_delete(x);
493 if (!err && x->id.spi)
494 km_state_expired(x, 1, 0);
496 xfrm_audit_state_delete(x, err ? 0 : 1,
497 audit_get_loginuid(current), 0);
500 spin_unlock(&x->lock);
503 static void xfrm_replay_timer_handler(unsigned long data);
/*
 * Allocate and initialize a fresh xfrm_state: refcount 1, empty hash
 * links, lifetime and replay timers set up, and all soft/hard byte and
 * packet limits defaulted to "infinite" (NULL-check on kzalloc elided).
 */
505 struct xfrm_state *xfrm_state_alloc(void)
507 struct xfrm_state *x;
509 x = kzalloc(sizeof(struct xfrm_state), GFP_ATOMIC);
512 atomic_set(&x->refcnt, 1);
513 atomic_set(&x->tunnel_users, 0);
514 INIT_LIST_HEAD(&x->all);
515 INIT_HLIST_NODE(&x->bydst);
516 INIT_HLIST_NODE(&x->bysrc);
517 INIT_HLIST_NODE(&x->byspi);
518 setup_timer(&x->timer, xfrm_timer_handler, (unsigned long)x);
519 setup_timer(&x->rtimer, xfrm_replay_timer_handler,
521 x->curlft.add_time = get_seconds();
522 x->lft.soft_byte_limit = XFRM_INF;
523 x->lft.soft_packet_limit = XFRM_INF;
524 x->lft.hard_byte_limit = XFRM_INF;
525 x->lft.hard_packet_limit = XFRM_INF;
526 x->replay_maxage = 0;
527 x->replay_maxdiff = 0;
528 spin_lock_init(&x->lock);
532 EXPORT_SYMBOL(xfrm_state_alloc);
/*
 * Last-reference teardown: remove from the all-states list (elided) under
 * xfrm_state_lock, queue on the GC list (reusing the bydst link), and
 * kick the GC work item.  Must only run on DEAD states.
 */
534 void __xfrm_state_destroy(struct xfrm_state *x)
536 BUG_TRAP(x->km.state == XFRM_STATE_DEAD);
538 spin_lock_bh(&xfrm_state_lock);
540 spin_unlock_bh(&xfrm_state_lock);
542 spin_lock_bh(&xfrm_state_gc_lock);
543 hlist_add_head(&x->bydst, &xfrm_state_gc_list);
544 spin_unlock_bh(&xfrm_state_gc_lock);
545 schedule_work(&xfrm_state_gc_work);
547 EXPORT_SYMBOL(__xfrm_state_destroy);
/*
 * Mark a state DEAD and unhash it from all three tables; caller holds
 * x->lock.  Drops the creation reference taken by xfrm_state_alloc().
 */
549 int __xfrm_state_delete(struct xfrm_state *x)
553 if (x->km.state != XFRM_STATE_DEAD) {
554 x->km.state = XFRM_STATE_DEAD;
555 spin_lock(&xfrm_state_lock);
556 hlist_del(&x->bydst);
557 hlist_del(&x->bysrc);
559 hlist_del(&x->byspi);
561 spin_unlock(&xfrm_state_lock);
563 /* All xfrm_state objects are created by xfrm_state_alloc.
564 * The xfrm_state_alloc call gives a reference, and that
565 * is what we are dropping here.
573 EXPORT_SYMBOL(__xfrm_state_delete);
/* Locked wrapper around __xfrm_state_delete(). */
575 int xfrm_state_delete(struct xfrm_state *x)
579 spin_lock_bh(&x->lock);
580 err = __xfrm_state_delete(x);
581 spin_unlock_bh(&x->lock);
585 EXPORT_SYMBOL(xfrm_state_delete);
/*
 * With CONFIG_SECURITY_NETWORK_XFRM: pre-check that the LSM permits
 * deleting every state matching @proto before any flush starts, auditing
 * the first denial.  The stub variant (line 612) compiles to nothing.
 */
587 #ifdef CONFIG_SECURITY_NETWORK_XFRM
589 xfrm_state_flush_secctx_check(u8 proto, struct xfrm_audit *audit_info)
593 for (i = 0; i <= xfrm_state_hmask; i++) {
594 struct hlist_node *entry;
595 struct xfrm_state *x;
597 hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
598 if (xfrm_id_proto_match(x->id.proto, proto) &&
599 (err = security_xfrm_state_delete(x)) != 0) {
600 xfrm_audit_state_delete(x, 0,
601 audit_info->loginuid,
612 xfrm_state_flush_secctx_check(u8 proto, struct xfrm_audit *audit_info)
/*
 * Delete every non-kernel-owned state matching @proto.  Drops the table
 * lock around each xfrm_state_delete() call and restarts the bucket scan
 * afterwards (restart goto elided); each deletion is audited.
 */
618 int xfrm_state_flush(u8 proto, struct xfrm_audit *audit_info)
622 spin_lock_bh(&xfrm_state_lock);
623 err = xfrm_state_flush_secctx_check(proto, audit_info);
627 for (i = 0; i <= xfrm_state_hmask; i++) {
628 struct hlist_node *entry;
629 struct xfrm_state *x;
631 hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
632 if (!xfrm_state_kern(x) &&
633 xfrm_id_proto_match(x->id.proto, proto)) {
635 spin_unlock_bh(&xfrm_state_lock);
637 err = xfrm_state_delete(x);
638 xfrm_audit_state_delete(x, err ? 0 : 1,
639 audit_info->loginuid,
643 spin_lock_bh(&xfrm_state_lock);
651 spin_unlock_bh(&xfrm_state_lock);
655 EXPORT_SYMBOL(xfrm_state_flush);
/* Snapshot SAD statistics (count, hash size, hash max) under the lock. */
657 void xfrm_sad_getinfo(struct xfrmk_sadinfo *si)
659 spin_lock_bh(&xfrm_state_lock);
660 si->sadcnt = xfrm_state_num;
661 si->sadhcnt = xfrm_state_hmask;
662 si->sadhmcnt = xfrm_state_hashmax;
663 spin_unlock_bh(&xfrm_state_lock);
665 EXPORT_SYMBOL(xfrm_sad_getinfo);
/*
 * Initialize a temporary selector on @x from the flow/template via the
 * family's afinfo hook (afinfo NULL-check elided in this excerpt).
 */
668 xfrm_init_tempsel(struct xfrm_state *x, struct flowi *fl,
669 struct xfrm_tmpl *tmpl,
670 xfrm_address_t *daddr, xfrm_address_t *saddr,
671 unsigned short family)
673 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
676 afinfo->init_tempsel(x, fl, tmpl, daddr, saddr);
677 xfrm_state_put_afinfo(afinfo);
/*
 * Find a state by (daddr, spi, proto, family) on the byspi chain,
 * comparing addresses per family (IPv4 by a4, IPv6 via ipv6_addr_equal);
 * the hold-and-return on a match is elided in this excerpt.
 */
681 static struct xfrm_state *__xfrm_state_lookup(xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family)
683 unsigned int h = xfrm_spi_hash(daddr, spi, proto, family);
684 struct xfrm_state *x;
685 struct hlist_node *entry;
687 hlist_for_each_entry(x, entry, xfrm_state_byspi+h, byspi) {
688 if (x->props.family != family ||
690 x->id.proto != proto)
695 if (x->id.daddr.a4 != daddr->a4)
699 if (!ipv6_addr_equal((struct in6_addr *)daddr,
/*
 * Find a state by (daddr, saddr, proto, family) on the bysrc chain —
 * the SPI-less counterpart of __xfrm_state_lookup().
 */
713 static struct xfrm_state *__xfrm_state_lookup_byaddr(xfrm_address_t *daddr, xfrm_address_t *saddr, u8 proto, unsigned short family)
715 unsigned int h = xfrm_src_hash(daddr, saddr, family);
716 struct xfrm_state *x;
717 struct hlist_node *entry;
719 hlist_for_each_entry(x, entry, xfrm_state_bysrc+h, bysrc) {
720 if (x->props.family != family ||
721 x->id.proto != proto)
726 if (x->id.daddr.a4 != daddr->a4 ||
727 x->props.saddr.a4 != saddr->a4)
731 if (!ipv6_addr_equal((struct in6_addr *)daddr,
734 !ipv6_addr_equal((struct in6_addr *)saddr,
/* Locate the table entry matching @x, by SPI when use_spi, else by address. */
748 static inline struct xfrm_state *
749 __xfrm_state_locate(struct xfrm_state *x, int use_spi, int family)
752 return __xfrm_state_lookup(&x->id.daddr, x->id.spi,
753 x->id.proto, family);
755 return __xfrm_state_lookup_byaddr(&x->id.daddr,
757 x->id.proto, family);
/*
 * Schedule a hash-table grow when an insert collided, the table is below
 * its maximum size, and the load factor exceeds one entry per bucket.
 */
760 static void xfrm_hash_grow_check(int have_hash_collision)
762 if (have_hash_collision &&
763 (xfrm_state_hmask + 1) < xfrm_state_hashmax &&
764 xfrm_state_num > xfrm_state_hmask)
765 schedule_work(&xfrm_hash_work);
/*
 * Main output-path SA resolution: find the best VALID state matching the
 * template/flow, or — when none exists and no resolution is in flight —
 * create a larval ACQ state and ask the key manager to negotiate one.
 * Returns via *err; -EAGAIN while an acquire is in progress.  Several
 * lines (best/error bookkeeping, labels) are elided in this excerpt.
 */
769 xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
770 struct flowi *fl, struct xfrm_tmpl *tmpl,
771 struct xfrm_policy *pol, int *err,
772 unsigned short family)
775 struct hlist_node *entry;
776 struct xfrm_state *x, *x0;
777 int acquire_in_progress = 0;
779 struct xfrm_state *best = NULL;
781 spin_lock_bh(&xfrm_state_lock);
782 h = xfrm_dst_hash(daddr, saddr, tmpl->reqid, family);
783 hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
784 if (x->props.family == family &&
785 x->props.reqid == tmpl->reqid &&
786 !(x->props.flags & XFRM_STATE_WILDRECV) &&
787 xfrm_state_addr_check(x, daddr, saddr, family) &&
788 tmpl->mode == x->props.mode &&
789 tmpl->id.proto == x->id.proto &&
790 (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) {
792 1. There is a valid state with matching selector.
794 2. Valid state with inappropriate selector. Skip.
796 Entering area of "sysdeps".
798 3. If state is not valid, selector is temporary,
799 it selects only session which triggered
800 previous resolution. Key manager will do
801 something to install a state with proper
/* Prefer the non-dying, most recently added VALID candidate. */
804 if (x->km.state == XFRM_STATE_VALID) {
805 if (!xfrm_selector_match(&x->sel, fl, x->sel.family) ||
806 !security_xfrm_state_pol_flow_match(x, pol, fl))
809 best->km.dying > x->km.dying ||
810 (best->km.dying == x->km.dying &&
811 best->curlft.add_time < x->curlft.add_time))
813 } else if (x->km.state == XFRM_STATE_ACQ) {
814 acquire_in_progress = 1;
815 } else if (x->km.state == XFRM_STATE_ERROR ||
816 x->km.state == XFRM_STATE_EXPIRED) {
817 if (xfrm_selector_match(&x->sel, fl, x->sel.family) &&
818 security_xfrm_state_pol_flow_match(x, pol, fl))
/* No usable state: check for an SPI clash, then build a larval entry. */
825 if (!x && !error && !acquire_in_progress) {
827 (x0 = __xfrm_state_lookup(daddr, tmpl->id.spi,
828 tmpl->id.proto, family)) != NULL) {
833 x = xfrm_state_alloc();
838 /* Initialize temporary selector matching only
839 * to current session. */
840 xfrm_init_tempsel(x, fl, tmpl, daddr, saddr, family);
842 error = security_xfrm_state_alloc_acquire(x, pol->security, fl->secid);
844 x->km.state = XFRM_STATE_DEAD;
/* Key manager accepted the query: hash the ACQ state and arm its expiry. */
850 if (km_query(x, tmpl, pol) == 0) {
851 x->km.state = XFRM_STATE_ACQ;
852 hlist_add_head(&x->bydst, xfrm_state_bydst+h);
853 h = xfrm_src_hash(daddr, saddr, family);
854 hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);
856 h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, family);
857 hlist_add_head(&x->byspi, xfrm_state_byspi+h);
859 x->lft.hard_add_expires_seconds = sysctl_xfrm_acq_expires;
860 x->timer.expires = jiffies + sysctl_xfrm_acq_expires*HZ;
861 add_timer(&x->timer);
863 xfrm_hash_grow_check(x->bydst.next != NULL);
865 x->km.state = XFRM_STATE_DEAD;
875 *err = acquire_in_progress ? -EAGAIN : error;
876 spin_unlock_bh(&xfrm_state_lock);
/*
 * Simplified lookup: return a VALID state exactly matching
 * (daddr, saddr, family, mode, proto, reqid), with no acquire fallback
 * (the hold/return lines are elided in this excerpt).
 */
881 xfrm_stateonly_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
882 unsigned short family, u8 mode, u8 proto, u32 reqid)
885 struct xfrm_state *rx = NULL, *x = NULL;
886 struct hlist_node *entry;
888 spin_lock(&xfrm_state_lock);
889 h = xfrm_dst_hash(daddr, saddr, reqid, family);
890 hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
891 if (x->props.family == family &&
892 x->props.reqid == reqid &&
893 !(x->props.flags & XFRM_STATE_WILDRECV) &&
894 xfrm_state_addr_check(x, daddr, saddr, family) &&
895 mode == x->props.mode &&
896 proto == x->id.proto &&
897 x->km.state == XFRM_STATE_VALID) {
905 spin_unlock(&xfrm_state_lock);
910 EXPORT_SYMBOL(xfrm_stateonly_find);
/*
 * Install @x into the all-states list and all three hash tables, stamp it
 * with a fresh generation id, and arm its timers.  Caller must hold
 * xfrm_state_lock.
 */
912 static void __xfrm_state_insert(struct xfrm_state *x)
916 x->genid = ++xfrm_state_genid;
918 list_add_tail(&x->all, &xfrm_state_all);
920 h = xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
921 x->props.reqid, x->props.family);
922 hlist_add_head(&x->bydst, xfrm_state_bydst+h);
924 h = xfrm_src_hash(&x->id.daddr, &x->props.saddr, x->props.family);
925 hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);
928 h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto,
931 hlist_add_head(&x->byspi, xfrm_state_byspi+h);
/* Fire the lifetime timer soon; arm replay timer only if configured. */
934 mod_timer(&x->timer, jiffies + HZ);
935 if (x->replay_maxage)
936 mod_timer(&x->rtimer, jiffies + x->replay_maxage);
942 xfrm_hash_grow_check(x->bydst.next != NULL);
945 /* xfrm_state_lock is held */
/*
 * Bump the genid of every existing state sharing @xnew's (family, reqid,
 * daddr, saddr) tuple so cached bundles referencing them are invalidated.
 */
946 static void __xfrm_state_bump_genids(struct xfrm_state *xnew)
948 unsigned short family = xnew->props.family;
949 u32 reqid = xnew->props.reqid;
950 struct xfrm_state *x;
951 struct hlist_node *entry;
954 h = xfrm_dst_hash(&xnew->id.daddr, &xnew->props.saddr, reqid, family);
955 hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
956 if (x->props.family == family &&
957 x->props.reqid == reqid &&
958 !xfrm_addr_cmp(&x->id.daddr, &xnew->id.daddr, family) &&
959 !xfrm_addr_cmp(&x->props.saddr, &xnew->props.saddr, family))
960 x->genid = xfrm_state_genid;
/* Public insert: bump sibling genids then install, under the table lock. */
964 void xfrm_state_insert(struct xfrm_state *x)
966 spin_lock_bh(&xfrm_state_lock);
967 __xfrm_state_bump_genids(x);
968 __xfrm_state_insert(x);
969 spin_unlock_bh(&xfrm_state_lock);
971 EXPORT_SYMBOL(xfrm_state_insert);
973 /* xfrm_state_lock is held */
/*
 * Find an existing ACQ (larval) state for the given tuple; when none is
 * found and @create is set, allocate and install a new one with a /32
 * (IPv4) or /128 (IPv6) host selector and the acquire expiry timer armed.
 * Returns the found/created state (hold/return lines elided).
 */
974 static struct xfrm_state *__find_acq_core(unsigned short family, u8 mode, u32 reqid, u8 proto, xfrm_address_t *daddr, xfrm_address_t *saddr, int create)
976 unsigned int h = xfrm_dst_hash(daddr, saddr, reqid, family);
977 struct hlist_node *entry;
978 struct xfrm_state *x;
980 hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
981 if (x->props.reqid != reqid ||
982 x->props.mode != mode ||
983 x->props.family != family ||
984 x->km.state != XFRM_STATE_ACQ ||
986 x->id.proto != proto)
991 if (x->id.daddr.a4 != daddr->a4 ||
992 x->props.saddr.a4 != saddr->a4)
996 if (!ipv6_addr_equal((struct in6_addr *)x->id.daddr.a6,
997 (struct in6_addr *)daddr) ||
998 !ipv6_addr_equal((struct in6_addr *)
1000 (struct in6_addr *)saddr))
/* Not found: build a new larval state (create flag check elided). */
1012 x = xfrm_state_alloc();
/* IPv4 branch: host-route selector and addresses. */
1016 x->sel.daddr.a4 = daddr->a4;
1017 x->sel.saddr.a4 = saddr->a4;
1018 x->sel.prefixlen_d = 32;
1019 x->sel.prefixlen_s = 32;
1020 x->props.saddr.a4 = saddr->a4;
1021 x->id.daddr.a4 = daddr->a4;
/* IPv6 branch: same, with 128-bit prefixes. */
1025 ipv6_addr_copy((struct in6_addr *)x->sel.daddr.a6,
1026 (struct in6_addr *)daddr);
1027 ipv6_addr_copy((struct in6_addr *)x->sel.saddr.a6,
1028 (struct in6_addr *)saddr);
1029 x->sel.prefixlen_d = 128;
1030 x->sel.prefixlen_s = 128;
1031 ipv6_addr_copy((struct in6_addr *)x->props.saddr.a6,
1032 (struct in6_addr *)saddr);
1033 ipv6_addr_copy((struct in6_addr *)x->id.daddr.a6,
1034 (struct in6_addr *)daddr);
1038 x->km.state = XFRM_STATE_ACQ;
1039 x->id.proto = proto;
1040 x->props.family = family;
1041 x->props.mode = mode;
1042 x->props.reqid = reqid;
1043 x->lft.hard_add_expires_seconds = sysctl_xfrm_acq_expires;
1045 x->timer.expires = jiffies + sysctl_xfrm_acq_expires*HZ;
1046 add_timer(&x->timer);
1047 hlist_add_head(&x->bydst, xfrm_state_bydst+h);
1048 h = xfrm_src_hash(daddr, saddr, family);
1049 hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);
1053 xfrm_hash_grow_check(x->bydst.next != NULL);
1059 static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq);
/*
 * Add a fully-specified state to the SAD: fails if an equivalent state
 * already exists (-EEXIST path elided), adopts a matching larval ACQ
 * state found by km seq or by tuple, bumps sibling genids, inserts, and
 * finally deletes the superseded larval entry outside the table lock.
 */
1061 int xfrm_state_add(struct xfrm_state *x)
1063 struct xfrm_state *x1;
1066 int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);
1068 family = x->props.family;
1070 spin_lock_bh(&xfrm_state_lock);
1072 x1 = __xfrm_state_locate(x, use_spi, family);
/* An ACQ with the same km seq must agree on proto and daddr to count. */
1080 if (use_spi && x->km.seq) {
1081 x1 = __xfrm_find_acq_byseq(x->km.seq);
1082 if (x1 && ((x1->id.proto != x->id.proto) ||
1083 xfrm_addr_cmp(&x1->id.daddr, &x->id.daddr, family))) {
1090 x1 = __find_acq_core(family, x->props.mode, x->props.reqid,
1092 &x->id.daddr, &x->props.saddr, 0);
1094 __xfrm_state_bump_genids(x);
1095 __xfrm_state_insert(x);
1099 spin_unlock_bh(&xfrm_state_lock);
1102 xfrm_state_delete(x1);
1108 EXPORT_SYMBOL(xfrm_state_add);
1110 #ifdef CONFIG_XFRM_MIGRATE
/*
 * Deep-copy a state for migration: id/selector/lifetime structs, props,
 * auth/encrypt/compress algorithms, encapsulation and care-of address,
 * then re-run xfrm_init_state() on the clone.  Allocation-failure and
 * error-unwind paths are elided in this excerpt.
 */
1111 static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, int *errp)
1114 struct xfrm_state *x = xfrm_state_alloc();
1118 memcpy(&x->id, &orig->id, sizeof(x->id));
1119 memcpy(&x->sel, &orig->sel, sizeof(x->sel));
1120 memcpy(&x->lft, &orig->lft, sizeof(x->lft));
1121 x->props.mode = orig->props.mode;
1122 x->props.replay_window = orig->props.replay_window;
1123 x->props.reqid = orig->props.reqid;
1124 x->props.family = orig->props.family;
1125 x->props.saddr = orig->props.saddr;
/* Clone each algorithm only when the original has one (guards elided). */
1128 x->aalg = xfrm_algo_clone(orig->aalg);
1132 x->props.aalgo = orig->props.aalgo;
1135 x->ealg = xfrm_algo_clone(orig->ealg);
1139 x->props.ealgo = orig->props.ealgo;
1142 x->calg = xfrm_algo_clone(orig->calg);
1146 x->props.calgo = orig->props.calgo;
1149 x->encap = kmemdup(orig->encap, sizeof(*x->encap), GFP_KERNEL);
1155 x->coaddr = kmemdup(orig->coaddr, sizeof(*x->coaddr),
1161 err = xfrm_init_state(x);
1165 x->props.flags = orig->props.flags;
1167 x->curlft.add_time = orig->curlft.add_time;
1168 x->km.state = orig->km.state;
1169 x->km.seq = orig->km.seq;
1187 /* xfrm_state_lock is held */
/*
 * Find the state matching a migration request: first by destination hash
 * (honoring m->reqid when set), then falling back to the source hash.
 * Match/hold/return lines are elided in this excerpt.
 */
1188 struct xfrm_state * xfrm_migrate_state_find(struct xfrm_migrate *m)
1191 struct xfrm_state *x;
1192 struct hlist_node *entry;
1195 h = xfrm_dst_hash(&m->old_daddr, &m->old_saddr,
1196 m->reqid, m->old_family);
1197 hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
1198 if (x->props.mode != m->mode ||
1199 x->id.proto != m->proto)
1201 if (m->reqid && x->props.reqid != m->reqid)
1203 if (xfrm_addr_cmp(&x->id.daddr, &m->old_daddr,
1205 xfrm_addr_cmp(&x->props.saddr, &m->old_saddr,
/* Fallback: scan the bysrc chain without the reqid constraint. */
1212 h = xfrm_src_hash(&m->old_daddr, &m->old_saddr,
1214 hlist_for_each_entry(x, entry, xfrm_state_bysrc+h, bysrc) {
1215 if (x->props.mode != m->mode ||
1216 x->id.proto != m->proto)
1218 if (xfrm_addr_cmp(&x->id.daddr, &m->old_daddr,
1220 xfrm_addr_cmp(&x->props.saddr, &m->old_saddr,
1230 EXPORT_SYMBOL(xfrm_migrate_state_find);
/*
 * Migrate @x to the new endpoints in @m: clone it, rewrite daddr/saddr,
 * then insert directly when only saddr changed, or go through
 * xfrm_state_add() when daddr (part of the lookup triplet) changed.
 */
1232 struct xfrm_state * xfrm_state_migrate(struct xfrm_state *x,
1233 struct xfrm_migrate *m)
1235 struct xfrm_state *xc;
1238 xc = xfrm_state_clone(x, &err);
1242 memcpy(&xc->id.daddr, &m->new_daddr, sizeof(xc->id.daddr));
1243 memcpy(&xc->props.saddr, &m->new_saddr, sizeof(xc->props.saddr));
1246 if (!xfrm_addr_cmp(&x->id.daddr, &m->new_daddr, m->new_family)) {
1247 /* a care is needed when the destination address of the
1248 state is to be updated as it is a part of triplet */
1249 xfrm_state_insert(xc);
1251 if ((err = xfrm_state_add(xc)) < 0)
1260 EXPORT_SYMBOL(xfrm_state_migrate);
/*
 * Update an existing state in place.  Locates the current entry; refuses
 * kernel-owned states; promotes a larval ACQ by inserting @x; otherwise
 * copies encap/coaddr/selector/lifetimes into the existing entry under
 * its lock and re-arms its timer.  Several error/exit paths are elided.
 */
1263 int xfrm_state_update(struct xfrm_state *x)
1265 struct xfrm_state *x1;
1267 int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);
1269 spin_lock_bh(&xfrm_state_lock);
1270 x1 = __xfrm_state_locate(x, use_spi, x->props.family);
1276 if (xfrm_state_kern(x1)) {
1282 if (x1->km.state == XFRM_STATE_ACQ) {
1283 __xfrm_state_insert(x);
1289 spin_unlock_bh(&xfrm_state_lock);
/* Delete the superseded larval entry outside the table lock. */
1295 xfrm_state_delete(x1);
1301 spin_lock_bh(&x1->lock);
1302 if (likely(x1->km.state == XFRM_STATE_VALID)) {
1303 if (x->encap && x1->encap)
1304 memcpy(x1->encap, x->encap, sizeof(*x1->encap));
1305 if (x->coaddr && x1->coaddr) {
1306 memcpy(x1->coaddr, x->coaddr, sizeof(*x1->coaddr));
/* Selector is only replaced for SPI-less protocols, and only if changed. */
1308 if (!use_spi && memcmp(&x1->sel, &x->sel, sizeof(x1->sel)))
1309 memcpy(&x1->sel, &x->sel, sizeof(x1->sel));
1310 memcpy(&x1->lft, &x->lft, sizeof(x1->lft));
1313 mod_timer(&x1->timer, jiffies + HZ);
1314 if (x1->curlft.use_time)
1315 xfrm_state_check_expire(x1);
1319 spin_unlock_bh(&x1->lock);
1325 EXPORT_SYMBOL(xfrm_state_update);
/*
 * Check byte/packet lifetime limits on the data path.  Records first use,
 * marks the state EXPIRED and fires its timer when a hard limit is hit,
 * and sends a soft-expiry notification when a soft limit is crossed
 * (return statements and the dying-check are elided in this excerpt).
 */
1327 int xfrm_state_check_expire(struct xfrm_state *x)
1329 if (!x->curlft.use_time)
1330 x->curlft.use_time = get_seconds();
1332 if (x->km.state != XFRM_STATE_VALID)
1335 if (x->curlft.bytes >= x->lft.hard_byte_limit ||
1336 x->curlft.packets >= x->lft.hard_packet_limit) {
1337 x->km.state = XFRM_STATE_EXPIRED;
1338 mod_timer(&x->timer, jiffies);
1343 (x->curlft.bytes >= x->lft.soft_byte_limit ||
1344 x->curlft.packets >= x->lft.soft_packet_limit)) {
/* Soft limit crossed: notify key manager (hard=0), keep state usable. */
1346 km_state_expired(x, 0, 0);
1350 EXPORT_SYMBOL(xfrm_state_check_expire);
/* Locked wrapper: look up a state by (daddr, spi, proto, family). */
1353 xfrm_state_lookup(xfrm_address_t *daddr, __be32 spi, u8 proto,
1354 unsigned short family)
1356 struct xfrm_state *x;
1358 spin_lock_bh(&xfrm_state_lock);
1359 x = __xfrm_state_lookup(daddr, spi, proto, family);
1360 spin_unlock_bh(&xfrm_state_lock);
1363 EXPORT_SYMBOL(xfrm_state_lookup);
/* Locked wrapper: look up a state by address pair instead of SPI. */
1366 xfrm_state_lookup_byaddr(xfrm_address_t *daddr, xfrm_address_t *saddr,
1367 u8 proto, unsigned short family)
1369 struct xfrm_state *x;
1371 spin_lock_bh(&xfrm_state_lock);
1372 x = __xfrm_state_lookup_byaddr(daddr, saddr, proto, family);
1373 spin_unlock_bh(&xfrm_state_lock);
1376 EXPORT_SYMBOL(xfrm_state_lookup_byaddr);
/* Locked wrapper: find (or optionally create) a larval ACQ state. */
1379 xfrm_find_acq(u8 mode, u32 reqid, u8 proto,
1380 xfrm_address_t *daddr, xfrm_address_t *saddr,
1381 int create, unsigned short family)
1383 struct xfrm_state *x;
1385 spin_lock_bh(&xfrm_state_lock);
1386 x = __find_acq_core(family, mode, reqid, proto, daddr, saddr, create);
1387 spin_unlock_bh(&xfrm_state_lock);
1391 EXPORT_SYMBOL(xfrm_find_acq);
1393 #ifdef CONFIG_XFRM_SUB_POLICY
/*
 * Sort @n templates into @dst via the family's optional tmpl_sort hook,
 * under the table lock; -EAFNOSUPPORT when the family is unknown.
 */
1395 xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n,
1396 unsigned short family)
1399 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
1401 return -EAFNOSUPPORT;
1403 spin_lock_bh(&xfrm_state_lock);
1404 if (afinfo->tmpl_sort)
1405 err = afinfo->tmpl_sort(dst, src, n);
1406 spin_unlock_bh(&xfrm_state_lock);
1407 xfrm_state_put_afinfo(afinfo);
1410 EXPORT_SYMBOL(xfrm_tmpl_sort);
/* Same as xfrm_tmpl_sort() but for arrays of states (state_sort hook). */
1413 xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
1414 unsigned short family)
1417 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
1419 return -EAFNOSUPPORT;
1421 spin_lock_bh(&xfrm_state_lock);
1422 if (afinfo->state_sort)
1423 err = afinfo->state_sort(dst, src, n);
1424 spin_unlock_bh(&xfrm_state_lock);
1425 xfrm_state_put_afinfo(afinfo);
1428 EXPORT_SYMBOL(xfrm_state_sort);
1431 /* Silly enough, but I'm lazy to build resolution list */
/*
 * Linear scan over every bydst bucket for an ACQ state whose km.seq
 * matches @seq (hold/return lines elided).  Caller holds xfrm_state_lock.
 */
1433 static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq)
1437 for (i = 0; i <= xfrm_state_hmask; i++) {
1438 struct hlist_node *entry;
1439 struct xfrm_state *x;
1441 hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
1442 if (x->km.seq == seq &&
1443 x->km.state == XFRM_STATE_ACQ) {
/* Locked wrapper around __xfrm_find_acq_byseq(). */
1452 struct xfrm_state *xfrm_find_acq_byseq(u32 seq)
1454 struct xfrm_state *x;
1456 spin_lock_bh(&xfrm_state_lock);
1457 x = __xfrm_find_acq_byseq(seq);
1458 spin_unlock_bh(&xfrm_state_lock);
1461 EXPORT_SYMBOL(xfrm_find_acq_byseq);
/*
 * Allocate the next acquire sequence number; the ?: skips 0 so the value
 * is never zero even when the counter wraps.
 */
1463 u32 xfrm_get_acqseq(void)
1467 static DEFINE_SPINLOCK(acqseq_lock);
1469 spin_lock_bh(&acqseq_lock);
1470 res = (++acqseq ? : ++acqseq);
1471 spin_unlock_bh(&acqseq_lock);
1474 EXPORT_SYMBOL(xfrm_get_acqseq);
/*
 * Assign an SPI in [low, high] to @x.  With low == high the single value
 * is checked for collision; otherwise up to (high-low+1) random probes
 * are tried.  On success the state is hashed into the byspi table.
 * Runs under x->lock; several exit/collision paths are elided.
 */
1476 int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high)
1479 struct xfrm_state *x0;
1481 __be32 minspi = htonl(low);
1482 __be32 maxspi = htonl(high);
1484 spin_lock_bh(&x->lock);
1485 if (x->km.state == XFRM_STATE_DEAD)
1494 if (minspi == maxspi) {
1495 x0 = xfrm_state_lookup(&x->id.daddr, minspi, x->id.proto, x->props.family);
/* Random probing: any lookup miss claims that SPI. */
1503 for (h=0; h<high-low+1; h++) {
1504 spi = low + net_random()%(high-low+1);
1505 x0 = xfrm_state_lookup(&x->id.daddr, htonl(spi), x->id.proto, x->props.family);
1507 x->id.spi = htonl(spi);
1514 spin_lock_bh(&xfrm_state_lock);
1515 h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, x->props.family);
1516 hlist_add_head(&x->byspi, xfrm_state_byspi+h);
1517 spin_unlock_bh(&xfrm_state_lock);
1523 spin_unlock_bh(&x->lock);
1527 EXPORT_SYMBOL(xfrm_alloc_spi);
/*
 * Resumable iteration over all states matching walk->proto, invoking
 * @func for each.  walk->state records where the previous call stopped
 * so dumps can span multiple calls; DEAD states are skipped.  Several
 * bookkeeping lines (count updates, resume point) are elided.
 */
1529 int xfrm_state_walk(struct xfrm_state_walk *walk,
1530 int (*func)(struct xfrm_state *, int, void*),
1533 struct xfrm_state *old, *x, *last = NULL;
/* A non-zero count with no saved state means the walk already finished. */
1536 if (walk->state == NULL && walk->count != 0)
1539 old = x = walk->state;
1541 spin_lock_bh(&xfrm_state_lock);
1543 x = list_first_entry(&xfrm_state_all, struct xfrm_state, all);
1544 list_for_each_entry_from(x, &xfrm_state_all, all) {
1545 if (x->km.state == XFRM_STATE_DEAD)
1547 if (!xfrm_id_proto_match(x->id.proto, walk->proto))
/* Callback runs one entry behind so the final entry can be flagged. */
1550 err = func(last, walk->count, data);
1552 xfrm_state_hold(last);
1560 if (walk->count == 0) {
1565 err = func(last, 0, data);
1567 spin_unlock_bh(&xfrm_state_lock);
1569 xfrm_state_put(old);
1572 EXPORT_SYMBOL(xfrm_state_walk);
1575 void xfrm_replay_notify(struct xfrm_state *x, int event)
1578 /* we send notify messages in case
1579 * 1. we updated one of the sequence numbers, and the seqno difference
1580 * is at least x->replay_maxdiff, in this case we also update the
1581 * timeout of our timer function
1582 * 2. if x->replay_maxage has elapsed since last update,
1583 * and there were changes
1585 * The state structure must be locked!
1589 case XFRM_REPLAY_UPDATE:
/* Change smaller than maxdiff in both directions: defer the
 * notification to the aging timer instead of sending now. */
1590 if (x->replay_maxdiff &&
1591 (x->replay.seq - x->preplay.seq < x->replay_maxdiff) &&
1592 (x->replay.oseq - x->preplay.oseq < x->replay_maxdiff)) {
1593 if (x->xflags & XFRM_TIME_DEFER)
1594 event = XFRM_REPLAY_TIMEOUT;
1601 case XFRM_REPLAY_TIMEOUT:
/* Nothing changed since the last notification: re-arm deferral
 * rather than sending a redundant aevent. */
1602 if ((x->replay.seq == x->preplay.seq) &&
1603 (x->replay.bitmap == x->preplay.bitmap) &&
1604 (x->replay.oseq == x->preplay.oseq)) {
1605 x->xflags |= XFRM_TIME_DEFER;
/* Snapshot current replay state and push XFRM_MSG_NEWAE to KMs. */
1612 memcpy(&x->preplay, &x->replay, sizeof(struct xfrm_replay_state));
1613 c.event = XFRM_MSG_NEWAE;
1614 c.data.aevent = event;
1615 km_state_notify(x, &c);
1617 if (x->replay_maxage &&
1618 !mod_timer(&x->rtimer, jiffies + x->replay_maxage))
1619 x->xflags &= ~XFRM_TIME_DEFER;
/* Replay aging timer (x->rtimer).  For a VALID state, either emit the
 * deferred XFRM_REPLAY_TIMEOUT aevent (when aevents are enabled) or
 * set XFRM_TIME_DEFER so the next update triggers one. */
1622 static void xfrm_replay_timer_handler(unsigned long data)
1624 struct xfrm_state *x = (struct xfrm_state*)data;
1626 spin_lock(&x->lock);
1628 if (x->km.state == XFRM_STATE_VALID) {
1629 if (xfrm_aevent_is_on())
1630 xfrm_replay_notify(x, XFRM_REPLAY_TIMEOUT);
1632 x->xflags |= XFRM_TIME_DEFER;
1635 spin_unlock(&x->lock);
/* Anti-replay check for inbound sequence number @net_seq (network
 * order).  Accepts any sequence ahead of the window; for sequence
 * numbers behind, rejects those outside the replay window or whose
 * bitmap bit is already set (duplicate).  Sequence 0 is invalid.
 * Failures bump stats.replay_window and are audited. */
1638 int xfrm_replay_check(struct xfrm_state *x,
1639 struct sk_buff *skb, __be32 net_seq)
1642 u32 seq = ntohl(net_seq);
1644 if (unlikely(seq == 0))
1647 if (likely(seq > x->replay.seq))
1650 diff = x->replay.seq - seq;
/* Window is bounded both by the configured size and the bitmap width. */
1651 if (diff >= min_t(unsigned int, x->props.replay_window,
1652 sizeof(x->replay.bitmap) * 8)) {
1653 x->stats.replay_window++;
1657 if (x->replay.bitmap & (1U << diff)) {
1664 xfrm_audit_state_replay(x, skb, net_seq);
/* Record receipt of sequence number @net_seq: slide the replay window
 * forward (or reset the bitmap on a jump larger than the window) when
 * the sequence is ahead, otherwise just mark its bit.  Emits an
 * XFRM_REPLAY_UPDATE aevent when aevents are enabled. */
1668 void xfrm_replay_advance(struct xfrm_state *x, __be32 net_seq)
1671 u32 seq = ntohl(net_seq);
1673 if (seq > x->replay.seq) {
1674 diff = seq - x->replay.seq;
1675 if (diff < x->props.replay_window)
1676 x->replay.bitmap = ((x->replay.bitmap) << diff) | 1;
1678 x->replay.bitmap = 1; /* jumped past the window: only new seq seen */
1679 x->replay.seq = seq;
1681 diff = x->replay.seq - seq;
1682 x->replay.bitmap |= (1U << diff);
1685 if (xfrm_aevent_is_on())
1686 xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
1689 static LIST_HEAD(xfrm_km_list);
1690 static DEFINE_RWLOCK(xfrm_km_lock);
/* Broadcast a policy event @c for (@xp, @dir) to every registered key
 * manager that implements notify_policy.  Read-side of xfrm_km_lock. */
1692 void km_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
1694 struct xfrm_mgr *km;
1696 read_lock(&xfrm_km_lock);
1697 list_for_each_entry(km, &xfrm_km_list, list)
1698 if (km->notify_policy)
1699 km->notify_policy(xp, dir, c);
1700 read_unlock(&xfrm_km_lock);
/* Broadcast a state event @c for SA @x to every registered key
 * manager.  Read-side of xfrm_km_lock. */
1703 void km_state_notify(struct xfrm_state *x, struct km_event *c)
1705 struct xfrm_mgr *km;
1706 read_lock(&xfrm_km_lock);
1707 list_for_each_entry(km, &xfrm_km_list, list)
1710 read_unlock(&xfrm_km_lock);
1713 EXPORT_SYMBOL(km_policy_notify);
1714 EXPORT_SYMBOL(km_state_notify);
/* Notify key managers that SA @x reached a (soft or hard, per @hard)
 * lifetime limit by emitting an XFRM_MSG_EXPIRE event. */
1716 void km_state_expired(struct xfrm_state *x, int hard, u32 pid)
1722 c.event = XFRM_MSG_EXPIRE;
1723 km_state_notify(x, &c);
1729 EXPORT_SYMBOL(km_state_expired);
1731 * We send to all registered managers regardless of failure
1732 * We are happy with one success
/* Ask every registered key manager to acquire an SA matching template
 * @t for state @x under policy @pol.  The first acquire() returning 0
 * makes the overall result 0; otherwise the last error (initially
 * -EINVAL) is returned. */
1734 int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol)
1736 int err = -EINVAL, acqret;
1737 struct xfrm_mgr *km;
1739 read_lock(&xfrm_km_lock);
1740 list_for_each_entry(km, &xfrm_km_list, list) {
1741 acqret = km->acquire(x, t, pol, XFRM_POLICY_OUT);
1745 read_unlock(&xfrm_km_lock);
1748 EXPORT_SYMBOL(km_query);
/* Report a NAT-T address/port mapping change for SA @x to key managers
 * implementing new_mapping (used for UDP-encapsulated IPsec). */
1750 int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport)
1753 struct xfrm_mgr *km;
1755 read_lock(&xfrm_km_lock);
1756 list_for_each_entry(km, &xfrm_km_list, list) {
1757 if (km->new_mapping)
1758 err = km->new_mapping(x, ipaddr, sport);
1762 read_unlock(&xfrm_km_lock);
1765 EXPORT_SYMBOL(km_new_mapping);
/* Notify key managers that policy @pol (direction @dir) reached a
 * lifetime limit via an XFRM_MSG_POLEXPIRE event. */
1767 void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 pid)
1773 c.event = XFRM_MSG_POLEXPIRE;
1774 km_policy_notify(pol, dir, &c);
1779 EXPORT_SYMBOL(km_policy_expired);
1781 #ifdef CONFIG_XFRM_MIGRATE
/* Forward an SA migration request (@num_migrate entries in @m, matching
 * selector @sel) to key managers that implement migrate. */
1782 int km_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
1783 struct xfrm_migrate *m, int num_migrate)
1787 struct xfrm_mgr *km;
1789 read_lock(&xfrm_km_lock);
1790 list_for_each_entry(km, &xfrm_km_list, list) {
1792 ret = km->migrate(sel, dir, type, m, num_migrate);
1797 read_unlock(&xfrm_km_lock);
1800 EXPORT_SYMBOL(km_migrate);
/* Send a report (protocol @proto, selector @sel, address @addr) to key
 * managers that implement the report callback. */
1803 int km_report(u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr)
1807 struct xfrm_mgr *km;
1809 read_lock(&xfrm_km_lock);
1810 list_for_each_entry(km, &xfrm_km_list, list) {
1812 ret = km->report(proto, sel, addr);
1817 read_unlock(&xfrm_km_lock);
1820 EXPORT_SYMBOL(km_report);
/* Handle a per-socket IPsec policy setsockopt: copy the user buffer
 * (bounded by PAGE_SIZE), ask each key manager to compile it into an
 * xfrm_policy, and install the result on @sk.  Returns 0 on success
 * or a negative errno (bad length, OOM, copy fault, no compiler). */
1822 int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
1826 struct xfrm_mgr *km;
1827 struct xfrm_policy *pol = NULL;
1829 if (optlen <= 0 || optlen > PAGE_SIZE)
1832 data = kmalloc(optlen, GFP_KERNEL);
1837 if (copy_from_user(data, optval, optlen))
/* First KM whose compile_policy succeeds wins. */
1841 read_lock(&xfrm_km_lock);
1842 list_for_each_entry(km, &xfrm_km_list, list) {
1843 pol = km->compile_policy(sk, optname, data,
1848 read_unlock(&xfrm_km_lock);
1851 xfrm_sk_policy_insert(sk, err, pol);
1860 EXPORT_SYMBOL(xfrm_user_policy);
/* Register key manager @km by appending it to xfrm_km_list under the
 * write side of xfrm_km_lock. */
1862 int xfrm_register_km(struct xfrm_mgr *km)
1864 write_lock_bh(&xfrm_km_lock);
1865 list_add_tail(&km->list, &xfrm_km_list);
1866 write_unlock_bh(&xfrm_km_lock);
1869 EXPORT_SYMBOL(xfrm_register_km);
/* Unregister key manager @km: unlink it from xfrm_km_list under the
 * write side of xfrm_km_lock. */
1871 int xfrm_unregister_km(struct xfrm_mgr *km)
1873 write_lock_bh(&xfrm_km_lock);
1874 list_del(&km->list);
1875 write_unlock_bh(&xfrm_km_lock);
1878 EXPORT_SYMBOL(xfrm_unregister_km);
/* Register per-family state ops @afinfo in the xfrm_state_afinfo table.
 * Rejects NULL, out-of-range families (>= NPROTO), and slots already
 * occupied by another afinfo. */
1880 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo)
1883 if (unlikely(afinfo == NULL))
1885 if (unlikely(afinfo->family >= NPROTO))
1886 return -EAFNOSUPPORT;
1887 write_lock_bh(&xfrm_state_afinfo_lock);
1888 if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL))
1891 xfrm_state_afinfo[afinfo->family] = afinfo;
1892 write_unlock_bh(&xfrm_state_afinfo_lock);
1895 EXPORT_SYMBOL(xfrm_state_register_afinfo);
/* Unregister per-family state ops @afinfo.  The slot is only cleared
 * when it actually holds @afinfo; a mismatch is treated as an error. */
1897 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
1900 if (unlikely(afinfo == NULL))
1902 if (unlikely(afinfo->family >= NPROTO))
1903 return -EAFNOSUPPORT;
1904 write_lock_bh(&xfrm_state_afinfo_lock);
1905 if (likely(xfrm_state_afinfo[afinfo->family] != NULL)) {
1906 if (unlikely(xfrm_state_afinfo[afinfo->family] != afinfo))
1909 xfrm_state_afinfo[afinfo->family] = NULL;
1911 write_unlock_bh(&xfrm_state_afinfo_lock);
1914 EXPORT_SYMBOL(xfrm_state_unregister_afinfo);
/* Look up the afinfo for @family.  On success the read side of
 * xfrm_state_afinfo_lock is left held; the caller must release it via
 * xfrm_state_put_afinfo().  Returns NULL (lock dropped) on failure. */
1916 static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family)
1918 struct xfrm_state_afinfo *afinfo;
1919 if (unlikely(family >= NPROTO))
1921 read_lock(&xfrm_state_afinfo_lock);
1922 afinfo = xfrm_state_afinfo[family];
1923 if (unlikely(!afinfo))
1924 read_unlock(&xfrm_state_afinfo_lock);
/* Release the read lock taken by a successful xfrm_state_get_afinfo(). */
1928 static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo)
1929 __releases(xfrm_state_afinfo_lock)
1931 read_unlock(&xfrm_state_afinfo_lock);
1934 /* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
/* Drop @x's reference on its tunnel SA: delete the tunnel state when
 * @x holds the last user reference besides the tunnel's own (count 2),
 * then decrement tunnel_users. */
1935 void xfrm_state_delete_tunnel(struct xfrm_state *x)
1938 struct xfrm_state *t = x->tunnel;
1940 if (atomic_read(&t->tunnel_users) == 2)
1941 xfrm_state_delete(t);
1942 atomic_dec(&t->tunnel_users);
1947 EXPORT_SYMBOL(xfrm_state_delete_tunnel);
/* Compute the effective payload MTU through SA @x for link MTU @mtu:
 * defer to the transform type's get_mtu() when the state is VALID and
 * provides one, otherwise subtract the transform header length. */
1949 int xfrm_state_mtu(struct xfrm_state *x, int mtu)
1953 spin_lock_bh(&x->lock);
1954 if (x->km.state == XFRM_STATE_VALID &&
1955 x->type && x->type->get_mtu)
1956 res = x->type->get_mtu(x, mtu);
1958 res = mtu - x->props.header_len;
1959 spin_unlock_bh(&x->lock);
/* Finish initialising SA @x after its parameters are set: apply
 * family-specific init_flags, resolve inner and outer modes, bind the
 * transform type for x->id.proto and run its init_state(), then mark
 * the state VALID.  Returns 0 on success or a negative errno
 * (-EAFNOSUPPORT / -EPROTONOSUPPORT on lookup failures). */
1963 int xfrm_init_state(struct xfrm_state *x)
1965 struct xfrm_state_afinfo *afinfo;
1966 int family = x->props.family;
1969 err = -EAFNOSUPPORT;
1970 afinfo = xfrm_state_get_afinfo(family);
1975 if (afinfo->init_flags)
1976 err = afinfo->init_flags(x);
1978 xfrm_state_put_afinfo(afinfo);
1983 err = -EPROTONOSUPPORT;
/* Inner mode follows the selector family (pre-encap traffic). */
1984 x->inner_mode = xfrm_get_mode(x->props.mode, x->sel.family);
1985 if (x->inner_mode == NULL)
/* Non-tunnel modes cannot translate between address families. */
1988 if (!(x->inner_mode->flags & XFRM_MODE_FLAG_TUNNEL) &&
1989 family != x->sel.family)
1992 x->type = xfrm_get_type(x->id.proto, family);
1993 if (x->type == NULL)
1996 err = x->type->init_state(x);
/* Outer mode follows the SA's own family (post-encap traffic). */
2000 x->outer_mode = xfrm_get_mode(x->props.mode, family);
2001 if (x->outer_mode == NULL)
2004 x->km.state = XFRM_STATE_VALID;
2010 EXPORT_SYMBOL(xfrm_init_state);
/* Boot-time init: allocate the initial bydst/bysrc/byspi hash tables
 * (8 buckets each; they are resized later as states accumulate), set
 * the hash mask, and set up the state GC work item.  Panics on
 * allocation failure since xfrm cannot operate without the tables. */
2012 void __init xfrm_state_init(void)
2016 sz = sizeof(struct hlist_head) * 8;
2018 xfrm_state_bydst = xfrm_hash_alloc(sz);
2019 xfrm_state_bysrc = xfrm_hash_alloc(sz);
2020 xfrm_state_byspi = xfrm_hash_alloc(sz);
2021 if (!xfrm_state_bydst || !xfrm_state_bysrc || !xfrm_state_byspi)
2022 panic("XFRM: Cannot allocate bydst/bysrc/byspi hashes.");
2023 xfrm_state_hmask = ((sz / sizeof(struct hlist_head)) - 1);
2025 INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task);
2028 #ifdef CONFIG_AUDITSYSCALL
/* Append SA identity to an audit record: security context (when
 * present), src/dst addresses formatted per address family, and the
 * SPI in decimal and hex. */
2029 static void xfrm_audit_helper_sainfo(struct xfrm_state *x,
2030 struct audit_buffer *audit_buf)
2032 struct xfrm_sec_ctx *ctx = x->security;
2033 u32 spi = ntohl(x->id.spi);
2036 audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
2037 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);
2039 switch(x->props.family) {
2041 audit_log_format(audit_buf,
2042 " src=" NIPQUAD_FMT " dst=" NIPQUAD_FMT,
2043 NIPQUAD(x->props.saddr.a4),
2044 NIPQUAD(x->id.daddr.a4));
2047 audit_log_format(audit_buf,
2048 " src=" NIP6_FMT " dst=" NIP6_FMT,
2049 NIP6(*(struct in6_addr *)x->props.saddr.a6),
2050 NIP6(*(struct in6_addr *)x->id.daddr.a6));
2054 audit_log_format(audit_buf, " spi=%u(0x%x)", spi, spi);
/* Append packet addressing from @skb's network header to an audit
 * record: IPv4 src/dst, or IPv6 src/dst plus the flow label. */
2057 static void xfrm_audit_helper_pktinfo(struct sk_buff *skb, u16 family,
2058 struct audit_buffer *audit_buf)
2061 struct ipv6hdr *iph6;
2066 audit_log_format(audit_buf,
2067 " src=" NIPQUAD_FMT " dst=" NIPQUAD_FMT,
2068 NIPQUAD(iph4->saddr),
2069 NIPQUAD(iph4->daddr));
2072 iph6 = ipv6_hdr(skb);
2073 audit_log_format(audit_buf,
2074 " src=" NIP6_FMT " dst=" NIP6_FMT
2075 " flowlbl=0x%x%x%x",
2078 iph6->flow_lbl[0] & 0x0f,
/* Audit an SAD add: record acting user, SA identity, and @result. */
2085 void xfrm_audit_state_add(struct xfrm_state *x, int result,
2086 u32 auid, u32 secid)
2088 struct audit_buffer *audit_buf;
2090 audit_buf = xfrm_audit_start("SAD-add");
2091 if (audit_buf == NULL)
2093 xfrm_audit_helper_usrinfo(auid, secid, audit_buf);
2094 xfrm_audit_helper_sainfo(x, audit_buf);
2095 audit_log_format(audit_buf, " res=%u", result);
2096 audit_log_end(audit_buf);
2098 EXPORT_SYMBOL_GPL(xfrm_audit_state_add);
/* Audit an SAD delete: record acting user, SA identity, and @result. */
2100 void xfrm_audit_state_delete(struct xfrm_state *x, int result,
2101 u32 auid, u32 secid)
2103 struct audit_buffer *audit_buf;
2105 audit_buf = xfrm_audit_start("SAD-delete");
2106 if (audit_buf == NULL)
2108 xfrm_audit_helper_usrinfo(auid, secid, audit_buf);
2109 xfrm_audit_helper_sainfo(x, audit_buf);
2110 audit_log_format(audit_buf, " res=%u", result);
2111 audit_log_end(audit_buf);
2113 EXPORT_SYMBOL_GPL(xfrm_audit_state_delete);
/* Audit an outbound sequence-number overflow on SA @x: log packet
 * addressing and the SPI. */
2115 void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
2116 struct sk_buff *skb)
2118 struct audit_buffer *audit_buf;
2121 audit_buf = xfrm_audit_start("SA-replay-overflow");
2122 if (audit_buf == NULL)
2124 xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
2125 /* don't record the sequence number because it's inherent in this kind
2126 * of audit message */
2127 spi = ntohl(x->id.spi);
2128 audit_log_format(audit_buf, " spi=%u(0x%x)", spi, spi);
2129 audit_log_end(audit_buf);
2131 EXPORT_SYMBOL_GPL(xfrm_audit_state_replay_overflow);
/* Audit a replayed (duplicate/out-of-window) packet on SA @x: log
 * packet addressing, the SPI, and the offending sequence number. */
2133 static void xfrm_audit_state_replay(struct xfrm_state *x,
2134 struct sk_buff *skb, __be32 net_seq)
2136 struct audit_buffer *audit_buf;
2139 audit_buf = xfrm_audit_start("SA-replayed-pkt");
2140 if (audit_buf == NULL)
2142 xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
2143 spi = ntohl(x->id.spi);
2144 audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
2145 spi, spi, ntohl(net_seq));
2146 audit_log_end(audit_buf);
/* Audit an SA-lookup miss when no SPI/seq are available (e.g. the
 * header could not be parsed): log packet addressing only. */
2149 void xfrm_audit_state_notfound_simple(struct sk_buff *skb, u16 family)
2151 struct audit_buffer *audit_buf;
2153 audit_buf = xfrm_audit_start("SA-notfound");
2154 if (audit_buf == NULL)
2156 xfrm_audit_helper_pktinfo(skb, family, audit_buf);
2157 audit_log_end(audit_buf);
2159 EXPORT_SYMBOL_GPL(xfrm_audit_state_notfound_simple);
/* Audit an SA-lookup miss with the packet's SPI and sequence number
 * (both network order) included in the record. */
2161 void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family,
2162 __be32 net_spi, __be32 net_seq)
2164 struct audit_buffer *audit_buf;
2167 audit_buf = xfrm_audit_start("SA-notfound");
2168 if (audit_buf == NULL)
2170 xfrm_audit_helper_pktinfo(skb, family, audit_buf);
2171 spi = ntohl(net_spi);
2172 audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
2173 spi, spi, ntohl(net_seq));
2174 audit_log_end(audit_buf);
2176 EXPORT_SYMBOL_GPL(xfrm_audit_state_notfound);
/* Audit an integrity-check (ICV) failure on SA @x: log packet
 * addressing, and — when the SPI/seq can be parsed from the packet for
 * protocol @proto — those values too. */
2178 void xfrm_audit_state_icvfail(struct xfrm_state *x,
2179 struct sk_buff *skb, u8 proto)
2181 struct audit_buffer *audit_buf;
2185 audit_buf = xfrm_audit_start("SA-icv-failure");
2186 if (audit_buf == NULL)
2188 xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
2189 if (xfrm_parse_spi(skb, proto, &net_spi, &net_seq) == 0) {
2190 u32 spi = ntohl(net_spi);
2191 audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
2192 spi, spi, ntohl(net_seq));
2194 audit_log_end(audit_buf);
2196 EXPORT_SYMBOL_GPL(xfrm_audit_state_icvfail);
2197 #endif /* CONFIG_AUDITSYSCALL */