6 * Kazunori MIYAZAWA @USAGI
7 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
9 * YOSHIFUJI Hideaki @USAGI
10 * Split up af-specific functions
11 * Derek Atkins <derek@ihtfp.com>
12 * Add UDP Encapsulation
16 #include <linux/workqueue.h>
18 #include <linux/pfkeyv2.h>
19 #include <linux/ipsec.h>
20 #include <linux/module.h>
21 #include <linux/cache.h>
22 #include <asm/uaccess.h>
24 #include "xfrm_hash.h"
/* Tunable defaults for async-event (aevent) notification and ACQUIRE
 * state expiry; exported so sysctl / other modules can read them.
 * NOTE(review): listing is truncated — embedded numbering jumps show
 * missing lines throughout this file.
 */
27 EXPORT_SYMBOL(xfrm_nl);
29 u32 sysctl_xfrm_aevent_etime __read_mostly = XFRM_AE_ETIME;
30 EXPORT_SYMBOL(sysctl_xfrm_aevent_etime);
32 u32 sysctl_xfrm_aevent_rseqth __read_mostly = XFRM_AE_SEQT_SIZE;
33 EXPORT_SYMBOL(sysctl_xfrm_aevent_rseqth);
35 u32 sysctl_xfrm_acq_expires __read_mostly = 30;
/* Global SA hash tables, all guarded by xfrm_state_lock:
 *   bydst — keyed by (daddr, saddr, reqid, family) for output lookup
 *   bysrc — keyed by (daddr, saddr, family)
 *   byspi — keyed by (daddr, spi, proto, family) for input lookup
 * hmask is (table size - 1); hashmax caps table growth at 1M buckets.
 */
37 /* Each xfrm_state may be linked to two tables:
39    1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl)
40    2. Hash table by (daddr,family,reqid) to find what SAs exist for given
41       destination/tunnel endpoint. (output)
44 static DEFINE_SPINLOCK(xfrm_state_lock);
46 /* Hash table to find appropriate SA towards given target (endpoint
47  * of tunnel or destination of transport mode) allowed by selector.
49  * Main use is finding SA after policy selected tunnel or transport mode.
50  * Also, it can be used by ah/esp icmp error handler to find offending SA.
52 static struct hlist_head *xfrm_state_bydst __read_mostly;
53 static struct hlist_head *xfrm_state_bysrc __read_mostly;
54 static struct hlist_head *xfrm_state_byspi __read_mostly;
55 static unsigned int xfrm_state_hmask __read_mostly;
56 static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;
57 static unsigned int xfrm_state_num;
58 static unsigned int xfrm_state_genid;
60 static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family);
61 static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);
/* Bucket index into xfrm_state_bydst for (daddr, saddr, reqid, family). */
63 static inline unsigned int xfrm_dst_hash(xfrm_address_t *daddr,
64 					 xfrm_address_t *saddr,
66 					 unsigned short family)
68 	return __xfrm_dst_hash(daddr, saddr, reqid, family, xfrm_state_hmask);
/* Bucket index into xfrm_state_bysrc for (daddr, saddr, family). */
71 static inline unsigned int xfrm_src_hash(xfrm_address_t *daddr,
72 					 xfrm_address_t *saddr,
73 					 unsigned short family)
75 	return __xfrm_src_hash(daddr, saddr, family, xfrm_state_hmask);
/* Bucket index into xfrm_state_byspi for (daddr, spi, proto, family). */
78 static inline unsigned int
79 xfrm_spi_hash(xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family)
81 	return __xfrm_spi_hash(daddr, spi, proto, family, xfrm_state_hmask);
/* Re-hash every state on one old bydst chain into the three new tables,
 * using the new mask.  Safe-iterates because each entry is unlinked and
 * re-added as it is visited.  Caller holds xfrm_state_lock.
 * NOTE(review): hlist_del calls before each hlist_add_head are among the
 * lines missing from this truncated listing.
 */
84 static void xfrm_hash_transfer(struct hlist_head *list,
85 			       struct hlist_head *ndsttable,
86 			       struct hlist_head *nsrctable,
87 			       struct hlist_head *nspitable,
88 			       unsigned int nhashmask)
90 	struct hlist_node *entry, *tmp;
93 	hlist_for_each_entry_safe(x, entry, tmp, list, bydst) {
96 		h = __xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
97 				    x->props.reqid, x->props.family,
99 		hlist_add_head(&x->bydst, ndsttable+h);
101 		h = __xfrm_src_hash(&x->id.daddr, &x->props.saddr,
104 		hlist_add_head(&x->bysrc, nsrctable+h);
107 			h = __xfrm_spi_hash(&x->id.daddr, x->id.spi,
108 					    x->id.proto, x->props.family,
110 			hlist_add_head(&x->byspi, nspitable+h);
/* Byte size of a table twice the current bucket count (doubling growth). */
115 static unsigned long xfrm_hash_new_size(void)
117 	return ((xfrm_state_hmask + 1) << 1) *
118 		sizeof(struct hlist_head);
/* Workqueue handler: grow all three SA hash tables to double size.
 * Allocates the new tables outside the lock, transfers all chains under
 * xfrm_state_lock (BH disabled), then frees the old tables.  The mutex
 * serializes concurrent resize requests; on partial allocation failure
 * the already-allocated tables are freed and the resize is abandoned.
 */
121 static DEFINE_MUTEX(hash_resize_mutex);
123 static void xfrm_hash_resize(struct work_struct *__unused)
125 	struct hlist_head *ndst, *nsrc, *nspi, *odst, *osrc, *ospi;
126 	unsigned long nsize, osize;
127 	unsigned int nhashmask, ohashmask;
130 	mutex_lock(&hash_resize_mutex);
132 	nsize = xfrm_hash_new_size();
133 	ndst = xfrm_hash_alloc(nsize);
136 	nsrc = xfrm_hash_alloc(nsize);
138 		xfrm_hash_free(ndst, nsize);
141 	nspi = xfrm_hash_alloc(nsize);
143 		xfrm_hash_free(ndst, nsize);
144 		xfrm_hash_free(nsrc, nsize);
148 	spin_lock_bh(&xfrm_state_lock);
150 	nhashmask = (nsize / sizeof(struct hlist_head)) - 1U;
151 	for (i = xfrm_state_hmask; i >= 0; i--)
152 		xfrm_hash_transfer(xfrm_state_bydst+i, ndst, nsrc, nspi,
155 	odst = xfrm_state_bydst;
156 	osrc = xfrm_state_bysrc;
157 	ospi = xfrm_state_byspi;
158 	ohashmask = xfrm_state_hmask;
160 	xfrm_state_bydst = ndst;
161 	xfrm_state_bysrc = nsrc;
162 	xfrm_state_byspi = nspi;
163 	xfrm_state_hmask = nhashmask;
165 	spin_unlock_bh(&xfrm_state_lock);
167 	osize = (ohashmask + 1) * sizeof(struct hlist_head);
168 	xfrm_hash_free(odst, osize);
169 	xfrm_hash_free(osrc, osize);
170 	xfrm_hash_free(ospi, osize);
173 	mutex_unlock(&hash_resize_mutex);
/* Deferred-work plumbing: hash-resize work item, key-manager wait queue,
 * per-family afinfo registry (rwlock-protected), and the garbage-collect
 * list/lock/work used to destroy dead states out of softirq context.
 */
176 static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize);
178 DECLARE_WAIT_QUEUE_HEAD(km_waitq);
179 EXPORT_SYMBOL(km_waitq);
181 static DEFINE_RWLOCK(xfrm_state_afinfo_lock);
182 static struct xfrm_state_afinfo *xfrm_state_afinfo[NPROTO];
184 static struct work_struct xfrm_state_gc_work;
185 static HLIST_HEAD(xfrm_state_gc_list);
186 static DEFINE_SPINLOCK(xfrm_state_gc_lock);
188 int __xfrm_state_delete(struct xfrm_state *x);
190 int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
191 void km_state_expired(struct xfrm_state *x, int hard, u32 pid);
/* Look up the afinfo for @family and return it with the afinfo write
 * lock held (BH off); returns NULL (unlocking first) if the family is
 * out of range or unregistered.  Pair with xfrm_state_unlock_afinfo().
 */
193 static struct xfrm_state_afinfo *xfrm_state_lock_afinfo(unsigned int family)
195 	struct xfrm_state_afinfo *afinfo;
196 	if (unlikely(family >= NPROTO))
198 	write_lock_bh(&xfrm_state_afinfo_lock);
199 	afinfo = xfrm_state_afinfo[family];
200 	if (unlikely(!afinfo))
201 		write_unlock_bh(&xfrm_state_afinfo_lock);
/* Drop the afinfo write lock taken by xfrm_state_lock_afinfo(). */
205 static void xfrm_state_unlock_afinfo(struct xfrm_state_afinfo *afinfo)
207 	write_unlock_bh(&xfrm_state_afinfo_lock);
/* Register an xfrm transform type (e.g. AH/ESP) in the per-family type
 * map; slot must be empty.  Returns -EAFNOSUPPORT if the family has no
 * afinfo.  NOTE(review): the -EEXIST path for an occupied slot is among
 * the lines missing from this truncated listing.
 */
210 int xfrm_register_type(struct xfrm_type *type, unsigned short family)
212 	struct xfrm_state_afinfo *afinfo = xfrm_state_lock_afinfo(family);
213 	struct xfrm_type **typemap;
216 	if (unlikely(afinfo == NULL))
217 		return -EAFNOSUPPORT;
218 	typemap = afinfo->type_map;
220 	if (likely(typemap[type->proto] == NULL))
221 		typemap[type->proto] = type;
224 	xfrm_state_unlock_afinfo(afinfo);
227 EXPORT_SYMBOL(xfrm_register_type);
/* Remove @type from the per-family type map; only succeeds when the slot
 * currently holds exactly this type.  Returns -EAFNOSUPPORT for an
 * unregistered family.
 */
229 int xfrm_unregister_type(struct xfrm_type *type, unsigned short family)
231 	struct xfrm_state_afinfo *afinfo = xfrm_state_lock_afinfo(family);
232 	struct xfrm_type **typemap;
235 	if (unlikely(afinfo == NULL))
236 		return -EAFNOSUPPORT;
237 	typemap = afinfo->type_map;
239 	if (unlikely(typemap[type->proto] != type))
242 		typemap[type->proto] = NULL;
243 	xfrm_state_unlock_afinfo(afinfo);
246 EXPORT_SYMBOL(xfrm_unregister_type);
/* Resolve @proto to its xfrm_type for @family, taking a module reference
 * on the type.  If absent, tries once to load "xfrm-type-<family>-<proto>"
 * via request_module() and retries (the retry loop's goto is among the
 * missing lines).  Returns the type or NULL; afinfo ref dropped on exit.
 */
248 static struct xfrm_type *xfrm_get_type(u8 proto, unsigned short family)
250 	struct xfrm_state_afinfo *afinfo;
251 	struct xfrm_type **typemap;
252 	struct xfrm_type *type;
253 	int modload_attempted = 0;
256 	afinfo = xfrm_state_get_afinfo(family);
257 	if (unlikely(afinfo == NULL))
259 	typemap = afinfo->type_map;
261 	type = typemap[proto];
262 	if (unlikely(type && !try_module_get(type->owner)))
264 	if (!type && !modload_attempted) {
265 		xfrm_state_put_afinfo(afinfo);
266 		request_module("xfrm-type-%d-%d", family, proto);
267 		modload_attempted = 1;
271 	xfrm_state_put_afinfo(afinfo);
/* Release the module reference taken by xfrm_get_type(). */
275 static void xfrm_put_type(struct xfrm_type *type)
277 	module_put(type->owner);
/* Register an xfrm mode (transport/tunnel/...) in the per-family mode
 * map.  Rejects encap ids >= XFRM_MODE_MAX and occupied slots, pins the
 * afinfo's owning module, and records the afinfo back-pointer on the
 * mode.  Error-return constants for the reject paths are among the
 * missing lines.
 */
280 int xfrm_register_mode(struct xfrm_mode *mode, int family)
282 	struct xfrm_state_afinfo *afinfo;
283 	struct xfrm_mode **modemap;
286 	if (unlikely(mode->encap >= XFRM_MODE_MAX))
289 	afinfo = xfrm_state_lock_afinfo(family);
290 	if (unlikely(afinfo == NULL))
291 		return -EAFNOSUPPORT;
294 	modemap = afinfo->mode_map;
295 	if (modemap[mode->encap])
299 	if (!try_module_get(afinfo->owner))
302 	mode->afinfo = afinfo;
303 	modemap[mode->encap] = mode;
307 	xfrm_state_unlock_afinfo(afinfo);
310 EXPORT_SYMBOL(xfrm_register_mode);
/* Remove @mode from the per-family mode map (only if the slot holds it)
 * and drop the afinfo module reference taken at registration.
 */
312 int xfrm_unregister_mode(struct xfrm_mode *mode, int family)
314 	struct xfrm_state_afinfo *afinfo;
315 	struct xfrm_mode **modemap;
318 	if (unlikely(mode->encap >= XFRM_MODE_MAX))
321 	afinfo = xfrm_state_lock_afinfo(family);
322 	if (unlikely(afinfo == NULL))
323 		return -EAFNOSUPPORT;
326 	modemap = afinfo->mode_map;
327 	if (likely(modemap[mode->encap] == mode)) {
328 		modemap[mode->encap] = NULL;
329 		module_put(mode->afinfo->owner);
333 	xfrm_state_unlock_afinfo(afinfo);
336 EXPORT_SYMBOL(xfrm_unregister_mode);
/* Resolve @encap to its xfrm_mode for @family, pinning the mode's
 * module.  Mirrors xfrm_get_type(): one request_module() attempt for
 * "xfrm-mode-<family>-<encap>" before giving up (retry goto missing
 * from this listing).  Returns the mode or NULL.
 */
338 static struct xfrm_mode *xfrm_get_mode(unsigned int encap, int family)
340 	struct xfrm_state_afinfo *afinfo;
341 	struct xfrm_mode *mode;
342 	int modload_attempted = 0;
344 	if (unlikely(encap >= XFRM_MODE_MAX))
348 	afinfo = xfrm_state_get_afinfo(family);
349 	if (unlikely(afinfo == NULL))
352 	mode = afinfo->mode_map[encap];
353 	if (unlikely(mode && !try_module_get(mode->owner)))
355 	if (!mode && !modload_attempted) {
356 		xfrm_state_put_afinfo(afinfo);
357 		request_module("xfrm-mode-%d-%d", family, encap);
358 		modload_attempted = 1;
362 	xfrm_state_put_afinfo(afinfo);
/* Release the module reference taken by xfrm_get_mode(). */
366 static void xfrm_put_mode(struct xfrm_mode *mode)
368 	module_put(mode->owner);
/* Final teardown of a dead state from the GC worker: stop both timers,
 * release mode/type/security resources.  The kfree of algorithm blobs
 * and of x itself falls in the lines missing from this listing.
 */
371 static void xfrm_state_gc_destroy(struct xfrm_state *x)
373 	del_timer_sync(&x->timer);
374 	del_timer_sync(&x->rtimer);
381 		xfrm_put_mode(x->inner_mode);
383 		xfrm_put_mode(x->outer_mode);
385 		x->type->destructor(x);
386 		xfrm_put_type(x->type);
388 	security_xfrm_state_free(x);
/* GC work handler: atomically steal the pending list under the GC lock,
 * then destroy each state outside the lock (destroy may sleep in
 * del_timer_sync).  Dead states are chained via their bydst node.
 */
392 static void xfrm_state_gc_task(struct work_struct *data)
394 	struct xfrm_state *x;
395 	struct hlist_node *entry, *tmp;
396 	struct hlist_head gc_list;
398 	spin_lock_bh(&xfrm_state_gc_lock);
399 	gc_list.first = xfrm_state_gc_list.first;
400 	INIT_HLIST_HEAD(&xfrm_state_gc_list);
401 	spin_unlock_bh(&xfrm_state_gc_lock);
403 	hlist_for_each_entry_safe(x, entry, tmp, &gc_list, bydst)
404 		xfrm_state_gc_destroy(x);
/* Convert seconds to jiffies, clamped below MAX_SCHEDULE_TIMEOUT so the
 * result is always a valid timer interval.  (The non-clamped return of
 * secs*HZ is in a missing line.)
 */
409 static inline unsigned long make_jiffies(long secs)
411 	if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
412 		return MAX_SCHEDULE_TIMEOUT-1;
/* Per-state lifetime timer.  Under x->lock, evaluates hard/soft
 * add-time and use-time expirations against wall-clock seconds:
 * hard limits expire/delete the state (with audit + km notification),
 * soft limits fire a non-hard km_state_expired(x, 0, 0), and the timer
 * is re-armed for the nearest future deadline.  An ACQ state whose SPI
 * was never resolved is expired outright.  Several branch targets
 * (expired:/resched:/out:) are in lines missing from this listing.
 */
417 static void xfrm_timer_handler(unsigned long data)
419 	struct xfrm_state *x = (struct xfrm_state*)data;
420 	unsigned long now = get_seconds();
421 	long next = LONG_MAX;
426 	if (x->km.state == XFRM_STATE_DEAD)
428 	if (x->km.state == XFRM_STATE_EXPIRED)
430 	if (x->lft.hard_add_expires_seconds) {
431 		long tmo = x->lft.hard_add_expires_seconds +
432 			x->curlft.add_time - now;
438 	if (x->lft.hard_use_expires_seconds) {
439 		long tmo = x->lft.hard_use_expires_seconds +
440 			(x->curlft.use_time ? : now) - now;
448 	if (x->lft.soft_add_expires_seconds) {
449 		long tmo = x->lft.soft_add_expires_seconds +
450 			x->curlft.add_time - now;
456 	if (x->lft.soft_use_expires_seconds) {
457 		long tmo = x->lft.soft_use_expires_seconds +
458 			(x->curlft.use_time ? : now) - now;
467 		km_state_expired(x, 0, 0);
469 	if (next != LONG_MAX)
470 		mod_timer(&x->timer, jiffies + make_jiffies(next));
475 	if (x->km.state == XFRM_STATE_ACQ && x->id.spi == 0) {
476 		x->km.state = XFRM_STATE_EXPIRED;
482 	err = __xfrm_state_delete(x);
483 	if (!err && x->id.spi)
484 		km_state_expired(x, 1, 0);
486 	xfrm_audit_state_delete(x, err ? 0 : 1,
487 				audit_get_loginuid(current->audit_context), 0);
490 	spin_unlock(&x->lock);
/* Allocate and initialise a new xfrm_state: refcount 1, unhashed list
 * nodes, lifetime/replay timers wired to their handlers, creation time
 * stamped, and all soft/hard byte/packet limits set to "infinite".
 * GFP_ATOMIC because callers may be in softirq context.  Returns the
 * state, or NULL on allocation failure (NULL check is in a missing line).
 */
493 static void xfrm_replay_timer_handler(unsigned long data);
495 struct xfrm_state *xfrm_state_alloc(void)
497 	struct xfrm_state *x;
499 	x = kzalloc(sizeof(struct xfrm_state), GFP_ATOMIC);
502 		atomic_set(&x->refcnt, 1);
503 		atomic_set(&x->tunnel_users, 0);
504 		INIT_HLIST_NODE(&x->bydst);
505 		INIT_HLIST_NODE(&x->bysrc);
506 		INIT_HLIST_NODE(&x->byspi);
507 		init_timer(&x->timer);
508 		x->timer.function = xfrm_timer_handler;
509 		x->timer.data	  = (unsigned long)x;
510 		init_timer(&x->rtimer);
511 		x->rtimer.function = xfrm_replay_timer_handler;
512 		x->rtimer.data     = (unsigned long)x;
513 		x->curlft.add_time = get_seconds();
514 		x->lft.soft_byte_limit = XFRM_INF;
515 		x->lft.soft_packet_limit = XFRM_INF;
516 		x->lft.hard_byte_limit = XFRM_INF;
517 		x->lft.hard_packet_limit = XFRM_INF;
518 		x->replay_maxage = 0;
519 		x->replay_maxdiff = 0;
520 		spin_lock_init(&x->lock);
524 EXPORT_SYMBOL(xfrm_state_alloc);
/* Called when the last reference drops: the state must already be DEAD.
 * Queue it on the GC list (reusing the now-unhashed bydst node) and kick
 * the GC worker, which does the sleepable teardown.
 */
526 void __xfrm_state_destroy(struct xfrm_state *x)
528 	BUG_TRAP(x->km.state == XFRM_STATE_DEAD);
530 	spin_lock_bh(&xfrm_state_gc_lock);
531 	hlist_add_head(&x->bydst, &xfrm_state_gc_list);
532 	spin_unlock_bh(&xfrm_state_gc_lock);
533 	schedule_work(&xfrm_state_gc_work);
535 EXPORT_SYMBOL(__xfrm_state_destroy);
/* Mark @x DEAD and unhash it from all three tables (byspi only if it
 * was hashed there).  Caller holds x->lock.  Drops the creation
 * reference from xfrm_state_alloc().  The return value / err variable
 * and xfrm_state_num decrement are in lines missing from this listing.
 */
537 int __xfrm_state_delete(struct xfrm_state *x)
541 	if (x->km.state != XFRM_STATE_DEAD) {
542 		x->km.state = XFRM_STATE_DEAD;
543 		spin_lock(&xfrm_state_lock);
544 		hlist_del(&x->bydst);
545 		hlist_del(&x->bysrc);
547 			hlist_del(&x->byspi);
549 		spin_unlock(&xfrm_state_lock);
551 		/* All xfrm_state objects are created by xfrm_state_alloc.
552 		 * The xfrm_state_alloc call gives a reference, and that
553 		 * is what we are dropping here.
561 EXPORT_SYMBOL(__xfrm_state_delete);
/* Locked wrapper around __xfrm_state_delete(): takes x->lock (BH off)
 * and returns the inner result.
 */
563 int xfrm_state_delete(struct xfrm_state *x)
567 	spin_lock_bh(&x->lock);
568 	err = __xfrm_state_delete(x);
569 	spin_unlock_bh(&x->lock);
573 EXPORT_SYMBOL(xfrm_state_delete);
/* Pre-flush LSM check: with CONFIG_SECURITY_NETWORK_XFRM, walk every
 * bydst chain and ask the security module whether each matching state
 * may be deleted; the first refusal is audited and aborts the flush.
 * Without the config option this is a stub (its body — presumably
 * "return 0" — is in the missing lines).  Caller holds xfrm_state_lock.
 */
575 #ifdef CONFIG_SECURITY_NETWORK_XFRM
577 xfrm_state_flush_secctx_check(u8 proto, struct xfrm_audit *audit_info)
581 	for (i = 0; i <= xfrm_state_hmask; i++) {
582 		struct hlist_node *entry;
583 		struct xfrm_state *x;
585 		hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
586 			if (xfrm_id_proto_match(x->id.proto, proto) &&
587 			   (err = security_xfrm_state_delete(x)) != 0) {
588 				xfrm_audit_state_delete(x, 0,
589 							audit_info->loginuid,
600 xfrm_state_flush_secctx_check(u8 proto, struct xfrm_audit *audit_info)
/* Delete every non-kernel-held SA whose protocol matches @proto.  After
 * the security pre-check, each victim is deleted with xfrm_state_lock
 * dropped (xfrm_state_delete takes x->lock and may audit), then the
 * bucket scan restarts from the top — the restart goto and the
 * reference take/put around the unlocked window are in missing lines.
 */
606 int xfrm_state_flush(u8 proto, struct xfrm_audit *audit_info)
610 	spin_lock_bh(&xfrm_state_lock);
611 	err = xfrm_state_flush_secctx_check(proto, audit_info);
615 	for (i = 0; i <= xfrm_state_hmask; i++) {
616 		struct hlist_node *entry;
617 		struct xfrm_state *x;
619 		hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
620 			if (!xfrm_state_kern(x) &&
621 			    xfrm_id_proto_match(x->id.proto, proto)) {
623 				spin_unlock_bh(&xfrm_state_lock);
625 				err = xfrm_state_delete(x);
626 				xfrm_audit_state_delete(x, err ? 0 : 1,
627 							audit_info->loginuid,
631 				spin_lock_bh(&xfrm_state_lock);
639 	spin_unlock_bh(&xfrm_state_lock);
643 EXPORT_SYMBOL(xfrm_state_flush);
/* Snapshot SAD statistics (state count, current hash mask, hash max)
 * into @si under xfrm_state_lock.
 */
645 void xfrm_sad_getinfo(struct xfrmk_sadinfo *si)
647 	spin_lock_bh(&xfrm_state_lock);
648 	si->sadcnt = xfrm_state_num;
649 	si->sadhcnt = xfrm_state_hmask;
650 	si->sadhmcnt = xfrm_state_hashmax;
651 	spin_unlock_bh(&xfrm_state_lock);
653 EXPORT_SYMBOL(xfrm_sad_getinfo);
/* Delegate temporary-selector initialisation for an ACQUIRE state to the
 * family-specific afinfo hook, then drop the afinfo reference.  The NULL
 * afinfo check is in a missing line — assume callers pass a supported
 * family (TODO confirm against full source).
 */
656 xfrm_init_tempsel(struct xfrm_state *x, struct flowi *fl,
657 		  struct xfrm_tmpl *tmpl,
658 		  xfrm_address_t *daddr, xfrm_address_t *saddr,
659 		  unsigned short family)
661 	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
664 	afinfo->init_tempsel(x, fl, tmpl, daddr, saddr);
665 	xfrm_state_put_afinfo(afinfo);
/* SPI-based lookup in the byspi table: match (family, spi, proto) plus
 * family-specific daddr comparison (a4 for IPv4, ipv6_addr_equal for
 * IPv6).  Caller holds xfrm_state_lock.  The refcount bump on hit and
 * the NULL miss return are in missing lines.
 */
669 static struct xfrm_state *__xfrm_state_lookup(xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family)
671 	unsigned int h = xfrm_spi_hash(daddr, spi, proto, family);
672 	struct xfrm_state *x;
673 	struct hlist_node *entry;
675 	hlist_for_each_entry(x, entry, xfrm_state_byspi+h, byspi) {
676 		if (x->props.family != family ||
678 		    x->id.proto != proto)
683 			if (x->id.daddr.a4 != daddr->a4)
687 			if (!ipv6_addr_equal((struct in6_addr *)daddr,
/* Address-pair lookup in the bysrc table: match (family, proto) and the
 * (daddr, saddr) pair per family.  Caller holds xfrm_state_lock; hit
 * takes a reference (in missing lines), miss returns NULL.
 */
701 static struct xfrm_state *__xfrm_state_lookup_byaddr(xfrm_address_t *daddr, xfrm_address_t *saddr, u8 proto, unsigned short family)
703 	unsigned int h = xfrm_src_hash(daddr, saddr, family);
704 	struct xfrm_state *x;
705 	struct hlist_node *entry;
707 	hlist_for_each_entry(x, entry, xfrm_state_bysrc+h, bysrc) {
708 		if (x->props.family != family ||
709 		    x->id.proto != proto)
714 			if (x->id.daddr.a4 != daddr->a4 ||
715 			    x->props.saddr.a4 != saddr->a4)
719 			if (!ipv6_addr_equal((struct in6_addr *)daddr,
722 			    !ipv6_addr_equal((struct in6_addr *)saddr,
/* Locate a state equivalent to @x: by SPI when @use_spi, otherwise by
 * its (daddr, saddr) pair.  Caller holds xfrm_state_lock.
 */
736 static inline struct xfrm_state *
737 __xfrm_state_locate(struct xfrm_state *x, int use_spi, int family)
740 		return __xfrm_state_lookup(&x->id.daddr, x->id.spi,
741 					   x->id.proto, family);
743 	return __xfrm_state_lookup_byaddr(&x->id.daddr,
745 					  x->id.proto, family);
/* Schedule a hash-table grow when an insert collided, the table is below
 * hashmax, and the load factor exceeds 1 (num > hmask).
 */
748 static void xfrm_hash_grow_check(int have_hash_collision)
750 	if (have_hash_collision &&
751 	    (xfrm_state_hmask + 1) < xfrm_state_hashmax &&
752 	    xfrm_state_num > xfrm_state_hmask)
753 		schedule_work(&xfrm_hash_work);
/* Core output-path SA resolution.  Scan the bydst bucket for states
 * matching the template (family, reqid, addresses, mode, proto, and SPI
 * if the template pins one), preferring among VALID selector/LSM matches
 * the non-dying, most recently added one; note in-progress ACQ states.
 * If nothing matched and the template has a fixed SPI already owned by
 * another state, fail; otherwise allocate a larval ACQ state, install
 * its temporary selector and security context, and if km_query()
 * succeeds hash it in with an acq-expiry timer so the key manager can
 * complete negotiation.  Returns the state or NULL with *err set
 * (-EAGAIN while an acquire is pending).
 * NOTE(review): 'best' selection, error unwinding and reference-count
 * lines fall in the gaps of this truncated listing.
 */
757 xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
758 		struct flowi *fl, struct xfrm_tmpl *tmpl,
759 		struct xfrm_policy *pol, int *err,
760 		unsigned short family)
762 	unsigned int h = xfrm_dst_hash(daddr, saddr, tmpl->reqid, family);
763 	struct hlist_node *entry;
764 	struct xfrm_state *x, *x0;
765 	int acquire_in_progress = 0;
767 	struct xfrm_state *best = NULL;
769 	spin_lock_bh(&xfrm_state_lock);
770 	hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
771 		if (x->props.family == family &&
772 		    x->props.reqid == tmpl->reqid &&
773 		    !(x->props.flags & XFRM_STATE_WILDRECV) &&
774 		    xfrm_state_addr_check(x, daddr, saddr, family) &&
775 		    tmpl->mode == x->props.mode &&
776 		    tmpl->id.proto == x->id.proto &&
777 		    (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) {
779 			   1. There is a valid state with matching selector.
781 			   2. Valid state with inappropriate selector. Skip.
783 			   Entering area of "sysdeps".
785 			   3. If state is not valid, selector is temporary,
786 			      it selects only session which triggered
787 			      previous resolution. Key manager will do
788 			      something to install a state with proper
791 			if (x->km.state == XFRM_STATE_VALID) {
792 				if (!xfrm_selector_match(&x->sel, fl, x->sel.family) ||
793 				    !security_xfrm_state_pol_flow_match(x, pol, fl))
796 				    best->km.dying > x->km.dying ||
797 				    (best->km.dying == x->km.dying &&
798 				     best->curlft.add_time < x->curlft.add_time))
800 			} else if (x->km.state == XFRM_STATE_ACQ) {
801 				acquire_in_progress = 1;
802 			} else if (x->km.state == XFRM_STATE_ERROR ||
803 				   x->km.state == XFRM_STATE_EXPIRED) {
804 				if (xfrm_selector_match(&x->sel, fl, x->sel.family) &&
805 				    security_xfrm_state_pol_flow_match(x, pol, fl))
812 	if (!x && !error && !acquire_in_progress) {
814 		    (x0 = __xfrm_state_lookup(daddr, tmpl->id.spi,
815 					      tmpl->id.proto, family)) != NULL) {
820 		x = xfrm_state_alloc();
825 		/* Initialize temporary selector matching only
826 		 * to current session. */
827 		xfrm_init_tempsel(x, fl, tmpl, daddr, saddr, family);
829 		error = security_xfrm_state_alloc_acquire(x, pol->security, fl->secid);
831 			x->km.state = XFRM_STATE_DEAD;
837 		if (km_query(x, tmpl, pol) == 0) {
838 			x->km.state = XFRM_STATE_ACQ;
839 			hlist_add_head(&x->bydst, xfrm_state_bydst+h);
840 			h = xfrm_src_hash(daddr, saddr, family);
841 			hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);
843 				h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, family);
844 				hlist_add_head(&x->byspi, xfrm_state_byspi+h);
846 			x->lft.hard_add_expires_seconds = sysctl_xfrm_acq_expires;
847 			x->timer.expires = jiffies + sysctl_xfrm_acq_expires*HZ;
848 			add_timer(&x->timer);
850 			xfrm_hash_grow_check(x->bydst.next != NULL);
852 			x->km.state = XFRM_STATE_DEAD;
862 	*err = acquire_in_progress ? -EAGAIN : error;
863 	spin_unlock_bh(&xfrm_state_lock);
/* Policy-free SA lookup: return a VALID state matching (family, reqid,
 * addresses, mode, proto) from the bydst table, skipping wildcard-recv
 * states.  Uses spin_lock (not _bh) — presumably callers already run
 * with BHs disabled; TODO confirm against full source.  The refcount
 * take on the hit and the return statement are in missing lines.
 */
868 xfrm_stateonly_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
869 		    unsigned short family, u8 mode, u8 proto, u32 reqid)
871 	unsigned int h = xfrm_dst_hash(daddr, saddr, reqid, family);
872 	struct xfrm_state *rx = NULL, *x = NULL;
873 	struct hlist_node *entry;
875 	spin_lock(&xfrm_state_lock);
876 	hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
877 		if (x->props.family == family &&
878 		    x->props.reqid == reqid &&
879 		    !(x->props.flags & XFRM_STATE_WILDRECV) &&
880 		    xfrm_state_addr_check(x, daddr, saddr, family) &&
881 		    mode == x->props.mode &&
882 		    proto == x->id.proto &&
883 		    x->km.state == XFRM_STATE_VALID) {
891 	spin_unlock(&xfrm_state_lock);
896 EXPORT_SYMBOL(xfrm_stateonly_find);
/* Hash @x into all three tables, stamp a fresh generation id, arm the
 * lifetime timer (first tick in 1s) and the replay timer if configured,
 * then consider growing the tables.  Caller holds xfrm_state_lock.
 * The xfrm_state_hold / xfrm_state_num++ lines are missing from this
 * listing.
 */
898 static void __xfrm_state_insert(struct xfrm_state *x)
902 	x->genid = ++xfrm_state_genid;
904 	h = xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
905 			  x->props.reqid, x->props.family);
906 	hlist_add_head(&x->bydst, xfrm_state_bydst+h);
908 	h = xfrm_src_hash(&x->id.daddr, &x->props.saddr, x->props.family);
909 	hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);
912 		h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto,
915 		hlist_add_head(&x->byspi, xfrm_state_byspi+h);
918 	mod_timer(&x->timer, jiffies + HZ);
919 	if (x->replay_maxage)
920 		mod_timer(&x->rtimer, jiffies + x->replay_maxage);
926 	xfrm_hash_grow_check(x->bydst.next != NULL);
/* Bump the genid of every existing state sharing @xnew's (family, reqid,
 * daddr, saddr) so cached bundles referencing them are invalidated when
 * the new state is inserted.  xfrm_state_lock is held by the caller.
 */
929 /* xfrm_state_lock is held */
930 static void __xfrm_state_bump_genids(struct xfrm_state *xnew)
932 	unsigned short family = xnew->props.family;
933 	u32 reqid = xnew->props.reqid;
934 	struct xfrm_state *x;
935 	struct hlist_node *entry;
938 	h = xfrm_dst_hash(&xnew->id.daddr, &xnew->props.saddr, reqid, family);
939 	hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
940 		if (x->props.family	== family &&
941 		    x->props.reqid	== reqid &&
942 		    !xfrm_addr_cmp(&x->id.daddr, &xnew->id.daddr, family) &&
943 		    !xfrm_addr_cmp(&x->props.saddr, &xnew->props.saddr, family))
944 			x->genid = xfrm_state_genid;
/* Public insert: under xfrm_state_lock, invalidate sibling genids then
 * hash the state in via __xfrm_state_insert().
 */
948 void xfrm_state_insert(struct xfrm_state *x)
950 	spin_lock_bh(&xfrm_state_lock);
951 	__xfrm_state_bump_genids(x);
952 	__xfrm_state_insert(x);
953 	spin_unlock_bh(&xfrm_state_lock);
955 EXPORT_SYMBOL(xfrm_state_insert);
/* Find an existing larval (ACQ) state for the given key, or — when
 * @create is set — allocate and hash a new one: selector pinned to the
 * exact address pair (/32 or /128), hard add-expiry set from
 * sysctl_xfrm_acq_expires, lifetime timer armed.  xfrm_state_lock is
 * held by the caller.  SPI-zero check on the match path, the family
 * switch labels, and the reference-count lines are missing from this
 * listing.
 */
957 /* xfrm_state_lock is held */
958 static struct xfrm_state *__find_acq_core(unsigned short family, u8 mode, u32 reqid, u8 proto, xfrm_address_t *daddr, xfrm_address_t *saddr, int create)
960 	unsigned int h = xfrm_dst_hash(daddr, saddr, reqid, family);
961 	struct hlist_node *entry;
962 	struct xfrm_state *x;
964 	hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
965 		if (x->props.reqid  != reqid ||
966 		    x->props.mode   != mode ||
967 		    x->props.family != family ||
968 		    x->km.state     != XFRM_STATE_ACQ ||
970 		    x->id.proto	    != proto)
975 			if (x->id.daddr.a4    != daddr->a4 ||
976 			    x->props.saddr.a4 != saddr->a4)
980 			if (!ipv6_addr_equal((struct in6_addr *)x->id.daddr.a6,
981 					     (struct in6_addr *)daddr) ||
982 			    !ipv6_addr_equal((struct in6_addr *)
984 					     (struct in6_addr *)saddr))
996 		x = xfrm_state_alloc();
1000 			x->sel.daddr.a4 = daddr->a4;
1001 			x->sel.saddr.a4 = saddr->a4;
1002 			x->sel.prefixlen_d = 32;
1003 			x->sel.prefixlen_s = 32;
1004 			x->props.saddr.a4 = saddr->a4;
1005 			x->id.daddr.a4 = daddr->a4;
1009 			ipv6_addr_copy((struct in6_addr *)x->sel.daddr.a6,
1010 				       (struct in6_addr *)daddr);
1011 			ipv6_addr_copy((struct in6_addr *)x->sel.saddr.a6,
1012 				       (struct in6_addr *)saddr);
1013 			x->sel.prefixlen_d = 128;
1014 			x->sel.prefixlen_s = 128;
1015 			ipv6_addr_copy((struct in6_addr *)x->props.saddr.a6,
1016 				       (struct in6_addr *)saddr);
1017 			ipv6_addr_copy((struct in6_addr *)x->id.daddr.a6,
1018 				       (struct in6_addr *)daddr);
1022 		x->km.state = XFRM_STATE_ACQ;
1023 		x->id.proto = proto;
1024 		x->props.family = family;
1025 		x->props.mode = mode;
1026 		x->props.reqid = reqid;
1027 		x->lft.hard_add_expires_seconds = sysctl_xfrm_acq_expires;
1029 		x->timer.expires = jiffies + sysctl_xfrm_acq_expires*HZ;
1030 		add_timer(&x->timer);
1031 		hlist_add_head(&x->bydst, xfrm_state_bydst+h);
1032 		h = xfrm_src_hash(daddr, saddr, family);
1033 		hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);
1037 		xfrm_hash_grow_check(x->bydst.next != NULL);
/* Add a fully-specified SA.  Fails (-EEXIST, in a missing line) if an
 * equivalent state already exists; otherwise adopts any matching larval
 * state — found by the key manager's sequence number or by the acquire
 * key — inserts @x, and afterwards deletes/releases the larval x1
 * outside the lock.  Returns 0 or a negative errno.
 */
1043 static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq);
1045 int xfrm_state_add(struct xfrm_state *x)
1047 	struct xfrm_state *x1;
1050 	int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);
1052 	family = x->props.family;
1054 	spin_lock_bh(&xfrm_state_lock);
1056 	x1 = __xfrm_state_locate(x, use_spi, family);
1064 	if (use_spi && x->km.seq) {
1065 		x1 = __xfrm_find_acq_byseq(x->km.seq);
1066 		if (x1 && ((x1->id.proto != x->id.proto) ||
1067 		    xfrm_addr_cmp(&x1->id.daddr, &x->id.daddr, family))) {
1074 		x1 = __find_acq_core(family, x->props.mode, x->props.reqid,
1076 				     &x->id.daddr, &x->props.saddr, 0);
1078 	__xfrm_state_bump_genids(x);
1079 	__xfrm_state_insert(x);
1083 	spin_unlock_bh(&xfrm_state_lock);
1086 		xfrm_state_delete(x1);
1092 EXPORT_SYMBOL(xfrm_state_add);
/* CONFIG_XFRM_MIGRATE: duplicate @orig into a fresh state for migration
 * — id/selector/lifetime copied wholesale, properties copied field by
 * field, auth/crypt/comp algorithms deep-cloned, encap and care-of
 * address kmemdup'd, then re-initialised via xfrm_init_state().  On any
 * allocation/init failure *errp is set and NULL is returned (the error
 * unwinding lines are missing from this listing).
 */
1094 #ifdef CONFIG_XFRM_MIGRATE
1095 struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, int *errp)
1098 	struct xfrm_state *x = xfrm_state_alloc();
1102 	memcpy(&x->id, &orig->id, sizeof(x->id));
1103 	memcpy(&x->sel, &orig->sel, sizeof(x->sel));
1104 	memcpy(&x->lft, &orig->lft, sizeof(x->lft));
1105 	x->props.mode = orig->props.mode;
1106 	x->props.replay_window = orig->props.replay_window;
1107 	x->props.reqid = orig->props.reqid;
1108 	x->props.family = orig->props.family;
1109 	x->props.saddr = orig->props.saddr;
1112 		x->aalg = xfrm_algo_clone(orig->aalg);
1116 	x->props.aalgo = orig->props.aalgo;
1119 		x->ealg = xfrm_algo_clone(orig->ealg);
1123 	x->props.ealgo = orig->props.ealgo;
1126 		x->calg = xfrm_algo_clone(orig->calg);
1130 	x->props.calgo = orig->props.calgo;
1133 		x->encap = kmemdup(orig->encap, sizeof(*x->encap), GFP_KERNEL);
1139 		x->coaddr = kmemdup(orig->coaddr, sizeof(*x->coaddr),
1145 	err = xfrm_init_state(x);
1149 	x->props.flags = orig->props.flags;
1151 	x->curlft.add_time = orig->curlft.add_time;
1152 	x->km.state = orig->km.state;
1153 	x->km.seq = orig->km.seq;
1170 EXPORT_SYMBOL(xfrm_state_clone);
/* Find the state a migration entry @m refers to: when m->reqid is set,
 * search the bydst table by the old (daddr, saddr, reqid); otherwise
 * fall back to the bysrc table by address pair only.  Mode/proto must
 * match in both passes.  xfrm_state_lock is held; the hold/return lines
 * are in the listing's gaps.
 */
1172 /* xfrm_state_lock is held */
1173 struct xfrm_state * xfrm_migrate_state_find(struct xfrm_migrate *m)
1176 	struct xfrm_state *x;
1177 	struct hlist_node *entry;
1180 		h = xfrm_dst_hash(&m->old_daddr, &m->old_saddr,
1181 				  m->reqid, m->old_family);
1182 		hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
1183 			if (x->props.mode != m->mode ||
1184 			    x->id.proto != m->proto)
1186 			if (m->reqid && x->props.reqid != m->reqid)
1188 			if (xfrm_addr_cmp(&x->id.daddr, &m->old_daddr,
1190 			    xfrm_addr_cmp(&x->props.saddr, &m->old_saddr,
1197 		h = xfrm_src_hash(&m->old_daddr, &m->old_saddr,
1199 		hlist_for_each_entry(x, entry, xfrm_state_bysrc+h, bysrc) {
1200 			if (x->props.mode != m->mode ||
1201 			    x->id.proto != m->proto)
1203 			if (xfrm_addr_cmp(&x->id.daddr, &m->old_daddr,
1205 			    xfrm_addr_cmp(&x->props.saddr, &m->old_saddr,
1215 EXPORT_SYMBOL(xfrm_migrate_state_find);
/* Migrate @x to the new endpoints in @m: clone it, rewrite daddr/saddr
 * to the new addresses, then insert — directly when only saddr changed
 * (same daddr means the SA triplet key is unchanged), via
 * xfrm_state_add() when daddr changed.  Returns the clone or NULL on
 * error (error paths are in missing lines).
 */
1217 struct xfrm_state * xfrm_state_migrate(struct xfrm_state *x,
1218 				       struct xfrm_migrate *m)
1220 	struct xfrm_state *xc;
1223 	xc = xfrm_state_clone(x, &err);
1227 	memcpy(&xc->id.daddr, &m->new_daddr, sizeof(xc->id.daddr));
1228 	memcpy(&xc->props.saddr, &m->new_saddr, sizeof(xc->props.saddr));
1231 	if (!xfrm_addr_cmp(&x->id.daddr, &m->new_daddr, m->new_family)) {
1232 		/* a care is needed when the destination address of the
1233 		   state is to be updated as it is a part of triplet */
1234 		xfrm_state_insert(xc);
1236 		if ((err = xfrm_state_add(xc)) < 0)
1245 EXPORT_SYMBOL(xfrm_state_migrate);
/* Update an existing SA in place.  Locates the current state x1; kernel
 * -held states cannot be updated (error constant in a missing line); a
 * larval ACQ x1 is replaced by inserting @x and deleting x1.  Otherwise,
 * with x1 VALID, copy over encap, care-of address, selector (only for
 * non-SPI protocols), and lifetimes, restart the lifetime timer and
 * re-check expiry.  Returns 0 or a negative errno.
 */
1248 int xfrm_state_update(struct xfrm_state *x)
1250 	struct xfrm_state *x1;
1252 	int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);
1254 	spin_lock_bh(&xfrm_state_lock);
1255 	x1 = __xfrm_state_locate(x, use_spi, x->props.family);
1261 	if (xfrm_state_kern(x1)) {
1267 	if (x1->km.state == XFRM_STATE_ACQ) {
1268 		__xfrm_state_insert(x);
1274 	spin_unlock_bh(&xfrm_state_lock);
1280 		xfrm_state_delete(x1);
1286 	spin_lock_bh(&x1->lock);
1287 	if (likely(x1->km.state == XFRM_STATE_VALID)) {
1288 		if (x->encap && x1->encap)
1289 			memcpy(x1->encap, x->encap, sizeof(*x1->encap));
1290 		if (x->coaddr && x1->coaddr) {
1291 			memcpy(x1->coaddr, x->coaddr, sizeof(*x1->coaddr));
1293 		if (!use_spi && memcmp(&x1->sel, &x->sel, sizeof(x1->sel)))
1294 			memcpy(&x1->sel, &x->sel, sizeof(x1->sel));
1295 		memcpy(&x1->lft, &x->lft, sizeof(x1->lft));
1298 		mod_timer(&x1->timer, jiffies + HZ);
1299 		if (x1->curlft.use_time)
1300 			xfrm_state_check_expire(x1);
1304 	spin_unlock_bh(&x1->lock);
1310 EXPORT_SYMBOL(xfrm_state_update);
/* Check byte/packet limits against current usage: stamp first-use time,
 * hard limit reached → mark EXPIRED and fire the timer immediately
 * (error return in a missing line); soft limit reached (without a prior
 * soft notification, per the clipped condition at 1328) → notify the
 * key manager.  Returns 0 while the state remains usable.
 */
1312 int xfrm_state_check_expire(struct xfrm_state *x)
1314 	if (!x->curlft.use_time)
1315 		x->curlft.use_time = get_seconds();
1317 	if (x->km.state != XFRM_STATE_VALID)
1320 	if (x->curlft.bytes >= x->lft.hard_byte_limit ||
1321 	    x->curlft.packets >= x->lft.hard_packet_limit) {
1322 		x->km.state = XFRM_STATE_EXPIRED;
1323 		mod_timer(&x->timer, jiffies);
1328 	    (x->curlft.bytes >= x->lft.soft_byte_limit ||
1329 	     x->curlft.packets >= x->lft.soft_packet_limit)) {
1331 		km_state_expired(x, 0, 0);
1335 EXPORT_SYMBOL(xfrm_state_check_expire);
/* Locked wrapper around __xfrm_state_lookup() (SPI-based lookup). */
1338 xfrm_state_lookup(xfrm_address_t *daddr, __be32 spi, u8 proto,
1339 		  unsigned short family)
1341 	struct xfrm_state *x;
1343 	spin_lock_bh(&xfrm_state_lock);
1344 	x = __xfrm_state_lookup(daddr, spi, proto, family);
1345 	spin_unlock_bh(&xfrm_state_lock);
1348 EXPORT_SYMBOL(xfrm_state_lookup);
/* Locked wrapper around __xfrm_state_lookup_byaddr() (address-pair lookup). */
1351 xfrm_state_lookup_byaddr(xfrm_address_t *daddr, xfrm_address_t *saddr,
1352 			 u8 proto, unsigned short family)
1354 	struct xfrm_state *x;
1356 	spin_lock_bh(&xfrm_state_lock);
1357 	x = __xfrm_state_lookup_byaddr(daddr, saddr, proto, family);
1358 	spin_unlock_bh(&xfrm_state_lock);
1361 EXPORT_SYMBOL(xfrm_state_lookup_byaddr);
/* Locked wrapper around __find_acq_core(): find (or, if @create,
 * allocate) a larval ACQ state for the given key.
 */
1364 xfrm_find_acq(u8 mode, u32 reqid, u8 proto,
1365 	      xfrm_address_t *daddr, xfrm_address_t *saddr,
1366 	      int create, unsigned short family)
1368 	struct xfrm_state *x;
1370 	spin_lock_bh(&xfrm_state_lock);
1371 	x = __find_acq_core(family, mode, reqid, proto, daddr, saddr, create);
1372 	spin_unlock_bh(&xfrm_state_lock);
1376 EXPORT_SYMBOL(xfrm_find_acq);
/* CONFIG_XFRM_SUB_POLICY: sort @n templates from @src into @dst using
 * the family-specific tmpl_sort hook (if provided), under
 * xfrm_state_lock.  -EAFNOSUPPORT if the family has no afinfo.
 */
1378 #ifdef CONFIG_XFRM_SUB_POLICY
1380 xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n,
1381 	       unsigned short family)
1384 	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
1386 		return -EAFNOSUPPORT;
1388 	spin_lock_bh(&xfrm_state_lock);
1389 	if (afinfo->tmpl_sort)
1390 		err = afinfo->tmpl_sort(dst, src, n);
1391 	spin_unlock_bh(&xfrm_state_lock);
1392 	xfrm_state_put_afinfo(afinfo);
1395 EXPORT_SYMBOL(xfrm_tmpl_sort);
/* Companion to xfrm_tmpl_sort(): sort @n states via the family-specific
 * state_sort hook under xfrm_state_lock.
 */
1398 xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
1399 		unsigned short family)
1402 	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
1404 		return -EAFNOSUPPORT;
1406 	spin_lock_bh(&xfrm_state_lock);
1407 	if (afinfo->state_sort)
1408 		err = afinfo->state_sort(dst, src, n);
1409 	spin_unlock_bh(&xfrm_state_lock);
1410 	xfrm_state_put_afinfo(afinfo);
1413 EXPORT_SYMBOL(xfrm_state_sort);
/* Brute-force scan of every bydst bucket for an ACQ state carrying the
 * key manager's sequence number @seq.  Caller holds xfrm_state_lock;
 * hold/return on hit and the final NULL return are in missing lines.
 */
1416 /* Silly enough, but I'm lazy to build resolution list */
1418 static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq)
1422 	for (i = 0; i <= xfrm_state_hmask; i++) {
1423 		struct hlist_node *entry;
1424 		struct xfrm_state *x;
1426 		hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
1427 			if (x->km.seq == seq &&
1428 			    x->km.state == XFRM_STATE_ACQ) {
/* Locked wrapper around __xfrm_find_acq_byseq(). */
1437 struct xfrm_state *xfrm_find_acq_byseq(u32 seq)
1439 	struct xfrm_state *x;
1441 	spin_lock_bh(&xfrm_state_lock);
1442 	x = __xfrm_find_acq_byseq(seq);
1443 	spin_unlock_bh(&xfrm_state_lock);
1446 EXPORT_SYMBOL(xfrm_find_acq_byseq);
/* Return the next non-zero acquire sequence number; the `?:` idiom
 * skips 0 on counter wrap.  Serialized by a local static spinlock.
 */
1448 u32 xfrm_get_acqseq(void)
1452 	static DEFINE_SPINLOCK(acqseq_lock);
1454 	spin_lock_bh(&acqseq_lock);
1455 	res = (++acqseq ? : ++acqseq);
1456 	spin_unlock_bh(&acqseq_lock);
1459 EXPORT_SYMBOL(xfrm_get_acqseq);
/* Assign an SPI to @x from [low, high].  If the range is a single value,
 * claim it iff no other state owns it; otherwise probe random values in
 * the range, up to range-size attempts, until one is free (the x0 release
 * on collision is in missing lines).  On success the state is hashed
 * into byspi under xfrm_state_lock.  Runs under x->lock; DEAD states
 * are rejected up front.
 */
1461 int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high)
1464 	struct xfrm_state *x0;
1466 	__be32 minspi = htonl(low);
1467 	__be32 maxspi = htonl(high);
1469 	spin_lock_bh(&x->lock);
1470 	if (x->km.state == XFRM_STATE_DEAD)
1479 	if (minspi == maxspi) {
1480 		x0 = xfrm_state_lookup(&x->id.daddr, minspi, x->id.proto, x->props.family);
1488 		for (h=0; h<high-low+1; h++) {
1489 			spi = low + net_random()%(high-low+1);
1490 			x0 = xfrm_state_lookup(&x->id.daddr, htonl(spi), x->id.proto, x->props.family);
1492 				x->id.spi = htonl(spi);
1499 		spin_lock_bh(&xfrm_state_lock);
1500 		h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, x->props.family);
1501 		hlist_add_head(&x->byspi, xfrm_state_byspi+h);
1502 		spin_unlock_bh(&xfrm_state_lock);
1508 	spin_unlock_bh(&x->lock);
/* Iterate every SA matching @proto, invoking @func on each with a
 * running count; a final call with count 0 marks the last entry (the
 * 'last'/count bookkeeping lines are in the listing's gaps).  Stops on
 * the first non-zero return from @func.  Entire walk runs under
 * xfrm_state_lock, so @func must not sleep.
 */
1514 int xfrm_state_walk(u8 proto, int (*func)(struct xfrm_state *, int, void*),
1518 	struct xfrm_state *x, *last = NULL;
1519 	struct hlist_node *entry;
1523 	spin_lock_bh(&xfrm_state_lock);
1524 	for (i = 0; i <= xfrm_state_hmask; i++) {
1525 		hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
1526 			if (!xfrm_id_proto_match(x->id.proto, proto))
1529 				err = func(last, count, data);
1541 		err = func(last, 0, data);
1543 	spin_unlock_bh(&xfrm_state_lock);
1546 EXPORT_SYMBOL(xfrm_state_walk);
/* Emit an XFRM_MSG_NEWAE aevent to the key manager when replay counters
 * changed enough to matter: UPDATE events below replay_maxdiff are
 * deferred (downgraded to TIMEOUT or suppressed via XFRM_TIME_DEFER);
 * a TIMEOUT with no counter movement only sets the defer flag.  After
 * notifying, preplay is synced to replay and the replay timer re-armed.
 * Caller must hold the state's lock (per the comment at 1559).
 */
1549 void xfrm_replay_notify(struct xfrm_state *x, int event)
1552 	/* we send notify messages in case
1553 	 *  1. we updated on of the sequence numbers, and the seqno difference
1554 	 *     is at least x->replay_maxdiff, in this case we also update the
1555 	 *     timeout of our timer function
1556 	 *  2. if x->replay_maxage has elapsed since last update,
1557 	 *     and there were changes
1559 	 *  The state structure must be locked!
1563 	case XFRM_REPLAY_UPDATE:
1564 		if (x->replay_maxdiff &&
1565 		    (x->replay.seq - x->preplay.seq < x->replay_maxdiff) &&
1566 		    (x->replay.oseq - x->preplay.oseq < x->replay_maxdiff)) {
1567 			if (x->xflags & XFRM_TIME_DEFER)
1568 				event = XFRM_REPLAY_TIMEOUT;
1575 	case XFRM_REPLAY_TIMEOUT:
1576 		if ((x->replay.seq == x->preplay.seq) &&
1577 		    (x->replay.bitmap == x->preplay.bitmap) &&
1578 		    (x->replay.oseq == x->preplay.oseq)) {
1579 			x->xflags |= XFRM_TIME_DEFER;
1586 	memcpy(&x->preplay, &x->replay, sizeof(struct xfrm_replay_state));
1587 	c.event = XFRM_MSG_NEWAE;
1588 	c.data.aevent = event;
1589 	km_state_notify(x, &c);
1591 	if (x->replay_maxage &&
1592 	    !mod_timer(&x->rtimer, jiffies + x->replay_maxage))
1593 		x->xflags &= ~XFRM_TIME_DEFER;
/*
 * xfrm_replay_timer_handler - rtimer callback: on a still-valid state,
 * either push a deferred replay notification (if aevents are enabled)
 * or mark the state so the next update sends one.
 */
1596 static void xfrm_replay_timer_handler(unsigned long data)
1598 struct xfrm_state *x = (struct xfrm_state*)data;
1600 spin_lock(&x->lock);
1602 if (x->km.state == XFRM_STATE_VALID) {
1603 if (xfrm_aevent_is_on())
1604 xfrm_replay_notify(x, XFRM_REPLAY_TIMEOUT);
1606 x->xflags |= XFRM_TIME_DEFER; /* no listener: remember we owe a notify */
1609 spin_unlock(&x->lock);
/*
 * xfrm_replay_check - anti-replay validation of an inbound sequence number.
 *
 * Rejects seq 0, accepts anything beyond the current window head, and for
 * in-window values rejects sequence numbers that fall outside the window
 * or whose bitmap bit is already set (duplicate).  The window is capped at
 * the bitmap width in bits.
 * NOTE(review): the return statements are elided in this extract.
 */
1612 int xfrm_replay_check(struct xfrm_state *x, __be32 net_seq)
1615 u32 seq = ntohl(net_seq);
1617 if (unlikely(seq == 0)) /* seq 0 is never valid on the wire */
1620 if (likely(seq > x->replay.seq)) /* ahead of the window: fine */
1623 diff = x->replay.seq - seq;
1624 if (diff >= min_t(unsigned int, x->props.replay_window,
1625 sizeof(x->replay.bitmap) * 8)) {
1626 x->stats.replay_window++; /* too old: outside the window */
1630 if (x->replay.bitmap & (1U << diff)) { /* already seen: replay */
1636 EXPORT_SYMBOL(xfrm_replay_check);
/*
 * xfrm_replay_advance - record @net_seq in the replay window after a
 * packet has passed xfrm_replay_check().
 *
 * Newer sequence numbers slide the window forward (shifting the bitmap);
 * older in-window ones just set their bitmap bit.  Fires an aevent update
 * if a listener is registered.
 */
1638 void xfrm_replay_advance(struct xfrm_state *x, __be32 net_seq)
1641 u32 seq = ntohl(net_seq);
1643 if (seq > x->replay.seq) {
1644 diff = seq - x->replay.seq;
1645 if (diff < x->props.replay_window)
1646 x->replay.bitmap = ((x->replay.bitmap) << diff) | 1;
1648 x->replay.bitmap = 1; /* jumped past the whole window: reset it */
1649 x->replay.seq = seq;
1651 diff = x->replay.seq - seq;
1652 x->replay.bitmap |= (1U << diff); /* mark an older in-window seq as seen */
1655 if (xfrm_aevent_is_on())
1656 xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
1658 EXPORT_SYMBOL(xfrm_replay_advance);
/* List of key managers registered via xfrm_register_km(), and the rwlock
 * protecting it: notification/query paths take it for read, registration
 * for write (BH-disabled). */
1660 static struct list_head xfrm_km_list = LIST_HEAD_INIT(xfrm_km_list);
1661 static DEFINE_RWLOCK(xfrm_km_lock);
/*
 * km_policy_notify - broadcast policy event @c for (@xp, @dir) to every
 * registered key manager that implements ->notify_policy.
 */
1663 void km_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
1665 struct xfrm_mgr *km;
1667 read_lock(&xfrm_km_lock);
1668 list_for_each_entry(km, &xfrm_km_list, list)
1669 if (km->notify_policy)
1670 km->notify_policy(xp, dir, c);
1671 read_unlock(&xfrm_km_lock);
/*
 * km_state_notify - broadcast state event @c for @x to every registered
 * key manager.  (The per-manager callback invocation is elided in this
 * extract.)
 */
1674 void km_state_notify(struct xfrm_state *x, struct km_event *c)
1676 struct xfrm_mgr *km;
1677 read_lock(&xfrm_km_lock);
1678 list_for_each_entry(km, &xfrm_km_list, list)
1681 read_unlock(&xfrm_km_lock);
1684 EXPORT_SYMBOL(km_policy_notify);
1685 EXPORT_SYMBOL(km_state_notify);
/*
 * km_state_expired - announce (soft or hard) expiry of state @x as an
 * XFRM_MSG_EXPIRE event to all key managers.  The km_event setup for
 * @hard/@pid is elided in this extract.
 */
1687 void km_state_expired(struct xfrm_state *x, int hard, u32 pid)
1693 c.event = XFRM_MSG_EXPIRE;
1694 km_state_notify(x, &c);
1700 EXPORT_SYMBOL(km_state_expired);
1702 * We send to all registered managers regardless of failure
1703 * We are happy with one success
/*
 * km_query - ask every key manager to acquire an SA for (@x, @t, @pol)
 * on the output path; a single successful ->acquire() is enough.
 */
1705 int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol)
1707 int err = -EINVAL, acqret;
1708 struct xfrm_mgr *km;
1710 read_lock(&xfrm_km_lock);
1711 list_for_each_entry(km, &xfrm_km_list, list) {
1712 acqret = km->acquire(x, t, pol, XFRM_POLICY_OUT);
1716 read_unlock(&xfrm_km_lock);
1719 EXPORT_SYMBOL(km_query);
/*
 * km_new_mapping - report a NAT-T address/port change (@ipaddr, @sport)
 * for state @x to every key manager implementing ->new_mapping.
 */
1721 int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport)
1724 struct xfrm_mgr *km;
1726 read_lock(&xfrm_km_lock);
1727 list_for_each_entry(km, &xfrm_km_list, list) {
1728 if (km->new_mapping)
1729 err = km->new_mapping(x, ipaddr, sport);
1733 read_unlock(&xfrm_km_lock);
1736 EXPORT_SYMBOL(km_new_mapping);
/*
 * km_policy_expired - announce expiry of policy @pol in direction @dir
 * as an XFRM_MSG_POLEXPIRE event.  The km_event setup for @hard/@pid is
 * elided in this extract.
 */
1738 void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 pid)
1744 c.event = XFRM_MSG_POLEXPIRE;
1745 km_policy_notify(pol, dir, &c);
1750 EXPORT_SYMBOL(km_policy_expired);
/*
 * km_migrate - hand a set of @num_migrate endpoint migrations for
 * (@sel, @dir, @type) to each key manager that supports ->migrate.
 */
1752 int km_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
1753 struct xfrm_migrate *m, int num_migrate)
1757 struct xfrm_mgr *km;
1759 read_lock(&xfrm_km_lock);
1760 list_for_each_entry(km, &xfrm_km_list, list) {
1762 ret = km->migrate(sel, dir, type, m, num_migrate);
1767 read_unlock(&xfrm_km_lock);
1770 EXPORT_SYMBOL(km_migrate);
/*
 * km_report - forward a report for (@proto, @sel, @addr) to each key
 * manager that implements ->report.
 */
1772 int km_report(u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr)
1776 struct xfrm_mgr *km;
1778 read_lock(&xfrm_km_lock);
1779 list_for_each_entry(km, &xfrm_km_list, list) {
1781 ret = km->report(proto, sel, addr);
1786 read_unlock(&xfrm_km_lock);
1789 EXPORT_SYMBOL(km_report);
/*
 * xfrm_user_policy - setsockopt() back end for per-socket IPsec policy.
 *
 * Copies the user buffer (bounded by PAGE_SIZE), lets each key manager
 * try to compile it into an xfrm_policy, and installs the first result
 * on the socket.  Error paths and kfree of @data are elided in this
 * extract.
 */
1791 int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
1795 struct xfrm_mgr *km;
1796 struct xfrm_policy *pol = NULL;
1798 if (optlen <= 0 || optlen > PAGE_SIZE) /* reject empty/oversized input */
1801 data = kmalloc(optlen, GFP_KERNEL);
1806 if (copy_from_user(data, optval, optlen))
1810 read_lock(&xfrm_km_lock);
1811 list_for_each_entry(km, &xfrm_km_list, list) {
1812 pol = km->compile_policy(sk, optname, data,
1817 read_unlock(&xfrm_km_lock);
1820 xfrm_sk_policy_insert(sk, err, pol);
1829 EXPORT_SYMBOL(xfrm_user_policy);
/*
 * xfrm_register_km - add key manager @km to the global list under the
 * write-side of xfrm_km_lock.
 */
1831 int xfrm_register_km(struct xfrm_mgr *km)
1833 write_lock_bh(&xfrm_km_lock);
1834 list_add_tail(&km->list, &xfrm_km_list);
1835 write_unlock_bh(&xfrm_km_lock);
1838 EXPORT_SYMBOL(xfrm_register_km);
/*
 * xfrm_unregister_km - remove key manager @km from the global list under
 * the write-side of xfrm_km_lock.
 */
1840 int xfrm_unregister_km(struct xfrm_mgr *km)
1842 write_lock_bh(&xfrm_km_lock);
1843 list_del(&km->list);
1844 write_unlock_bh(&xfrm_km_lock);
1847 EXPORT_SYMBOL(xfrm_unregister_km);
/*
 * xfrm_state_register_afinfo - install per-address-family state ops.
 *
 * Validates @afinfo and its family index, then publishes it in the
 * xfrm_state_afinfo[] table unless that slot is already taken.
 */
1849 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo)
1852 if (unlikely(afinfo == NULL))
1854 if (unlikely(afinfo->family >= NPROTO))
1855 return -EAFNOSUPPORT;
1856 write_lock_bh(&xfrm_state_afinfo_lock);
1857 if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL)) /* slot taken */
1860 xfrm_state_afinfo[afinfo->family] = afinfo;
1861 write_unlock_bh(&xfrm_state_afinfo_lock);
1864 EXPORT_SYMBOL(xfrm_state_register_afinfo);
/*
 * xfrm_state_unregister_afinfo - remove per-address-family state ops.
 *
 * Clears the xfrm_state_afinfo[] slot only if it currently holds @afinfo;
 * unregistering someone else's entry is rejected.
 */
1866 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
1869 if (unlikely(afinfo == NULL))
1871 if (unlikely(afinfo->family >= NPROTO))
1872 return -EAFNOSUPPORT;
1873 write_lock_bh(&xfrm_state_afinfo_lock);
1874 if (likely(xfrm_state_afinfo[afinfo->family] != NULL)) {
1875 if (unlikely(xfrm_state_afinfo[afinfo->family] != afinfo)) /* not ours */
1878 xfrm_state_afinfo[afinfo->family] = NULL;
1880 write_unlock_bh(&xfrm_state_afinfo_lock);
1883 EXPORT_SYMBOL(xfrm_state_unregister_afinfo);
/*
 * xfrm_state_get_afinfo - look up the afinfo for @family.
 *
 * Takes the read side of xfrm_state_afinfo_lock and, on the miss path
 * visible here, drops it again.  On a hit the lock is deliberately held
 * until the caller invokes xfrm_state_put_afinfo() - the two functions
 * form a get/put pair around the lock.
 */
1885 static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family)
1887 struct xfrm_state_afinfo *afinfo;
1888 if (unlikely(family >= NPROTO))
1890 read_lock(&xfrm_state_afinfo_lock);
1891 afinfo = xfrm_state_afinfo[family];
1892 if (unlikely(!afinfo))
1893 read_unlock(&xfrm_state_afinfo_lock); /* miss: release before returning */
/*
 * xfrm_state_put_afinfo - release the read lock taken by a successful
 * xfrm_state_get_afinfo().  Must pair 1:1 with that call.
 */
1897 static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo)
1899 read_unlock(&xfrm_state_afinfo_lock);
1902 /* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
/*
 * xfrm_state_delete_tunnel - drop @x's reference on its inner tunnel
 * state @t, deleting @t when @x holds the last user reference besides
 * @t's own (tunnel_users == 2).
 */
1903 void xfrm_state_delete_tunnel(struct xfrm_state *x)
1906 struct xfrm_state *t = x->tunnel;
1908 if (atomic_read(&t->tunnel_users) == 2)
1909 xfrm_state_delete(t);
1910 atomic_dec(&t->tunnel_users);
1915 EXPORT_SYMBOL(xfrm_state_delete_tunnel);
/*
 * xfrm_state_mtu - effective payload MTU through state @x.
 *
 * For a valid state whose type provides ->get_mtu, asks the type;
 * otherwise falls back to @mtu minus the transform's header overhead.
 * Serialised by x->lock.
 */
1917 int xfrm_state_mtu(struct xfrm_state *x, int mtu)
1921 spin_lock_bh(&x->lock);
1922 if (x->km.state == XFRM_STATE_VALID &&
1923 x->type && x->type->get_mtu)
1924 res = x->type->get_mtu(x, mtu);
1926 res = mtu - x->props.header_len; /* fallback: subtract header overhead */
1927 spin_unlock_bh(&x->lock);
/*
 * xfrm_init_state - finish constructing state @x before first use.
 *
 * Resolves the per-family afinfo (for flag initialisation), the inner
 * and outer modes, and the transform type, then runs the type's own
 * init and marks the state VALID.  Error labels and returns are elided
 * in this extract.
 */
1931 int xfrm_init_state(struct xfrm_state *x)
1933 struct xfrm_state_afinfo *afinfo;
1934 int family = x->props.family;
1937 err = -EAFNOSUPPORT;
1938 afinfo = xfrm_state_get_afinfo(family);
1943 if (afinfo->init_flags)
1944 err = afinfo->init_flags(x);
1946 xfrm_state_put_afinfo(afinfo); /* drops the lock taken by get_afinfo */
1951 err = -EPROTONOSUPPORT;
/* Inner mode is keyed on the selector family, outer on props.family. */
1952 x->inner_mode = xfrm_get_mode(x->props.mode, x->sel.family);
1953 if (x->inner_mode == NULL)
/* Non-tunnel modes cannot translate between address families. */
1956 if (!(x->inner_mode->flags & XFRM_MODE_FLAG_TUNNEL) &&
1957 family != x->sel.family)
1960 x->type = xfrm_get_type(x->id.proto, family);
1961 if (x->type == NULL)
1964 err = x->type->init_state(x);
1968 x->outer_mode = xfrm_get_mode(x->props.mode, family);
1969 if (x->outer_mode == NULL)
1972 x->km.state = XFRM_STATE_VALID;
1978 EXPORT_SYMBOL(xfrm_init_state);
/*
 * xfrm_state_init - boot-time setup of the state subsystem.
 *
 * Allocates the three 8-bucket hash tables (bydst/bysrc/byspi), derives
 * the initial hash mask from the size, and initialises the GC work item.
 * Panics on allocation failure since IPsec cannot run without them.
 */
1980 void __init xfrm_state_init(void)
1984 sz = sizeof(struct hlist_head) * 8; /* initial size: 8 buckets per table */
1986 xfrm_state_bydst = xfrm_hash_alloc(sz);
1987 xfrm_state_bysrc = xfrm_hash_alloc(sz);
1988 xfrm_state_byspi = xfrm_hash_alloc(sz);
1989 if (!xfrm_state_bydst || !xfrm_state_bysrc || !xfrm_state_byspi)
1990 panic("XFRM: Cannot allocate bydst/bysrc/byspi hashes.");
1991 xfrm_state_hmask = ((sz / sizeof(struct hlist_head)) - 1);
1993 INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task);
1996 #ifdef CONFIG_AUDITSYSCALL
/*
 * xfrm_audit_common_stateinfo - append the state's security context
 * (when present) and its src/dst addresses, formatted per address
 * family, to @audit_buf.
 */
1997 static inline void xfrm_audit_common_stateinfo(struct xfrm_state *x,
1998 struct audit_buffer *audit_buf)
2001 audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
2002 x->security->ctx_alg, x->security->ctx_doi,
2003 x->security->ctx_str);
2005 switch(x->props.family) {
2007 audit_log_format(audit_buf, " src=%u.%u.%u.%u dst=%u.%u.%u.%u",
2008 NIPQUAD(x->props.saddr.a4),
2009 NIPQUAD(x->id.daddr.a4));
2013 struct in6_addr saddr6, daddr6;
/* Copy into aligned locals before formatting the IPv6 addresses. */
2015 memcpy(&saddr6, x->props.saddr.a6,
2016 sizeof(struct in6_addr));
2017 memcpy(&daddr6, x->id.daddr.a6,
2018 sizeof(struct in6_addr));
2019 audit_log_format(audit_buf,
2020 " src=" NIP6_FMT " dst=" NIP6_FMT,
2021 NIP6(saddr6), NIP6(daddr6));
/*
 * xfrm_audit_state_add - emit an audit record ("op=SAD-add") for the
 * addition of state @x, including common state info and the SPI in
 * decimal and hex.  No-op when auditing is disabled.
 */
2028 xfrm_audit_state_add(struct xfrm_state *x, int result, u32 auid, u32 sid)
2030 struct audit_buffer *audit_buf;
2031 extern int audit_enabled;
2033 if (audit_enabled == 0)
2035 audit_buf = xfrm_audit_start(sid, auid);
2036 if (audit_buf == NULL)
2038 audit_log_format(audit_buf, " op=SAD-add res=%u",result);
2039 xfrm_audit_common_stateinfo(x, audit_buf);
2040 audit_log_format(audit_buf, " spi=%lu(0x%lx)",
2041 (unsigned long)x->id.spi, (unsigned long)x->id.spi);
2042 audit_log_end(audit_buf);
2044 EXPORT_SYMBOL_GPL(xfrm_audit_state_add);
/*
 * xfrm_audit_state_delete - emit an audit record ("op=SAD-delete") for
 * the removal of state @x; mirrors xfrm_audit_state_add() otherwise.
 */
2047 xfrm_audit_state_delete(struct xfrm_state *x, int result, u32 auid, u32 sid)
2049 struct audit_buffer *audit_buf;
2050 extern int audit_enabled;
2052 if (audit_enabled == 0)
2054 audit_buf = xfrm_audit_start(sid, auid);
2055 if (audit_buf == NULL)
2057 audit_log_format(audit_buf, " op=SAD-delete res=%u",result);
2058 xfrm_audit_common_stateinfo(x, audit_buf);
2059 audit_log_format(audit_buf, " spi=%lu(0x%lx)",
2060 (unsigned long)x->id.spi, (unsigned long)x->id.spi);
2061 audit_log_end(audit_buf);
2063 EXPORT_SYMBOL_GPL(xfrm_audit_state_delete);
2064 #endif /* CONFIG_AUDITSYSCALL */