 *	Kazunori MIYAZAWA @USAGI
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *		IPv6 support
 *	YOSHIFUJI Hideaki @USAGI
 *		Split up af-specific functions
 *	Derek Atkins <derek@ihtfp.com>
 *		Add UDP Encapsulation
 */
#include <linux/workqueue.h>
#include <net/xfrm.h>
#include <linux/pfkeyv2.h>
#include <linux/ipsec.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <asm/uaccess.h>

#include "xfrm_hash.h"
struct sock *xfrm_nl;
EXPORT_SYMBOL(xfrm_nl);
u32 sysctl_xfrm_aevent_etime __read_mostly = XFRM_AE_ETIME;
EXPORT_SYMBOL(sysctl_xfrm_aevent_etime);

u32 sysctl_xfrm_aevent_rseqth __read_mostly = XFRM_AE_SEQT_SIZE;
EXPORT_SYMBOL(sysctl_xfrm_aevent_rseqth);

u32 sysctl_xfrm_acq_expires __read_mostly = 30;
/* Each xfrm_state may be linked to two tables:

   1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl)
   2. Hash table by (daddr,family,reqid) to find what SAs exist for given
      destination/tunnel endpoint. (output)
 */

static DEFINE_SPINLOCK(xfrm_state_lock);
/* Hash table to find appropriate SA towards given target (endpoint
 * of tunnel or destination of transport mode) allowed by selector.
 *
 * Main use is finding SA after policy selected tunnel or transport mode.
 * Also, it can be used by ah/esp icmp error handler to find offending SA.
 */
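/*
 * Illustrative sketch of the table keying (the exact bit mixing lives in
 * xfrm_hash.h; only the key-tuple -> bucket contract matters here): with
 * the boot-time table of 8 buckets (hmask == 7), an inbound ESP packet is
 * resolved roughly as
 *
 *	h = __xfrm_spi_hash(&daddr, spi, IPPROTO_ESP, AF_INET, hmask);
 *	hlist_for_each_entry(x, entry, xfrm_state_byspi + h, byspi)
 *		...match family, spi, proto, daddr...
 *
 * while output resolution hashes (daddr, saddr, reqid) into
 * xfrm_state_bydst.
 */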
static struct hlist_head *xfrm_state_bydst __read_mostly;
static struct hlist_head *xfrm_state_bysrc __read_mostly;
static struct hlist_head *xfrm_state_byspi __read_mostly;
static unsigned int xfrm_state_hmask __read_mostly;
static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;
static unsigned int xfrm_state_num;
static unsigned int xfrm_state_genid;

static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family);
static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);
static inline unsigned int xfrm_dst_hash(xfrm_address_t *daddr,
                                         xfrm_address_t *saddr,
                                         u32 reqid,
                                         unsigned short family)
{
        return __xfrm_dst_hash(daddr, saddr, reqid, family, xfrm_state_hmask);
}

static inline unsigned int xfrm_src_hash(xfrm_address_t *daddr,
                                         xfrm_address_t *saddr,
                                         unsigned short family)
{
        return __xfrm_src_hash(daddr, saddr, family, xfrm_state_hmask);
}

static inline unsigned int
xfrm_spi_hash(xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family)
{
        return __xfrm_spi_hash(daddr, spi, proto, family, xfrm_state_hmask);
}
static void xfrm_hash_transfer(struct hlist_head *list,
                               struct hlist_head *ndsttable,
                               struct hlist_head *nsrctable,
                               struct hlist_head *nspitable,
                               unsigned int nhashmask)
{
        struct hlist_node *entry, *tmp;
        struct xfrm_state *x;

        hlist_for_each_entry_safe(x, entry, tmp, list, bydst) {
                unsigned int h;

                h = __xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
                                    x->props.reqid, x->props.family,
                                    nhashmask);
                hlist_add_head(&x->bydst, ndsttable+h);

                h = __xfrm_src_hash(&x->id.daddr, &x->props.saddr,
                                    x->props.family,
                                    nhashmask);
                hlist_add_head(&x->bysrc, nsrctable+h);

                if (x->id.spi) {
                        h = __xfrm_spi_hash(&x->id.daddr, x->id.spi,
                                            x->id.proto, x->props.family,
                                            nhashmask);
                        hlist_add_head(&x->byspi, nspitable+h);
                }
        }
}
static unsigned long xfrm_hash_new_size(void)
{
        return ((xfrm_state_hmask + 1) << 1) *
                sizeof(struct hlist_head);
}
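/*
 * Worked example: at boot the tables have 8 buckets (xfrm_state_hmask == 7),
 * so the first resize asks for 16 * sizeof(struct hlist_head) -- every
 * resize exactly doubles the bucket count, bounded by xfrm_state_hashmax.
 */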
static DEFINE_MUTEX(hash_resize_mutex);
static void xfrm_hash_resize(struct work_struct *__unused)
{
        struct hlist_head *ndst, *nsrc, *nspi, *odst, *osrc, *ospi;
        unsigned long nsize, osize;
        unsigned int nhashmask, ohashmask;
        int i;

        mutex_lock(&hash_resize_mutex);

        nsize = xfrm_hash_new_size();
        ndst = xfrm_hash_alloc(nsize);
        if (!ndst)
                goto out_unlock;
        nsrc = xfrm_hash_alloc(nsize);
        if (!nsrc) {
                xfrm_hash_free(ndst, nsize);
                goto out_unlock;
        }
        nspi = xfrm_hash_alloc(nsize);
        if (!nspi) {
                xfrm_hash_free(ndst, nsize);
                xfrm_hash_free(nsrc, nsize);
                goto out_unlock;
        }

        spin_lock_bh(&xfrm_state_lock);

        nhashmask = (nsize / sizeof(struct hlist_head)) - 1U;
        for (i = xfrm_state_hmask; i >= 0; i--)
                xfrm_hash_transfer(xfrm_state_bydst+i, ndst, nsrc, nspi,
                                   nhashmask);

        odst = xfrm_state_bydst;
        osrc = xfrm_state_bysrc;
        ospi = xfrm_state_byspi;
        ohashmask = xfrm_state_hmask;

        xfrm_state_bydst = ndst;
        xfrm_state_bysrc = nsrc;
        xfrm_state_byspi = nspi;
        xfrm_state_hmask = nhashmask;

        spin_unlock_bh(&xfrm_state_lock);

        osize = (ohashmask + 1) * sizeof(struct hlist_head);
        xfrm_hash_free(odst, osize);
        xfrm_hash_free(osrc, osize);
        xfrm_hash_free(ospi, osize);

out_unlock:
        mutex_unlock(&hash_resize_mutex);
}
static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize);

DECLARE_WAIT_QUEUE_HEAD(km_waitq);
EXPORT_SYMBOL(km_waitq);

static DEFINE_RWLOCK(xfrm_state_afinfo_lock);
static struct xfrm_state_afinfo *xfrm_state_afinfo[NPROTO];

static struct work_struct xfrm_state_gc_work;
static HLIST_HEAD(xfrm_state_gc_list);
static DEFINE_SPINLOCK(xfrm_state_gc_lock);

int __xfrm_state_delete(struct xfrm_state *x);

int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
void km_state_expired(struct xfrm_state *x, int hard, u32 pid);
static struct xfrm_state_afinfo *xfrm_state_lock_afinfo(unsigned int family)
{
        struct xfrm_state_afinfo *afinfo;
        if (unlikely(family >= NPROTO))
                return NULL;
        write_lock_bh(&xfrm_state_afinfo_lock);
        afinfo = xfrm_state_afinfo[family];
        if (unlikely(!afinfo))
                write_unlock_bh(&xfrm_state_afinfo_lock);
        return afinfo;
}

static void xfrm_state_unlock_afinfo(struct xfrm_state_afinfo *afinfo)
{
        write_unlock_bh(&xfrm_state_afinfo_lock);
}
int xfrm_register_type(struct xfrm_type *type, unsigned short family)
{
        struct xfrm_state_afinfo *afinfo = xfrm_state_lock_afinfo(family);
        struct xfrm_type **typemap;
        int err = 0;

        if (unlikely(afinfo == NULL))
                return -EAFNOSUPPORT;
        typemap = afinfo->type_map;

        if (likely(typemap[type->proto] == NULL))
                typemap[type->proto] = type;
        else
                err = -EEXIST;
        xfrm_state_unlock_afinfo(afinfo);
        return err;
}
EXPORT_SYMBOL(xfrm_register_type);
int xfrm_unregister_type(struct xfrm_type *type, unsigned short family)
{
        struct xfrm_state_afinfo *afinfo = xfrm_state_lock_afinfo(family);
        struct xfrm_type **typemap;
        int err = 0;

        if (unlikely(afinfo == NULL))
                return -EAFNOSUPPORT;
        typemap = afinfo->type_map;

        if (unlikely(typemap[type->proto] != type))
                err = -ENOENT;
        else
                typemap[type->proto] = NULL;
        xfrm_state_unlock_afinfo(afinfo);
        return err;
}
EXPORT_SYMBOL(xfrm_unregister_type);
static struct xfrm_type *xfrm_get_type(u8 proto, unsigned short family)
{
        struct xfrm_state_afinfo *afinfo;
        struct xfrm_type **typemap;
        struct xfrm_type *type;
        int modload_attempted = 0;

retry:
        afinfo = xfrm_state_get_afinfo(family);
        if (unlikely(afinfo == NULL))
                return NULL;
        typemap = afinfo->type_map;

        type = typemap[proto];
        if (unlikely(type && !try_module_get(type->owner)))
                type = NULL;
        if (!type && !modload_attempted) {
                xfrm_state_put_afinfo(afinfo);
                request_module("xfrm-type-%d-%d", family, proto);
                modload_attempted = 1;
                goto retry;
        }

        xfrm_state_put_afinfo(afinfo);
        return type;
}

static void xfrm_put_type(struct xfrm_type *type)
{
        module_put(type->owner);
}
int xfrm_register_mode(struct xfrm_mode *mode, int family)
{
        struct xfrm_state_afinfo *afinfo;
        struct xfrm_mode **modemap;
        int err;

        if (unlikely(mode->encap >= XFRM_MODE_MAX))
                return -EINVAL;

        afinfo = xfrm_state_lock_afinfo(family);
        if (unlikely(afinfo == NULL))
                return -EAFNOSUPPORT;

        err = -EEXIST;
        modemap = afinfo->mode_map;
        if (modemap[mode->encap])
                goto out;

        err = -ENOENT;
        if (!try_module_get(afinfo->owner))
                goto out;

        mode->afinfo = afinfo;
        modemap[mode->encap] = mode;
        err = 0;

out:
        xfrm_state_unlock_afinfo(afinfo);
        return err;
}
EXPORT_SYMBOL(xfrm_register_mode);
int xfrm_unregister_mode(struct xfrm_mode *mode, int family)
{
        struct xfrm_state_afinfo *afinfo;
        struct xfrm_mode **modemap;
        int err;

        if (unlikely(mode->encap >= XFRM_MODE_MAX))
                return -EINVAL;

        afinfo = xfrm_state_lock_afinfo(family);
        if (unlikely(afinfo == NULL))
                return -EAFNOSUPPORT;

        err = -ENOENT;
        modemap = afinfo->mode_map;
        if (likely(modemap[mode->encap] == mode)) {
                modemap[mode->encap] = NULL;
                module_put(mode->afinfo->owner);
                err = 0;
        }

        xfrm_state_unlock_afinfo(afinfo);
        return err;
}
EXPORT_SYMBOL(xfrm_unregister_mode);
static struct xfrm_mode *xfrm_get_mode(unsigned int encap, int family)
{
        struct xfrm_state_afinfo *afinfo;
        struct xfrm_mode *mode;
        int modload_attempted = 0;

        if (unlikely(encap >= XFRM_MODE_MAX))
                return NULL;

retry:
        afinfo = xfrm_state_get_afinfo(family);
        if (unlikely(afinfo == NULL))
                return NULL;

        mode = afinfo->mode_map[encap];
        if (unlikely(mode && !try_module_get(mode->owner)))
                mode = NULL;
        if (!mode && !modload_attempted) {
                xfrm_state_put_afinfo(afinfo);
                request_module("xfrm-mode-%d-%d", family, encap);
                modload_attempted = 1;
                goto retry;
        }

        xfrm_state_put_afinfo(afinfo);
        return mode;
}

static void xfrm_put_mode(struct xfrm_mode *mode)
{
        module_put(mode->owner);
}
static void xfrm_state_gc_destroy(struct xfrm_state *x)
{
        del_timer_sync(&x->timer);
        del_timer_sync(&x->rtimer);
        kfree(x->aalg);
        kfree(x->ealg);
        kfree(x->calg);
        kfree(x->encap);
        kfree(x->coaddr);
        if (x->inner_mode)
                xfrm_put_mode(x->inner_mode);
        if (x->outer_mode)
                xfrm_put_mode(x->outer_mode);
        if (x->type) {
                x->type->destructor(x);
                xfrm_put_type(x->type);
        }
        security_xfrm_state_free(x);
        kfree(x);
}
static void xfrm_state_gc_task(struct work_struct *data)
{
        struct xfrm_state *x;
        struct hlist_node *entry, *tmp;
        struct hlist_head gc_list;

        spin_lock_bh(&xfrm_state_gc_lock);
        gc_list.first = xfrm_state_gc_list.first;
        INIT_HLIST_HEAD(&xfrm_state_gc_list);
        spin_unlock_bh(&xfrm_state_gc_lock);

        hlist_for_each_entry_safe(x, entry, tmp, &gc_list, bydst)
                xfrm_state_gc_destroy(x);

        wake_up(&km_waitq);
}
static inline unsigned long make_jiffies(long secs)
{
        if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
                return MAX_SCHEDULE_TIMEOUT-1;
        else
                return secs*HZ;
}
static void xfrm_timer_handler(unsigned long data)
{
        struct xfrm_state *x = (struct xfrm_state*)data;
        unsigned long now = get_seconds();
        long next = LONG_MAX;
        int warn = 0;
        int err = 0;

        spin_lock(&x->lock);
        if (x->km.state == XFRM_STATE_DEAD)
                goto out;
        if (x->km.state == XFRM_STATE_EXPIRED)
                goto expired;
        if (x->lft.hard_add_expires_seconds) {
                long tmo = x->lft.hard_add_expires_seconds +
                        x->curlft.add_time - now;
                if (tmo <= 0)
                        goto expired;
                if (tmo < next)
                        next = tmo;
        }
        if (x->lft.hard_use_expires_seconds) {
                long tmo = x->lft.hard_use_expires_seconds +
                        (x->curlft.use_time ? : now) - now;
                if (tmo <= 0)
                        goto expired;
                if (tmo < next)
                        next = tmo;
        }
        if (x->km.dying)
                goto resched;
        if (x->lft.soft_add_expires_seconds) {
                long tmo = x->lft.soft_add_expires_seconds +
                        x->curlft.add_time - now;
                if (tmo <= 0)
                        warn = 1;
                else if (tmo < next)
                        next = tmo;
        }
        if (x->lft.soft_use_expires_seconds) {
                long tmo = x->lft.soft_use_expires_seconds +
                        (x->curlft.use_time ? : now) - now;
                if (tmo <= 0)
                        warn = 1;
                else if (tmo < next)
                        next = tmo;
        }

        x->km.dying = warn;
        if (warn)
                km_state_expired(x, 0, 0);
resched:
        if (next != LONG_MAX)
                mod_timer(&x->timer, jiffies + make_jiffies(next));

        goto out;

expired:
        if (x->km.state == XFRM_STATE_ACQ && x->id.spi == 0) {
                x->km.state = XFRM_STATE_EXPIRED;
                wake_up(&km_waitq);
                next = 2;
                goto resched;
        }

        err = __xfrm_state_delete(x);
        if (!err && x->id.spi)
                km_state_expired(x, 1, 0);

        xfrm_audit_state_delete(x, err ? 0 : 1,
                                audit_get_loginuid(current->audit_context), 0);

out:
        spin_unlock(&x->lock);
}
static void xfrm_replay_timer_handler(unsigned long data);
struct xfrm_state *xfrm_state_alloc(void)
{
        struct xfrm_state *x;

        x = kzalloc(sizeof(struct xfrm_state), GFP_ATOMIC);

        if (x) {
                atomic_set(&x->refcnt, 1);
                atomic_set(&x->tunnel_users, 0);
                INIT_HLIST_NODE(&x->bydst);
                INIT_HLIST_NODE(&x->bysrc);
                INIT_HLIST_NODE(&x->byspi);
                setup_timer(&x->timer, xfrm_timer_handler, (unsigned long)x);
                setup_timer(&x->rtimer, xfrm_replay_timer_handler,
                            (unsigned long)x);
                x->curlft.add_time = get_seconds();
                x->lft.soft_byte_limit = XFRM_INF;
                x->lft.soft_packet_limit = XFRM_INF;
                x->lft.hard_byte_limit = XFRM_INF;
                x->lft.hard_packet_limit = XFRM_INF;
                x->replay_maxage = 0;
                x->replay_maxdiff = 0;
                spin_lock_init(&x->lock);
        }
        return x;
}
EXPORT_SYMBOL(xfrm_state_alloc);
void __xfrm_state_destroy(struct xfrm_state *x)
{
        BUG_TRAP(x->km.state == XFRM_STATE_DEAD);

        spin_lock_bh(&xfrm_state_gc_lock);
        hlist_add_head(&x->bydst, &xfrm_state_gc_list);
        spin_unlock_bh(&xfrm_state_gc_lock);
        schedule_work(&xfrm_state_gc_work);
}
EXPORT_SYMBOL(__xfrm_state_destroy);
int __xfrm_state_delete(struct xfrm_state *x)
{
        int err = -ESRCH;

        if (x->km.state != XFRM_STATE_DEAD) {
                x->km.state = XFRM_STATE_DEAD;
                spin_lock(&xfrm_state_lock);
                hlist_del(&x->bydst);
                hlist_del(&x->bysrc);
                if (x->id.spi)
                        hlist_del(&x->byspi);
                xfrm_state_num--;
                spin_unlock(&xfrm_state_lock);

                /* All xfrm_state objects are created by xfrm_state_alloc.
                 * The xfrm_state_alloc call gives a reference, and that
                 * is what we are dropping here.
                 */
                xfrm_state_put(x);
                err = 0;
        }

        return err;
}
EXPORT_SYMBOL(__xfrm_state_delete);
int xfrm_state_delete(struct xfrm_state *x)
{
        int err;

        spin_lock_bh(&x->lock);
        err = __xfrm_state_delete(x);
        spin_unlock_bh(&x->lock);

        return err;
}
EXPORT_SYMBOL(xfrm_state_delete);
#ifdef CONFIG_SECURITY_NETWORK_XFRM
static inline int
xfrm_state_flush_secctx_check(u8 proto, struct xfrm_audit *audit_info)
{
        int i, err = 0;

        for (i = 0; i <= xfrm_state_hmask; i++) {
                struct hlist_node *entry;
                struct xfrm_state *x;

                hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
                        if (xfrm_id_proto_match(x->id.proto, proto) &&
                           (err = security_xfrm_state_delete(x)) != 0) {
                                xfrm_audit_state_delete(x, 0,
                                                        audit_info->loginuid,
                                                        audit_info->secid);
                                return err;
                        }
                }
        }

        return err;
}
#else
static inline int
xfrm_state_flush_secctx_check(u8 proto, struct xfrm_audit *audit_info)
{
        return 0;
}
#endif
int xfrm_state_flush(u8 proto, struct xfrm_audit *audit_info)
{
        int i, err = 0;

        spin_lock_bh(&xfrm_state_lock);
        err = xfrm_state_flush_secctx_check(proto, audit_info);
        if (err)
                goto out;

        for (i = 0; i <= xfrm_state_hmask; i++) {
                struct hlist_node *entry;
                struct xfrm_state *x;
restart:
                hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
                        if (!xfrm_state_kern(x) &&
                            xfrm_id_proto_match(x->id.proto, proto)) {
                                xfrm_state_hold(x);
                                spin_unlock_bh(&xfrm_state_lock);

                                err = xfrm_state_delete(x);
                                xfrm_audit_state_delete(x, err ? 0 : 1,
                                                        audit_info->loginuid,
                                                        audit_info->secid);
                                xfrm_state_put(x);

                                spin_lock_bh(&xfrm_state_lock);
                                goto restart;
                        }
                }
        }
        err = 0;

out:
        spin_unlock_bh(&xfrm_state_lock);
        wake_up(&km_waitq);
        return err;
}
EXPORT_SYMBOL(xfrm_state_flush);
void xfrm_sad_getinfo(struct xfrmk_sadinfo *si)
{
        spin_lock_bh(&xfrm_state_lock);
        si->sadcnt = xfrm_state_num;
        si->sadhcnt = xfrm_state_hmask;
        si->sadhmcnt = xfrm_state_hashmax;
        spin_unlock_bh(&xfrm_state_lock);
}
EXPORT_SYMBOL(xfrm_sad_getinfo);
static void
xfrm_init_tempsel(struct xfrm_state *x, struct flowi *fl,
                  struct xfrm_tmpl *tmpl,
                  xfrm_address_t *daddr, xfrm_address_t *saddr,
                  unsigned short family)
{
        struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
        if (!afinfo)
                return;
        afinfo->init_tempsel(x, fl, tmpl, daddr, saddr);
        xfrm_state_put_afinfo(afinfo);
}
static struct xfrm_state *__xfrm_state_lookup(xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family)
{
        unsigned int h = xfrm_spi_hash(daddr, spi, proto, family);
        struct xfrm_state *x;
        struct hlist_node *entry;

        hlist_for_each_entry(x, entry, xfrm_state_byspi+h, byspi) {
                if (x->props.family != family ||
                    x->id.spi       != spi ||
                    x->id.proto     != proto)
                        continue;

                switch (family) {
                case AF_INET:
                        if (x->id.daddr.a4 != daddr->a4)
                                continue;
                        break;
                case AF_INET6:
                        if (!ipv6_addr_equal((struct in6_addr *)daddr,
                                             (struct in6_addr *)
                                             x->id.daddr.a6))
                                continue;
                        break;
                }

                xfrm_state_hold(x);
                return x;
        }

        return NULL;
}
static struct xfrm_state *__xfrm_state_lookup_byaddr(xfrm_address_t *daddr, xfrm_address_t *saddr, u8 proto, unsigned short family)
{
        unsigned int h = xfrm_src_hash(daddr, saddr, family);
        struct xfrm_state *x;
        struct hlist_node *entry;

        hlist_for_each_entry(x, entry, xfrm_state_bysrc+h, bysrc) {
                if (x->props.family != family ||
                    x->id.proto     != proto)
                        continue;

                switch (family) {
                case AF_INET:
                        if (x->id.daddr.a4 != daddr->a4 ||
                            x->props.saddr.a4 != saddr->a4)
                                continue;
                        break;
                case AF_INET6:
                        if (!ipv6_addr_equal((struct in6_addr *)daddr,
                                             (struct in6_addr *)
                                             x->id.daddr.a6) ||
                            !ipv6_addr_equal((struct in6_addr *)saddr,
                                             (struct in6_addr *)
                                             x->props.saddr.a6))
                                continue;
                        break;
                }

                xfrm_state_hold(x);
                return x;
        }

        return NULL;
}
static inline struct xfrm_state *
__xfrm_state_locate(struct xfrm_state *x, int use_spi, int family)
{
        if (use_spi)
                return __xfrm_state_lookup(&x->id.daddr, x->id.spi,
                                           x->id.proto, family);
        else
                return __xfrm_state_lookup_byaddr(&x->id.daddr,
                                                  &x->props.saddr,
                                                  x->id.proto, family);
}
static void xfrm_hash_grow_check(int have_hash_collision)
{
        if (have_hash_collision &&
            (xfrm_state_hmask + 1) < xfrm_state_hashmax &&
            xfrm_state_num > xfrm_state_hmask)
                schedule_work(&xfrm_hash_work);
}
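/*
 * Worked example: with 8 buckets (hmask == 7) the resize work is queued
 * the first time an insert both collides with an existing chain entry and
 * the table already holds more than 7 states -- growth waits for a load
 * factor above ~1, not for the first collision, and stops once the bucket
 * count reaches xfrm_state_hashmax.
 */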
struct xfrm_state *
xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
                struct flowi *fl, struct xfrm_tmpl *tmpl,
                struct xfrm_policy *pol, int *err,
                unsigned short family)
{
        unsigned int h;
        struct hlist_node *entry;
        struct xfrm_state *x, *x0;
        int acquire_in_progress = 0;
        int error = 0;
        struct xfrm_state *best = NULL;

        spin_lock_bh(&xfrm_state_lock);
        h = xfrm_dst_hash(daddr, saddr, tmpl->reqid, family);
        hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
                if (x->props.family == family &&
                    x->props.reqid == tmpl->reqid &&
                    !(x->props.flags & XFRM_STATE_WILDRECV) &&
                    xfrm_state_addr_check(x, daddr, saddr, family) &&
                    tmpl->mode == x->props.mode &&
                    tmpl->id.proto == x->id.proto &&
                    (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) {
                        /* Resolution logic:
                           1. There is a valid state with matching selector.
                              Done.
                           2. Valid state with inappropriate selector. Skip.

                           Entering area of "sysdeps".

                           3. If state is not valid, selector is temporary,
                              it selects only session which triggered
                              previous resolution. Key manager will do
                              something to install a state with proper
                              selector.
                         */
                        if (x->km.state == XFRM_STATE_VALID) {
                                if (!xfrm_selector_match(&x->sel, fl, x->sel.family) ||
                                    !security_xfrm_state_pol_flow_match(x, pol, fl))
                                        continue;
                                if (!best ||
                                    best->km.dying > x->km.dying ||
                                    (best->km.dying == x->km.dying &&
                                     best->curlft.add_time < x->curlft.add_time))
                                        best = x;
                        } else if (x->km.state == XFRM_STATE_ACQ) {
                                acquire_in_progress = 1;
                        } else if (x->km.state == XFRM_STATE_ERROR ||
                                   x->km.state == XFRM_STATE_EXPIRED) {
                                if (xfrm_selector_match(&x->sel, fl, x->sel.family) &&
                                    security_xfrm_state_pol_flow_match(x, pol, fl))
                                        error = -ESRCH;
                        }
                }
        }

        x = best;
        if (!x && !error && !acquire_in_progress) {
                if (tmpl->id.spi &&
                    (x0 = __xfrm_state_lookup(daddr, tmpl->id.spi,
                                              tmpl->id.proto, family)) != NULL) {
                        xfrm_state_put(x0);
                        error = -EEXIST;
                        goto out;
                }
                x = xfrm_state_alloc();
                if (x == NULL) {
                        error = -ENOMEM;
                        goto out;
                }
                /* Initialize temporary selector matching only
                 * to current session. */
                xfrm_init_tempsel(x, fl, tmpl, daddr, saddr, family);

                error = security_xfrm_state_alloc_acquire(x, pol->security, fl->secid);
                if (error) {
                        x->km.state = XFRM_STATE_DEAD;
                        xfrm_state_put(x);
                        x = NULL;
                        goto out;
                }

                if (km_query(x, tmpl, pol) == 0) {
                        x->km.state = XFRM_STATE_ACQ;
                        hlist_add_head(&x->bydst, xfrm_state_bydst+h);
                        h = xfrm_src_hash(daddr, saddr, family);
                        hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);
                        if (x->id.spi) {
                                h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, family);
                                hlist_add_head(&x->byspi, xfrm_state_byspi+h);
                        }
                        x->lft.hard_add_expires_seconds = sysctl_xfrm_acq_expires;
                        x->timer.expires = jiffies + sysctl_xfrm_acq_expires*HZ;
                        add_timer(&x->timer);
                        xfrm_state_num++;
                        xfrm_hash_grow_check(x->bydst.next != NULL);
                } else {
                        x->km.state = XFRM_STATE_DEAD;
                        xfrm_state_put(x);
                        x = NULL;
                        error = -ESRCH;
                }
        }
out:
        if (x)
                xfrm_state_hold(x);
        else
                *err = acquire_in_progress ? -EAGAIN : error;
        spin_unlock_bh(&xfrm_state_lock);
        return x;
}
struct xfrm_state *
xfrm_stateonly_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
                    unsigned short family, u8 mode, u8 proto, u32 reqid)
{
        unsigned int h;
        struct xfrm_state *rx = NULL, *x = NULL;
        struct hlist_node *entry;

        spin_lock(&xfrm_state_lock);
        h = xfrm_dst_hash(daddr, saddr, reqid, family);
        hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
                if (x->props.family == family &&
                    x->props.reqid == reqid &&
                    !(x->props.flags & XFRM_STATE_WILDRECV) &&
                    xfrm_state_addr_check(x, daddr, saddr, family) &&
                    mode == x->props.mode &&
                    proto == x->id.proto &&
                    x->km.state == XFRM_STATE_VALID) {
                        rx = x;
                        break;
                }
        }

        if (rx)
                xfrm_state_hold(rx);
        spin_unlock(&xfrm_state_lock);

        return rx;
}
EXPORT_SYMBOL(xfrm_stateonly_find);
static void __xfrm_state_insert(struct xfrm_state *x)
{
        unsigned int h;

        x->genid = ++xfrm_state_genid;

        h = xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
                          x->props.reqid, x->props.family);
        hlist_add_head(&x->bydst, xfrm_state_bydst+h);

        h = xfrm_src_hash(&x->id.daddr, &x->props.saddr, x->props.family);
        hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);

        if (x->id.spi) {
                h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto,
                                  x->props.family);

                hlist_add_head(&x->byspi, xfrm_state_byspi+h);
        }

        mod_timer(&x->timer, jiffies + HZ);
        if (x->replay_maxage)
                mod_timer(&x->rtimer, jiffies + x->replay_maxage);

        wake_up(&km_waitq);

        xfrm_state_num++;

        xfrm_hash_grow_check(x->bydst.next != NULL);
}
/* xfrm_state_lock is held */
static void __xfrm_state_bump_genids(struct xfrm_state *xnew)
{
        unsigned short family = xnew->props.family;
        u32 reqid = xnew->props.reqid;
        struct xfrm_state *x;
        struct hlist_node *entry;
        unsigned int h;

        h = xfrm_dst_hash(&xnew->id.daddr, &xnew->props.saddr, reqid, family);
        hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
                if (x->props.family == family &&
                    x->props.reqid == reqid &&
                    !xfrm_addr_cmp(&x->id.daddr, &xnew->id.daddr, family) &&
                    !xfrm_addr_cmp(&x->props.saddr, &xnew->props.saddr, family))
                        x->genid = xfrm_state_genid;
        }
}
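/*
 * Note on genids: __xfrm_state_insert() stamps each state with a fresh
 * xfrm_state_genid, and the bump above marks every colliding (family,
 * reqid, daddr, saddr) state stale, so cached bundles that still point
 * at a replaced SA are revalidated on their next use.
 */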
void xfrm_state_insert(struct xfrm_state *x)
{
        spin_lock_bh(&xfrm_state_lock);
        __xfrm_state_bump_genids(x);
        __xfrm_state_insert(x);
        spin_unlock_bh(&xfrm_state_lock);
}
EXPORT_SYMBOL(xfrm_state_insert);
/* xfrm_state_lock is held */
static struct xfrm_state *__find_acq_core(unsigned short family, u8 mode, u32 reqid, u8 proto, xfrm_address_t *daddr, xfrm_address_t *saddr, int create)
{
        unsigned int h = xfrm_dst_hash(daddr, saddr, reqid, family);
        struct hlist_node *entry;
        struct xfrm_state *x;

        hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
                if (x->props.reqid  != reqid ||
                    x->props.mode   != mode ||
                    x->props.family != family ||
                    x->km.state     != XFRM_STATE_ACQ ||
                    x->id.spi       != 0 ||
                    x->id.proto     != proto)
                        continue;

                switch (family) {
                case AF_INET:
                        if (x->id.daddr.a4    != daddr->a4 ||
                            x->props.saddr.a4 != saddr->a4)
                                continue;
                        break;
                case AF_INET6:
                        if (!ipv6_addr_equal((struct in6_addr *)x->id.daddr.a6,
                                             (struct in6_addr *)daddr) ||
                            !ipv6_addr_equal((struct in6_addr *)
                                             x->props.saddr.a6,
                                             (struct in6_addr *)saddr))
                                continue;
                        break;
                }

                xfrm_state_hold(x);
                return x;
        }

        if (!create)
                return NULL;

        x = xfrm_state_alloc();
        if (likely(x)) {
                switch (family) {
                case AF_INET:
                        x->sel.daddr.a4 = daddr->a4;
                        x->sel.saddr.a4 = saddr->a4;
                        x->sel.prefixlen_d = 32;
                        x->sel.prefixlen_s = 32;
                        x->props.saddr.a4 = saddr->a4;
                        x->id.daddr.a4 = daddr->a4;
                        break;

                case AF_INET6:
                        ipv6_addr_copy((struct in6_addr *)x->sel.daddr.a6,
                                       (struct in6_addr *)daddr);
                        ipv6_addr_copy((struct in6_addr *)x->sel.saddr.a6,
                                       (struct in6_addr *)saddr);
                        x->sel.prefixlen_d = 128;
                        x->sel.prefixlen_s = 128;
                        ipv6_addr_copy((struct in6_addr *)x->props.saddr.a6,
                                       (struct in6_addr *)saddr);
                        ipv6_addr_copy((struct in6_addr *)x->id.daddr.a6,
                                       (struct in6_addr *)daddr);
                        break;
                }

                x->km.state = XFRM_STATE_ACQ;
                x->id.proto = proto;
                x->props.family = family;
                x->props.mode = mode;
                x->props.reqid = reqid;
                x->lft.hard_add_expires_seconds = sysctl_xfrm_acq_expires;
                xfrm_state_hold(x);
                x->timer.expires = jiffies + sysctl_xfrm_acq_expires*HZ;
                add_timer(&x->timer);
                hlist_add_head(&x->bydst, xfrm_state_bydst+h);
                h = xfrm_src_hash(daddr, saddr, family);
                hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);
                wake_up(&km_waitq);

                xfrm_state_num++;

                xfrm_hash_grow_check(x->bydst.next != NULL);
        }

        return x;
}
static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq);
int xfrm_state_add(struct xfrm_state *x)
{
        struct xfrm_state *x1;
        int family;
        int err;
        int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);

        family = x->props.family;

        spin_lock_bh(&xfrm_state_lock);

        x1 = __xfrm_state_locate(x, use_spi, family);
        if (x1) {
                xfrm_state_put(x1);
                x1 = NULL;
                err = -EEXIST;
                goto out;
        }

        if (use_spi && x->km.seq) {
                x1 = __xfrm_find_acq_byseq(x->km.seq);
                if (x1 && ((x1->id.proto != x->id.proto) ||
                    xfrm_addr_cmp(&x1->id.daddr, &x->id.daddr, family))) {
                        xfrm_state_put(x1);
                        x1 = NULL;
                }
        }

        if (use_spi && !x1)
                x1 = __find_acq_core(family, x->props.mode, x->props.reqid,
                                     x->id.proto,
                                     &x->id.daddr, &x->props.saddr, 0);

        __xfrm_state_bump_genids(x);
        __xfrm_state_insert(x);
        err = 0;

out:
        spin_unlock_bh(&xfrm_state_lock);

        if (x1) {
                xfrm_state_delete(x1);
                xfrm_state_put(x1);
        }

        return err;
}
EXPORT_SYMBOL(xfrm_state_add);
#ifdef CONFIG_XFRM_MIGRATE
struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, int *errp)
{
        int err = -ENOMEM;
        struct xfrm_state *x = xfrm_state_alloc();
        if (!x)
                goto error;

        memcpy(&x->id, &orig->id, sizeof(x->id));
        memcpy(&x->sel, &orig->sel, sizeof(x->sel));
        memcpy(&x->lft, &orig->lft, sizeof(x->lft));
        x->props.mode = orig->props.mode;
        x->props.replay_window = orig->props.replay_window;
        x->props.reqid = orig->props.reqid;
        x->props.family = orig->props.family;
        x->props.saddr = orig->props.saddr;

        if (orig->aalg) {
                x->aalg = xfrm_algo_clone(orig->aalg);
                if (!x->aalg)
                        goto error;
        }
        x->props.aalgo = orig->props.aalgo;

        if (orig->ealg) {
                x->ealg = xfrm_algo_clone(orig->ealg);
                if (!x->ealg)
                        goto error;
        }
        x->props.ealgo = orig->props.ealgo;

        if (orig->calg) {
                x->calg = xfrm_algo_clone(orig->calg);
                if (!x->calg)
                        goto error;
        }
        x->props.calgo = orig->props.calgo;

        if (orig->encap) {
                x->encap = kmemdup(orig->encap, sizeof(*x->encap), GFP_KERNEL);
                if (!x->encap)
                        goto error;
        }

        if (orig->coaddr) {
                x->coaddr = kmemdup(orig->coaddr, sizeof(*x->coaddr),
                                    GFP_KERNEL);
                if (!x->coaddr)
                        goto error;
        }

        err = xfrm_init_state(x);
        if (err)
                goto error;

        x->props.flags = orig->props.flags;

        x->curlft.add_time = orig->curlft.add_time;
        x->km.state = orig->km.state;
        x->km.seq = orig->km.seq;

        return x;

 error:
        if (errp)
                *errp = err;
        if (x) {
                kfree(x->aalg);
                kfree(x->ealg);
                kfree(x->calg);
                kfree(x->encap);
                kfree(x->coaddr);
        }
        kfree(x);
        return NULL;
}
EXPORT_SYMBOL(xfrm_state_clone);
/* xfrm_state_lock is held */
struct xfrm_state * xfrm_migrate_state_find(struct xfrm_migrate *m)
{
        unsigned int h;
        struct xfrm_state *x;
        struct hlist_node *entry;

        if (m->reqid) {
                h = xfrm_dst_hash(&m->old_daddr, &m->old_saddr,
                                  m->reqid, m->old_family);
                hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
                        if (x->props.mode != m->mode ||
                            x->id.proto != m->proto)
                                continue;
                        if (m->reqid && x->props.reqid != m->reqid)
                                continue;
                        if (xfrm_addr_cmp(&x->id.daddr, &m->old_daddr,
                                          m->old_family) ||
                            xfrm_addr_cmp(&x->props.saddr, &m->old_saddr,
                                          m->old_family))
                                continue;
                        xfrm_state_hold(x);
                        return x;
                }
        } else {
                h = xfrm_src_hash(&m->old_daddr, &m->old_saddr,
                                  m->old_family);
                hlist_for_each_entry(x, entry, xfrm_state_bysrc+h, bysrc) {
                        if (x->props.mode != m->mode ||
                            x->id.proto != m->proto)
                                continue;
                        if (xfrm_addr_cmp(&x->id.daddr, &m->old_daddr,
                                          m->old_family) ||
                            xfrm_addr_cmp(&x->props.saddr, &m->old_saddr,
                                          m->old_family))
                                continue;
                        xfrm_state_hold(x);
                        return x;
                }
        }

        return NULL;
}
EXPORT_SYMBOL(xfrm_migrate_state_find);
struct xfrm_state * xfrm_state_migrate(struct xfrm_state *x,
                                       struct xfrm_migrate *m)
{
        struct xfrm_state *xc;
        int err;

        xc = xfrm_state_clone(x, &err);
        if (!xc)
                return NULL;

        memcpy(&xc->id.daddr, &m->new_daddr, sizeof(xc->id.daddr));
        memcpy(&xc->props.saddr, &m->new_saddr, sizeof(xc->props.saddr));

        /* add state */
        if (!xfrm_addr_cmp(&x->id.daddr, &m->new_daddr, m->new_family)) {
                /* care is needed when the destination address of the
                   state is to be updated, as it is part of the
                   (daddr, proto, spi) lookup triplet */
                xfrm_state_insert(xc);
        } else {
                if ((err = xfrm_state_add(xc)) < 0)
                        goto error;
        }

        return xc;
error:
        xfrm_state_put(xc);
        return NULL;
}
EXPORT_SYMBOL(xfrm_state_migrate);
#endif
int xfrm_state_update(struct xfrm_state *x)
{
        struct xfrm_state *x1;
        int err;
        int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);

        spin_lock_bh(&xfrm_state_lock);
        x1 = __xfrm_state_locate(x, use_spi, x->props.family);

        err = -ESRCH;
        if (!x1)
                goto out;

        if (xfrm_state_kern(x1)) {
                xfrm_state_put(x1);
                err = -EEXIST;
                goto out;
        }

        if (x1->km.state == XFRM_STATE_ACQ) {
                __xfrm_state_insert(x);
                x = NULL;
        }
        err = 0;

out:
        spin_unlock_bh(&xfrm_state_lock);

        if (err)
                return err;

        if (!x) {
                xfrm_state_delete(x1);
                xfrm_state_put(x1);
                return 0;
        }

        err = -EINVAL;
        spin_lock_bh(&x1->lock);
        if (likely(x1->km.state == XFRM_STATE_VALID)) {
                if (x->encap && x1->encap)
                        memcpy(x1->encap, x->encap, sizeof(*x1->encap));
                if (x->coaddr && x1->coaddr) {
                        memcpy(x1->coaddr, x->coaddr, sizeof(*x1->coaddr));
                }
                if (!use_spi && memcmp(&x1->sel, &x->sel, sizeof(x1->sel)))
                        memcpy(&x1->sel, &x->sel, sizeof(x1->sel));
                memcpy(&x1->lft, &x->lft, sizeof(x1->lft));
                x1->km.dying = 0;

                mod_timer(&x1->timer, jiffies + HZ);
                if (x1->curlft.use_time)
                        xfrm_state_check_expire(x1);

                err = 0;
        }
        spin_unlock_bh(&x1->lock);

        xfrm_state_put(x1);

        return err;
}
EXPORT_SYMBOL(xfrm_state_update);
int xfrm_state_check_expire(struct xfrm_state *x)
{
        if (!x->curlft.use_time)
                x->curlft.use_time = get_seconds();

        if (x->km.state != XFRM_STATE_VALID)
                return -EINVAL;

        if (x->curlft.bytes >= x->lft.hard_byte_limit ||
            x->curlft.packets >= x->lft.hard_packet_limit) {
                x->km.state = XFRM_STATE_EXPIRED;
                mod_timer(&x->timer, jiffies);
                return -EINVAL;
        }

        if (!x->km.dying &&
            (x->curlft.bytes >= x->lft.soft_byte_limit ||
             x->curlft.packets >= x->lft.soft_packet_limit)) {
                x->km.dying = 1;
                km_state_expired(x, 0, 0);
        }
        return 0;
}
EXPORT_SYMBOL(xfrm_state_check_expire);
struct xfrm_state *
xfrm_state_lookup(xfrm_address_t *daddr, __be32 spi, u8 proto,
                  unsigned short family)
{
        struct xfrm_state *x;

        spin_lock_bh(&xfrm_state_lock);
        x = __xfrm_state_lookup(daddr, spi, proto, family);
        spin_unlock_bh(&xfrm_state_lock);
        return x;
}
EXPORT_SYMBOL(xfrm_state_lookup);
struct xfrm_state *
xfrm_state_lookup_byaddr(xfrm_address_t *daddr, xfrm_address_t *saddr,
                         u8 proto, unsigned short family)
{
        struct xfrm_state *x;

        spin_lock_bh(&xfrm_state_lock);
        x = __xfrm_state_lookup_byaddr(daddr, saddr, proto, family);
        spin_unlock_bh(&xfrm_state_lock);
        return x;
}
EXPORT_SYMBOL(xfrm_state_lookup_byaddr);
struct xfrm_state *
xfrm_find_acq(u8 mode, u32 reqid, u8 proto,
              xfrm_address_t *daddr, xfrm_address_t *saddr,
              int create, unsigned short family)
{
        struct xfrm_state *x;

        spin_lock_bh(&xfrm_state_lock);
        x = __find_acq_core(family, mode, reqid, proto, daddr, saddr, create);
        spin_unlock_bh(&xfrm_state_lock);

        return x;
}
EXPORT_SYMBOL(xfrm_find_acq);
#ifdef CONFIG_XFRM_SUB_POLICY
int
xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n,
               unsigned short family)
{
        int err = 0;
        struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
        if (!afinfo)
                return -EAFNOSUPPORT;

        spin_lock_bh(&xfrm_state_lock);
        if (afinfo->tmpl_sort)
                err = afinfo->tmpl_sort(dst, src, n);
        spin_unlock_bh(&xfrm_state_lock);
        xfrm_state_put_afinfo(afinfo);
        return err;
}
EXPORT_SYMBOL(xfrm_tmpl_sort);

int
xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
                unsigned short family)
{
        int err = 0;
        struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
        if (!afinfo)
                return -EAFNOSUPPORT;

        spin_lock_bh(&xfrm_state_lock);
        if (afinfo->state_sort)
                err = afinfo->state_sort(dst, src, n);
        spin_unlock_bh(&xfrm_state_lock);
        xfrm_state_put_afinfo(afinfo);
        return err;
}
EXPORT_SYMBOL(xfrm_state_sort);
#endif
/* Silly enough, but I'm too lazy to build a resolution list :-) */

static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq)
{
        int i;

        for (i = 0; i <= xfrm_state_hmask; i++) {
                struct hlist_node *entry;
                struct xfrm_state *x;

                hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
                        if (x->km.seq == seq &&
                            x->km.state == XFRM_STATE_ACQ) {
                                xfrm_state_hold(x);
                                return x;
                        }
                }
        }
        return NULL;
}

struct xfrm_state *xfrm_find_acq_byseq(u32 seq)
{
        struct xfrm_state *x;

        spin_lock_bh(&xfrm_state_lock);
        x = __xfrm_find_acq_byseq(seq);
        spin_unlock_bh(&xfrm_state_lock);
        return x;
}
EXPORT_SYMBOL(xfrm_find_acq_byseq);
u32 xfrm_get_acqseq(void)
{
        u32 res;
        static u32 acqseq;
        static DEFINE_SPINLOCK(acqseq_lock);

        spin_lock_bh(&acqseq_lock);
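        /* ++acqseq wraps to zero once per 2^32 calls; the ?: retries the
         * increment so that zero is never handed out as an acquire seq.
         */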
        res = (++acqseq ? : ++acqseq);
        spin_unlock_bh(&acqseq_lock);
        return res;
}
EXPORT_SYMBOL(xfrm_get_acqseq);
int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high)
{
        unsigned int h;
        struct xfrm_state *x0;
        int err = -ENOENT;
        __be32 minspi = htonl(low);
        __be32 maxspi = htonl(high);

        spin_lock_bh(&x->lock);
        if (x->km.state == XFRM_STATE_DEAD)
                goto unlock;

        err = 0;
        if (x->id.spi)
                goto unlock;

        err = -ENOENT;

        if (minspi == maxspi) {
                x0 = xfrm_state_lookup(&x->id.daddr, minspi, x->id.proto, x->props.family);
                if (x0) {
                        xfrm_state_put(x0);
                        goto unlock;
                }
                x->id.spi = minspi;
        } else {
                u32 spi = 0;
                for (h=0; h<high-low+1; h++) {
                        spi = low + net_random()%(high-low+1);
                        x0 = xfrm_state_lookup(&x->id.daddr, htonl(spi), x->id.proto, x->props.family);
                        if (x0 == NULL) {
                                x->id.spi = htonl(spi);
                                break;
                        }
                        xfrm_state_put(x0);
                }
        }
        if (x->id.spi) {
                spin_lock_bh(&xfrm_state_lock);
                h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, x->props.family);
                hlist_add_head(&x->byspi, xfrm_state_byspi+h);
                spin_unlock_bh(&xfrm_state_lock);

                err = 0;
        }

unlock:
        spin_unlock_bh(&x->lock);

        return err;
}
EXPORT_SYMBOL(xfrm_alloc_spi);
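/*
 * Note on the probing loop above: for a range [low, high] at most
 * (high - low + 1) uniformly random probes are made, so a sparsely used
 * range almost always succeeds quickly, while a nearly full range can
 * fail even though free SPIs remain -- the caller then sees -ENOENT.
 */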
int xfrm_state_walk(u8 proto, int (*func)(struct xfrm_state *, int, void*),
                    void *data)
{
        int i;
        struct xfrm_state *x, *last = NULL;
        struct hlist_node *entry;
        int count = 0;
        int err = 0;

        spin_lock_bh(&xfrm_state_lock);
        for (i = 0; i <= xfrm_state_hmask; i++) {
                hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
                        if (!xfrm_id_proto_match(x->id.proto, proto))
                                continue;
                        if (last) {
                                err = func(last, count, data);
                                if (err)
                                        goto out;
                        }
                        last = x;
                        count++;
                }
        }
        if (count == 0) {
                err = -ENOENT;
                goto out;
        }
        err = func(last, 0, data);
out:
        spin_unlock_bh(&xfrm_state_lock);
        return err;
}
EXPORT_SYMBOL(xfrm_state_walk);
void xfrm_replay_notify(struct xfrm_state *x, int event)
{
        struct km_event c;
        /* we send notify messages in case
         *  1. we updated one of the sequence numbers, and the seqno difference
         *     is at least x->replay_maxdiff, in this case we also update the
         *     timeout of our timer function
         *  2. if x->replay_maxage has elapsed since last update,
         *     and there were changes
         *
         *  The state structure must be locked!
         */

        switch (event) {
        case XFRM_REPLAY_UPDATE:
                if (x->replay_maxdiff &&
                    (x->replay.seq - x->preplay.seq < x->replay_maxdiff) &&
                    (x->replay.oseq - x->preplay.oseq < x->replay_maxdiff)) {
                        if (x->xflags & XFRM_TIME_DEFER)
                                event = XFRM_REPLAY_TIMEOUT;
                        else
                                return;
                }

                break;

        case XFRM_REPLAY_TIMEOUT:
                if ((x->replay.seq == x->preplay.seq) &&
                    (x->replay.bitmap == x->preplay.bitmap) &&
                    (x->replay.oseq == x->preplay.oseq)) {
                        x->xflags |= XFRM_TIME_DEFER;
                        return;
                }

                break;
        }

        memcpy(&x->preplay, &x->replay, sizeof(struct xfrm_replay_state));
        c.event = XFRM_MSG_NEWAE;
        c.data.aevent = event;
        km_state_notify(x, &c);

        if (x->replay_maxage &&
            !mod_timer(&x->rtimer, jiffies + x->replay_maxage))
                x->xflags &= ~XFRM_TIME_DEFER;
}
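/*
 * Worked example for the XFRM_REPLAY_UPDATE case above: with
 * replay_maxdiff == 32, per-packet updates are swallowed until the input
 * or output sequence number has advanced 32 past the x->preplay snapshot;
 * the replay_maxage timer then picks up any swallowed changes and emits
 * them as an XFRM_REPLAY_TIMEOUT event.
 */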
static void xfrm_replay_timer_handler(unsigned long data)
{
        struct xfrm_state *x = (struct xfrm_state*)data;

        spin_lock(&x->lock);

        if (x->km.state == XFRM_STATE_VALID) {
                if (xfrm_aevent_is_on())
                        xfrm_replay_notify(x, XFRM_REPLAY_TIMEOUT);
                else
                        x->xflags |= XFRM_TIME_DEFER;
        }

        spin_unlock(&x->lock);
}
int xfrm_replay_check(struct xfrm_state *x, __be32 net_seq)
{
        u32 diff;
        u32 seq = ntohl(net_seq);

        if (unlikely(seq == 0))
                return -EINVAL;

        if (likely(seq > x->replay.seq))
                return 0;

        diff = x->replay.seq - seq;
        if (diff >= min_t(unsigned int, x->props.replay_window,
                          sizeof(x->replay.bitmap) * 8)) {
                x->stats.replay_window++;
                return -EINVAL;
        }

        if (x->replay.bitmap & (1U << diff)) {
                x->stats.replay++;
                return -EINVAL;
        }
        return 0;
}
EXPORT_SYMBOL(xfrm_replay_check);
void xfrm_replay_advance(struct xfrm_state *x, __be32 net_seq)
{
        u32 diff;
        u32 seq = ntohl(net_seq);

        if (seq > x->replay.seq) {
                diff = seq - x->replay.seq;
                if (diff < x->props.replay_window)
                        x->replay.bitmap = ((x->replay.bitmap) << diff) | 1;
                else
                        x->replay.bitmap = 1;
                x->replay.seq = seq;
        } else {
                diff = x->replay.seq - seq;
                x->replay.bitmap |= (1U << diff);
        }

        if (xfrm_aevent_is_on())
                xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
}
EXPORT_SYMBOL(xfrm_replay_advance);
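/*
 * Worked example (props.replay_window == 8): packets 1..8 arriving in
 * order leave replay.seq == 8 with the low byte of the bitmap 0xff.
 * Packet 10 shifts the bitmap left by diff == 2 and sets bit 0
 * (seq becomes 10); a late packet 9 only sets bit diff == 1; a second
 * copy of packet 10 is then rejected by xfrm_replay_check() because
 * bit 0 is already set.
 */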
static LIST_HEAD(xfrm_km_list);
static DEFINE_RWLOCK(xfrm_km_lock);
void km_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
{
        struct xfrm_mgr *km;

        read_lock(&xfrm_km_lock);
        list_for_each_entry(km, &xfrm_km_list, list)
                if (km->notify_policy)
                        km->notify_policy(xp, dir, c);
        read_unlock(&xfrm_km_lock);
}

void km_state_notify(struct xfrm_state *x, struct km_event *c)
{
        struct xfrm_mgr *km;
        read_lock(&xfrm_km_lock);
        list_for_each_entry(km, &xfrm_km_list, list)
                if (km->notify)
                        km->notify(x, c);
        read_unlock(&xfrm_km_lock);
}

EXPORT_SYMBOL(km_policy_notify);
EXPORT_SYMBOL(km_state_notify);
void km_state_expired(struct xfrm_state *x, int hard, u32 pid)
{
        struct km_event c;

        c.data.hard = hard;
        c.pid = pid;
        c.event = XFRM_MSG_EXPIRE;
        km_state_notify(x, &c);

        if (hard)
                wake_up(&km_waitq);
}
EXPORT_SYMBOL(km_state_expired);
/*
 * We send to all registered managers regardless of failure;
 * we are happy with one success.
 */
int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol)
{
        int err = -EINVAL, acqret;
        struct xfrm_mgr *km;

        read_lock(&xfrm_km_lock);
        list_for_each_entry(km, &xfrm_km_list, list) {
                acqret = km->acquire(x, t, pol, XFRM_POLICY_OUT);
                if (!acqret)
                        err = acqret;
        }
        read_unlock(&xfrm_km_lock);
        return err;
}
EXPORT_SYMBOL(km_query);
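/*
 * Example of the "one success" rule above: with both af_key and
 * xfrm_user registered, an -ENOMEM from one manager's acquire() is
 * forgotten as soon as the other returns 0, so km_query() reports
 * success and the larval ACQ state stays alive awaiting resolution.
 */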
int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport)
{
        int err = -EINVAL;
        struct xfrm_mgr *km;

        read_lock(&xfrm_km_lock);
        list_for_each_entry(km, &xfrm_km_list, list) {
                if (km->new_mapping)
                        err = km->new_mapping(x, ipaddr, sport);
                if (!err)
                        break;
        }
        read_unlock(&xfrm_km_lock);
        return err;
}
EXPORT_SYMBOL(km_new_mapping);
void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 pid)
{
        struct km_event c;

        c.data.hard = hard;
        c.pid = pid;
        c.event = XFRM_MSG_POLEXPIRE;
        km_policy_notify(pol, dir, &c);

        if (hard)
                wake_up(&km_waitq);
}
EXPORT_SYMBOL(km_policy_expired);
#ifdef CONFIG_XFRM_MIGRATE
int km_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
               struct xfrm_migrate *m, int num_migrate)
{
        int err = -EINVAL;
        int ret;
        struct xfrm_mgr *km;

        read_lock(&xfrm_km_lock);
        list_for_each_entry(km, &xfrm_km_list, list) {
                if (km->migrate) {
                        ret = km->migrate(sel, dir, type, m, num_migrate);
                        if (!ret)
                                err = ret;
                }
        }
        read_unlock(&xfrm_km_lock);
        return err;
}
EXPORT_SYMBOL(km_migrate);
#endif
int km_report(u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr)
{
        int err = -EINVAL;
        int ret;
        struct xfrm_mgr *km;

        read_lock(&xfrm_km_lock);
        list_for_each_entry(km, &xfrm_km_list, list) {
                if (km->report) {
                        ret = km->report(proto, sel, addr);
                        if (!ret)
                                err = ret;
                }
        }
        read_unlock(&xfrm_km_lock);
        return err;
}
EXPORT_SYMBOL(km_report);
int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
{
        int err;
        u8 *data;
        struct xfrm_mgr *km;
        struct xfrm_policy *pol = NULL;

        if (optlen <= 0 || optlen > PAGE_SIZE)
                return -EMSGSIZE;

        data = kmalloc(optlen, GFP_KERNEL);
        if (!data)
                return -ENOMEM;

        err = -EFAULT;
        if (copy_from_user(data, optval, optlen))
                goto out;

        err = -EINVAL;
        read_lock(&xfrm_km_lock);
        list_for_each_entry(km, &xfrm_km_list, list) {
                pol = km->compile_policy(sk, optname, data,
                                         optlen, &err);
                if (err >= 0)
                        break;
        }
        read_unlock(&xfrm_km_lock);

        if (err >= 0) {
                xfrm_sk_policy_insert(sk, err, pol);
                xfrm_pol_put(pol);
                err = 0;
        }

out:
        kfree(data);
        return err;
}
EXPORT_SYMBOL(xfrm_user_policy);
int xfrm_register_km(struct xfrm_mgr *km)
{
        write_lock_bh(&xfrm_km_lock);
        list_add_tail(&km->list, &xfrm_km_list);
        write_unlock_bh(&xfrm_km_lock);
        return 0;
}
EXPORT_SYMBOL(xfrm_register_km);

int xfrm_unregister_km(struct xfrm_mgr *km)
{
        write_lock_bh(&xfrm_km_lock);
        list_del(&km->list);
        write_unlock_bh(&xfrm_km_lock);
        return 0;
}
EXPORT_SYMBOL(xfrm_unregister_km);
int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo)
{
        int err = 0;
        if (unlikely(afinfo == NULL))
                return -EINVAL;
        if (unlikely(afinfo->family >= NPROTO))
                return -EAFNOSUPPORT;
        write_lock_bh(&xfrm_state_afinfo_lock);
        if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL))
                err = -ENOBUFS;
        else
                xfrm_state_afinfo[afinfo->family] = afinfo;
        write_unlock_bh(&xfrm_state_afinfo_lock);
        return err;
}
EXPORT_SYMBOL(xfrm_state_register_afinfo);

int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
{
        int err = 0;
        if (unlikely(afinfo == NULL))
                return -EINVAL;
        if (unlikely(afinfo->family >= NPROTO))
                return -EAFNOSUPPORT;
        write_lock_bh(&xfrm_state_afinfo_lock);
        if (likely(xfrm_state_afinfo[afinfo->family] != NULL)) {
                if (unlikely(xfrm_state_afinfo[afinfo->family] != afinfo))
                        err = -EINVAL;
                else
                        xfrm_state_afinfo[afinfo->family] = NULL;
        }
        write_unlock_bh(&xfrm_state_afinfo_lock);
        return err;
}
EXPORT_SYMBOL(xfrm_state_unregister_afinfo);
static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family)
{
        struct xfrm_state_afinfo *afinfo;
        if (unlikely(family >= NPROTO))
                return NULL;
        read_lock(&xfrm_state_afinfo_lock);
        afinfo = xfrm_state_afinfo[family];
        if (unlikely(!afinfo))
                read_unlock(&xfrm_state_afinfo_lock);
        return afinfo;
}

static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo)
{
        read_unlock(&xfrm_state_afinfo_lock);
}
/* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
void xfrm_state_delete_tunnel(struct xfrm_state *x)
{
        if (x->tunnel) {
                struct xfrm_state *t = x->tunnel;

                if (atomic_read(&t->tunnel_users) == 2)
                        xfrm_state_delete(t);
                atomic_dec(&t->tunnel_users);
                xfrm_state_put(t);
                x->tunnel = NULL;
        }
}
EXPORT_SYMBOL(xfrm_state_delete_tunnel);
int xfrm_state_mtu(struct xfrm_state *x, int mtu)
{
        int res;

        spin_lock_bh(&x->lock);
        if (x->km.state == XFRM_STATE_VALID &&
            x->type && x->type->get_mtu)
                res = x->type->get_mtu(x, mtu);
        else
                res = mtu - x->props.header_len;
        spin_unlock_bh(&x->lock);
        return res;
}
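/*
 * Example of the fallback path (illustrative numbers): a state whose type
 * supplies no get_mtu() and has props.header_len == 20 turns an underlying
 * MTU of 1500 into 1480 bytes of inner payload; ESP instead provides a
 * get_mtu() callback so that IV, padding and trailer bytes are accounted
 * for.
 */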
int xfrm_init_state(struct xfrm_state *x)
{
        struct xfrm_state_afinfo *afinfo;
        int family = x->props.family;
        int err;

        err = -EAFNOSUPPORT;
        afinfo = xfrm_state_get_afinfo(family);
        if (!afinfo)
                goto error;

        err = 0;
        if (afinfo->init_flags)
                err = afinfo->init_flags(x);

        xfrm_state_put_afinfo(afinfo);

        if (err)
                goto error;

        err = -EPROTONOSUPPORT;
        x->inner_mode = xfrm_get_mode(x->props.mode, x->sel.family);
        if (x->inner_mode == NULL)
                goto error;

        if (!(x->inner_mode->flags & XFRM_MODE_FLAG_TUNNEL) &&
            family != x->sel.family)
                goto error;

        x->type = xfrm_get_type(x->id.proto, family);
        if (x->type == NULL)
                goto error;

        err = x->type->init_state(x);
        if (err)
                goto error;

        err = -EPROTONOSUPPORT;
        x->outer_mode = xfrm_get_mode(x->props.mode, family);
        if (x->outer_mode == NULL)
                goto error;

        x->km.state = XFRM_STATE_VALID;
        err = 0;

error:
        return err;
}
EXPORT_SYMBOL(xfrm_init_state);
void __init xfrm_state_init(void)
{
        unsigned int sz;

        sz = sizeof(struct hlist_head) * 8;

        xfrm_state_bydst = xfrm_hash_alloc(sz);
        xfrm_state_bysrc = xfrm_hash_alloc(sz);
        xfrm_state_byspi = xfrm_hash_alloc(sz);
        if (!xfrm_state_bydst || !xfrm_state_bysrc || !xfrm_state_byspi)
                panic("XFRM: Cannot allocate bydst/bysrc/byspi hashes.");
        xfrm_state_hmask = ((sz / sizeof(struct hlist_head)) - 1);

        INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task);
}
#ifdef CONFIG_AUDITSYSCALL
static inline void xfrm_audit_common_stateinfo(struct xfrm_state *x,
                                               struct audit_buffer *audit_buf)
{
        if (x->security)
                audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
                                 x->security->ctx_alg, x->security->ctx_doi,
                                 x->security->ctx_str);

        switch (x->props.family) {
        case AF_INET:
                audit_log_format(audit_buf, " src=%u.%u.%u.%u dst=%u.%u.%u.%u",
                                 NIPQUAD(x->props.saddr.a4),
                                 NIPQUAD(x->id.daddr.a4));
                break;
        case AF_INET6:
                {
                        struct in6_addr saddr6, daddr6;

                        memcpy(&saddr6, x->props.saddr.a6,
                               sizeof(struct in6_addr));
                        memcpy(&daddr6, x->id.daddr.a6,
                               sizeof(struct in6_addr));
                        audit_log_format(audit_buf,
                                         " src=" NIP6_FMT " dst=" NIP6_FMT,
                                         NIP6(saddr6), NIP6(daddr6));
                }
                break;
        }
}
void
xfrm_audit_state_add(struct xfrm_state *x, int result, u32 auid, u32 sid)
{
        struct audit_buffer *audit_buf;
        u32 spi;
        extern int audit_enabled;

        if (audit_enabled == 0)
                return;
        audit_buf = xfrm_audit_start(auid, sid);
        if (audit_buf == NULL)
                return;
        audit_log_format(audit_buf, " op=SAD-add res=%u", result);
        xfrm_audit_common_stateinfo(x, audit_buf);
        spi = ntohl(x->id.spi);
        audit_log_format(audit_buf, " spi=%u(0x%x)", spi, spi);
        audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_state_add);

void
xfrm_audit_state_delete(struct xfrm_state *x, int result, u32 auid, u32 sid)
{
        struct audit_buffer *audit_buf;
        u32 spi;
        extern int audit_enabled;

        if (audit_enabled == 0)
                return;
        audit_buf = xfrm_audit_start(auid, sid);
        if (audit_buf == NULL)
                return;
        audit_log_format(audit_buf, " op=SAD-delete res=%u", result);
        xfrm_audit_common_stateinfo(x, audit_buf);
        spi = ntohl(x->id.spi);
        audit_log_format(audit_buf, " spi=%u(0x%x)", spi, spi);
        audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_state_delete);
#endif /* CONFIG_AUDITSYSCALL */