/*
 * xfrm_state.c
 *
 * Changes:
 *	Kazunori MIYAZAWA @USAGI
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *	YOSHIFUJI Hideaki @USAGI
 *		Split up af-specific functions
 *	Derek Atkins <derek@ihtfp.com>
 *		Add UDP Encapsulation
 */
#include <linux/workqueue.h>
#include <net/xfrm.h>
#include <linux/pfkeyv2.h>
#include <linux/ipsec.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <asm/uaccess.h>

#include "xfrm_hash.h"
struct sock *xfrm_nl;
EXPORT_SYMBOL(xfrm_nl);

u32 sysctl_xfrm_aevent_etime __read_mostly = XFRM_AE_ETIME;
EXPORT_SYMBOL(sysctl_xfrm_aevent_etime);

u32 sysctl_xfrm_aevent_rseqth __read_mostly = XFRM_AE_SEQT_SIZE;
EXPORT_SYMBOL(sysctl_xfrm_aevent_rseqth);

u32 sysctl_xfrm_acq_expires __read_mostly = 30;
/* Each xfrm_state may be linked to two tables:

   1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl)
   2. Hash table by (daddr,family,reqid) to find what SAs exist for given
      destination/tunnel endpoint. (output)
 */
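/* Note: a third table, keyed by (daddr,saddr,family), also exists below
 * (xfrm_state_bysrc); it backs the *_lookup_byaddr() paths.  The comment
 * above predates its addition.
 */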
static DEFINE_SPINLOCK(xfrm_state_lock);

/* Hash table to find appropriate SA towards given target (endpoint
 * of tunnel or destination of transport mode) allowed by selector.
 *
 * Main use is finding SA after policy selected tunnel or transport mode.
 * Also, it can be used by ah/esp icmp error handler to find offending SA.
 */
static struct hlist_head *xfrm_state_bydst __read_mostly;
static struct hlist_head *xfrm_state_bysrc __read_mostly;
static struct hlist_head *xfrm_state_byspi __read_mostly;
static unsigned int xfrm_state_hmask __read_mostly;
static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;
static unsigned int xfrm_state_num;
static unsigned int xfrm_state_genid;
static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family);
static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);
static inline unsigned int xfrm_dst_hash(xfrm_address_t *daddr,
					 xfrm_address_t *saddr,
					 u32 reqid,
					 unsigned short family)
{
	return __xfrm_dst_hash(daddr, saddr, reqid, family, xfrm_state_hmask);
}

static inline unsigned int xfrm_src_hash(xfrm_address_t *daddr,
					 xfrm_address_t *saddr,
					 unsigned short family)
{
	return __xfrm_src_hash(daddr, saddr, family, xfrm_state_hmask);
}

static inline unsigned int
xfrm_spi_hash(xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family)
{
	return __xfrm_spi_hash(daddr, spi, proto, family, xfrm_state_hmask);
}
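/* All three helpers (the __xfrm_*_hash() workers live in xfrm_hash.h)
 * hash the addresses and reduce the result to a bucket index by masking
 * with the current xfrm_state_hmask; e.g. with the initial 8-bucket
 * tables (hmask == 7), a hash value of 0x2a lands in bucket 0x2a & 7 == 2.
 */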
static void xfrm_hash_transfer(struct hlist_head *list,
			       struct hlist_head *ndsttable,
			       struct hlist_head *nsrctable,
			       struct hlist_head *nspitable,
			       unsigned int nhashmask)
{
	struct hlist_node *entry, *tmp;
	struct xfrm_state *x;

	hlist_for_each_entry_safe(x, entry, tmp, list, bydst) {
		unsigned int h;

		h = __xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
				    x->props.reqid, x->props.family,
				    nhashmask);
		hlist_add_head(&x->bydst, ndsttable+h);

		h = __xfrm_src_hash(&x->id.daddr, &x->props.saddr,
				    x->props.family,
				    nhashmask);
		hlist_add_head(&x->bysrc, nsrctable+h);

		if (x->id.spi) {
			h = __xfrm_spi_hash(&x->id.daddr, x->id.spi,
					    x->id.proto, x->props.family,
					    nhashmask);
			hlist_add_head(&x->byspi, nspitable+h);
		}
	}
}

static unsigned long xfrm_hash_new_size(void)
{
	return ((xfrm_state_hmask + 1) << 1) *
		sizeof(struct hlist_head);
}
static DEFINE_MUTEX(hash_resize_mutex);

static void xfrm_hash_resize(struct work_struct *__unused)
{
	struct hlist_head *ndst, *nsrc, *nspi, *odst, *osrc, *ospi;
	unsigned long nsize, osize;
	unsigned int nhashmask, ohashmask;
	int i;

	mutex_lock(&hash_resize_mutex);

	nsize = xfrm_hash_new_size();
	ndst = xfrm_hash_alloc(nsize);
	if (!ndst)
		goto out_unlock;
	nsrc = xfrm_hash_alloc(nsize);
	if (!nsrc) {
		xfrm_hash_free(ndst, nsize);
		goto out_unlock;
	}
	nspi = xfrm_hash_alloc(nsize);
	if (!nspi) {
		xfrm_hash_free(ndst, nsize);
		xfrm_hash_free(nsrc, nsize);
		goto out_unlock;
	}

	spin_lock_bh(&xfrm_state_lock);

	nhashmask = (nsize / sizeof(struct hlist_head)) - 1U;
	for (i = xfrm_state_hmask; i >= 0; i--)
		xfrm_hash_transfer(xfrm_state_bydst+i, ndst, nsrc, nspi,
				   nhashmask);

	odst = xfrm_state_bydst;
	osrc = xfrm_state_bysrc;
	ospi = xfrm_state_byspi;
	ohashmask = xfrm_state_hmask;

	xfrm_state_bydst = ndst;
	xfrm_state_bysrc = nsrc;
	xfrm_state_byspi = nspi;
	xfrm_state_hmask = nhashmask;

	spin_unlock_bh(&xfrm_state_lock);

	osize = (ohashmask + 1) * sizeof(struct hlist_head);
	xfrm_hash_free(odst, osize);
	xfrm_hash_free(osrc, osize);
	xfrm_hash_free(ospi, osize);

out_unlock:
	mutex_unlock(&hash_resize_mutex);
}

static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize);
DECLARE_WAIT_QUEUE_HEAD(km_waitq);
EXPORT_SYMBOL(km_waitq);

static DEFINE_RWLOCK(xfrm_state_afinfo_lock);
static struct xfrm_state_afinfo *xfrm_state_afinfo[NPROTO];

static struct work_struct xfrm_state_gc_work;
static HLIST_HEAD(xfrm_state_gc_list);
static DEFINE_SPINLOCK(xfrm_state_gc_lock);

int __xfrm_state_delete(struct xfrm_state *x);

int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
void km_state_expired(struct xfrm_state *x, int hard, u32 pid);
static struct xfrm_state_afinfo *xfrm_state_lock_afinfo(unsigned int family)
{
	struct xfrm_state_afinfo *afinfo;
	if (unlikely(family >= NPROTO))
		return NULL;
	write_lock_bh(&xfrm_state_afinfo_lock);
	afinfo = xfrm_state_afinfo[family];
	if (unlikely(!afinfo))
		write_unlock_bh(&xfrm_state_afinfo_lock);
	return afinfo;
}

static void xfrm_state_unlock_afinfo(struct xfrm_state_afinfo *afinfo)
{
	write_unlock_bh(&xfrm_state_afinfo_lock);
}
int xfrm_register_type(struct xfrm_type *type, unsigned short family)
{
	struct xfrm_state_afinfo *afinfo = xfrm_state_lock_afinfo(family);
	struct xfrm_type **typemap;
	int err = 0;

	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;
	typemap = afinfo->type_map;

	if (likely(typemap[type->proto] == NULL))
		typemap[type->proto] = type;
	else
		err = -EEXIST;
	xfrm_state_unlock_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_register_type);

int xfrm_unregister_type(struct xfrm_type *type, unsigned short family)
{
	struct xfrm_state_afinfo *afinfo = xfrm_state_lock_afinfo(family);
	struct xfrm_type **typemap;
	int err = 0;

	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;
	typemap = afinfo->type_map;

	if (unlikely(typemap[type->proto] != type))
		err = -ENOENT;
	else
		typemap[type->proto] = NULL;
	xfrm_state_unlock_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_unregister_type);
static struct xfrm_type *xfrm_get_type(u8 proto, unsigned short family)
{
	struct xfrm_state_afinfo *afinfo;
	struct xfrm_type **typemap;
	struct xfrm_type *type;
	int modload_attempted = 0;

retry:
	afinfo = xfrm_state_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return NULL;
	typemap = afinfo->type_map;

	type = typemap[proto];
	if (unlikely(type && !try_module_get(type->owner)))
		type = NULL;
	if (!type && !modload_attempted) {
		xfrm_state_put_afinfo(afinfo);
		request_module("xfrm-type-%d-%d", family, proto);
		modload_attempted = 1;
		goto retry;
	}

	xfrm_state_put_afinfo(afinfo);
	return type;
}

static void xfrm_put_type(struct xfrm_type *type)
{
	module_put(type->owner);
}
int xfrm_register_mode(struct xfrm_mode *mode, int family)
{
	struct xfrm_state_afinfo *afinfo;
	struct xfrm_mode **modemap;
	int err;

	if (unlikely(mode->encap >= XFRM_MODE_MAX))
		return -EINVAL;

	afinfo = xfrm_state_lock_afinfo(family);
	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	err = -EEXIST;
	modemap = afinfo->mode_map;
	if (modemap[mode->encap])
		goto out;

	err = -ENOENT;
	if (!try_module_get(afinfo->owner))
		goto out;

	mode->afinfo = afinfo;
	modemap[mode->encap] = mode;
	err = 0;

out:
	xfrm_state_unlock_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_register_mode);

int xfrm_unregister_mode(struct xfrm_mode *mode, int family)
{
	struct xfrm_state_afinfo *afinfo;
	struct xfrm_mode **modemap;
	int err;

	if (unlikely(mode->encap >= XFRM_MODE_MAX))
		return -EINVAL;

	afinfo = xfrm_state_lock_afinfo(family);
	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	err = -ENOENT;
	modemap = afinfo->mode_map;
	if (likely(modemap[mode->encap] == mode)) {
		modemap[mode->encap] = NULL;
		module_put(mode->afinfo->owner);
		err = 0;
	}

	xfrm_state_unlock_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_unregister_mode);
static struct xfrm_mode *xfrm_get_mode(unsigned int encap, int family)
{
	struct xfrm_state_afinfo *afinfo;
	struct xfrm_mode *mode;
	int modload_attempted = 0;

	if (unlikely(encap >= XFRM_MODE_MAX))
		return NULL;

retry:
	afinfo = xfrm_state_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return NULL;

	mode = afinfo->mode_map[encap];
	if (unlikely(mode && !try_module_get(mode->owner)))
		mode = NULL;
	if (!mode && !modload_attempted) {
		xfrm_state_put_afinfo(afinfo);
		request_module("xfrm-mode-%d-%d", family, encap);
		modload_attempted = 1;
		goto retry;
	}

	xfrm_state_put_afinfo(afinfo);
	return mode;
}

static void xfrm_put_mode(struct xfrm_mode *mode)
{
	module_put(mode->owner);
}
static void xfrm_state_gc_destroy(struct xfrm_state *x)
{
	del_timer_sync(&x->timer);
	del_timer_sync(&x->rtimer);
	kfree(x->aalg);
	kfree(x->ealg);
	kfree(x->calg);
	kfree(x->encap);
	kfree(x->coaddr);
	if (x->inner_mode)
		xfrm_put_mode(x->inner_mode);
	if (x->outer_mode)
		xfrm_put_mode(x->outer_mode);
	if (x->type) {
		x->type->destructor(x);
		xfrm_put_type(x->type);
	}
	security_xfrm_state_free(x);
	kfree(x);
}

static void xfrm_state_gc_task(struct work_struct *data)
{
	struct xfrm_state *x;
	struct hlist_node *entry, *tmp;
	struct hlist_head gc_list;

	spin_lock_bh(&xfrm_state_gc_lock);
	gc_list.first = xfrm_state_gc_list.first;
	INIT_HLIST_HEAD(&xfrm_state_gc_list);
	spin_unlock_bh(&xfrm_state_gc_lock);

	hlist_for_each_entry_safe(x, entry, tmp, &gc_list, bydst)
		xfrm_state_gc_destroy(x);

	wake_up(&km_waitq);
}
static inline unsigned long make_jiffies(long secs)
{
	if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
		return MAX_SCHEDULE_TIMEOUT-1;
	else
		return secs*HZ;
}
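/* Worked example: with HZ == 1000, make_jiffies(5) yields 5000 jiffies;
 * any value at or beyond (MAX_SCHEDULE_TIMEOUT-1)/HZ seconds is clamped
 * to MAX_SCHEDULE_TIMEOUT-1 so the timer arithmetic below cannot
 * overflow.
 */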
static void xfrm_timer_handler(unsigned long data)
{
	struct xfrm_state *x = (struct xfrm_state*)data;
	unsigned long now = get_seconds();
	long next = LONG_MAX;
	int warn = 0;
	int err = 0;

	spin_lock(&x->lock);
	if (x->km.state == XFRM_STATE_DEAD)
		goto out;
	if (x->km.state == XFRM_STATE_EXPIRED)
		goto expired;
	if (x->lft.hard_add_expires_seconds) {
		long tmo = x->lft.hard_add_expires_seconds +
			x->curlft.add_time - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (x->lft.hard_use_expires_seconds) {
		long tmo = x->lft.hard_use_expires_seconds +
			(x->curlft.use_time ? : now) - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (x->km.dying)
		goto resched;
	if (x->lft.soft_add_expires_seconds) {
		long tmo = x->lft.soft_add_expires_seconds +
			x->curlft.add_time - now;
		if (tmo <= 0)
			warn = 1;
		else if (tmo < next)
			next = tmo;
	}
	if (x->lft.soft_use_expires_seconds) {
		long tmo = x->lft.soft_use_expires_seconds +
			(x->curlft.use_time ? : now) - now;
		if (tmo <= 0)
			warn = 1;
		else if (tmo < next)
			next = tmo;
	}

	x->km.dying = warn;
	if (warn)
		km_state_expired(x, 0, 0);
resched:
	if (next != LONG_MAX)
		mod_timer(&x->timer, jiffies + make_jiffies(next));

	goto out;

expired:
	if (x->km.state == XFRM_STATE_ACQ && x->id.spi == 0) {
		x->km.state = XFRM_STATE_EXPIRED;
		wake_up(&km_waitq);
		next = 2;
		goto resched;
	}

	err = __xfrm_state_delete(x);
	if (!err && x->id.spi)
		km_state_expired(x, 1, 0);

	xfrm_audit_state_delete(x, err ? 0 : 1,
				audit_get_loginuid(current->audit_context), 0);

out:
	spin_unlock(&x->lock);
}

static void xfrm_replay_timer_handler(unsigned long data);
struct xfrm_state *xfrm_state_alloc(void)
{
	struct xfrm_state *x;

	x = kzalloc(sizeof(struct xfrm_state), GFP_ATOMIC);

	if (x) {
		atomic_set(&x->refcnt, 1);
		atomic_set(&x->tunnel_users, 0);
		INIT_HLIST_NODE(&x->bydst);
		INIT_HLIST_NODE(&x->bysrc);
		INIT_HLIST_NODE(&x->byspi);
		setup_timer(&x->timer, xfrm_timer_handler, (unsigned long)x);
		setup_timer(&x->rtimer, xfrm_replay_timer_handler,
			    (unsigned long)x);
		x->curlft.add_time = get_seconds();
		x->lft.soft_byte_limit = XFRM_INF;
		x->lft.soft_packet_limit = XFRM_INF;
		x->lft.hard_byte_limit = XFRM_INF;
		x->lft.hard_packet_limit = XFRM_INF;
		x->replay_maxage = 0;
		x->replay_maxdiff = 0;
		spin_lock_init(&x->lock);
	}
	return x;
}
EXPORT_SYMBOL(xfrm_state_alloc);

void __xfrm_state_destroy(struct xfrm_state *x)
{
	BUG_TRAP(x->km.state == XFRM_STATE_DEAD);

	spin_lock_bh(&xfrm_state_gc_lock);
	hlist_add_head(&x->bydst, &xfrm_state_gc_list);
	spin_unlock_bh(&xfrm_state_gc_lock);
	schedule_work(&xfrm_state_gc_work);
}
EXPORT_SYMBOL(__xfrm_state_destroy);
int __xfrm_state_delete(struct xfrm_state *x)
{
	int err = -ESRCH;

	if (x->km.state != XFRM_STATE_DEAD) {
		x->km.state = XFRM_STATE_DEAD;
		spin_lock(&xfrm_state_lock);
		hlist_del(&x->bydst);
		hlist_del(&x->bysrc);
		if (x->id.spi)
			hlist_del(&x->byspi);
		xfrm_state_num--;
		spin_unlock(&xfrm_state_lock);

		/* All xfrm_state objects are created by xfrm_state_alloc.
		 * The xfrm_state_alloc call gives a reference, and that
		 * is what we are dropping here.
		 */
		xfrm_state_put(x);
		err = 0;
	}

	return err;
}
EXPORT_SYMBOL(__xfrm_state_delete);

int xfrm_state_delete(struct xfrm_state *x)
{
	int err;

	spin_lock_bh(&x->lock);
	err = __xfrm_state_delete(x);
	spin_unlock_bh(&x->lock);

	return err;
}
EXPORT_SYMBOL(xfrm_state_delete);
#ifdef CONFIG_SECURITY_NETWORK_XFRM
static inline int
xfrm_state_flush_secctx_check(u8 proto, struct xfrm_audit *audit_info)
{
	int i, err = 0;

	for (i = 0; i <= xfrm_state_hmask; i++) {
		struct hlist_node *entry;
		struct xfrm_state *x;

		hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
			if (xfrm_id_proto_match(x->id.proto, proto) &&
			    (err = security_xfrm_state_delete(x)) != 0) {
				xfrm_audit_state_delete(x, 0,
							audit_info->loginuid,
							audit_info->secid);
				return err;
			}
		}
	}

	return err;
}
#else
static inline int
xfrm_state_flush_secctx_check(u8 proto, struct xfrm_audit *audit_info)
{
	return 0;
}
#endif

int xfrm_state_flush(u8 proto, struct xfrm_audit *audit_info)
{
	int i, err = 0;

	spin_lock_bh(&xfrm_state_lock);
	err = xfrm_state_flush_secctx_check(proto, audit_info);
	if (err)
		goto out;

	for (i = 0; i <= xfrm_state_hmask; i++) {
		struct hlist_node *entry;
		struct xfrm_state *x;
restart:
		hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
			if (!xfrm_state_kern(x) &&
			    xfrm_id_proto_match(x->id.proto, proto)) {
				xfrm_state_hold(x);
				spin_unlock_bh(&xfrm_state_lock);

				err = xfrm_state_delete(x);
				xfrm_audit_state_delete(x, err ? 0 : 1,
							audit_info->loginuid,
							audit_info->secid);
				xfrm_state_put(x);

				spin_lock_bh(&xfrm_state_lock);
				goto restart;
			}
		}
	}
	err = 0;

out:
	spin_unlock_bh(&xfrm_state_lock);
	wake_up(&km_waitq);
	return err;
}
EXPORT_SYMBOL(xfrm_state_flush);
void xfrm_sad_getinfo(struct xfrmk_sadinfo *si)
{
	spin_lock_bh(&xfrm_state_lock);
	si->sadcnt = xfrm_state_num;
	si->sadhcnt = xfrm_state_hmask;
	si->sadhmcnt = xfrm_state_hashmax;
	spin_unlock_bh(&xfrm_state_lock);
}
EXPORT_SYMBOL(xfrm_sad_getinfo);
static int
xfrm_init_tempsel(struct xfrm_state *x, struct flowi *fl,
		  struct xfrm_tmpl *tmpl,
		  xfrm_address_t *daddr, xfrm_address_t *saddr,
		  unsigned short family)
{
	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
	if (!afinfo)
		return -1;
	afinfo->init_tempsel(x, fl, tmpl, daddr, saddr);
	xfrm_state_put_afinfo(afinfo);
	return 0;
}
static struct xfrm_state *__xfrm_state_lookup(xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family)
{
	unsigned int h = xfrm_spi_hash(daddr, spi, proto, family);
	struct xfrm_state *x;
	struct hlist_node *entry;

	hlist_for_each_entry(x, entry, xfrm_state_byspi+h, byspi) {
		if (x->props.family != family ||
		    x->id.spi != spi ||
		    x->id.proto != proto)
			continue;

		switch (family) {
		case AF_INET:
			if (x->id.daddr.a4 != daddr->a4)
				continue;
			break;
		case AF_INET6:
			if (!ipv6_addr_equal((struct in6_addr *)daddr,
					     (struct in6_addr *)
					     x->id.daddr.a6))
				continue;
			break;
		}

		xfrm_state_hold(x);
		return x;
	}

	return NULL;
}
static struct xfrm_state *__xfrm_state_lookup_byaddr(xfrm_address_t *daddr, xfrm_address_t *saddr, u8 proto, unsigned short family)
{
	unsigned int h = xfrm_src_hash(daddr, saddr, family);
	struct xfrm_state *x;
	struct hlist_node *entry;

	hlist_for_each_entry(x, entry, xfrm_state_bysrc+h, bysrc) {
		if (x->props.family != family ||
		    x->id.proto != proto)
			continue;

		switch (family) {
		case AF_INET:
			if (x->id.daddr.a4 != daddr->a4 ||
			    x->props.saddr.a4 != saddr->a4)
				continue;
			break;
		case AF_INET6:
			if (!ipv6_addr_equal((struct in6_addr *)daddr,
					     (struct in6_addr *)
					     x->id.daddr.a6) ||
			    !ipv6_addr_equal((struct in6_addr *)saddr,
					     (struct in6_addr *)
					     x->props.saddr.a6))
				continue;
			break;
		}

		xfrm_state_hold(x);
		return x;
	}

	return NULL;
}
static inline struct xfrm_state *
__xfrm_state_locate(struct xfrm_state *x, int use_spi, int family)
{
	if (use_spi)
		return __xfrm_state_lookup(&x->id.daddr, x->id.spi,
					   x->id.proto, family);
	else
		return __xfrm_state_lookup_byaddr(&x->id.daddr,
						  &x->props.saddr,
						  x->id.proto, family);
}
static void xfrm_hash_grow_check(int have_hash_collision)
{
	if (have_hash_collision &&
	    (xfrm_state_hmask + 1) < xfrm_state_hashmax &&
	    xfrm_state_num > xfrm_state_hmask)
		schedule_work(&xfrm_hash_work);
}
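/* Example: the tables start with 8 buckets (xfrm_state_hmask == 7).
 * Once xfrm_state_num exceeds 7 and an insertion lands on a chain that
 * already has an entry, the test above fires and xfrm_hash_work doubles
 * the tables via xfrm_hash_resize().
 */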
struct xfrm_state *
xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
		struct flowi *fl, struct xfrm_tmpl *tmpl,
		struct xfrm_policy *pol, int *err,
		unsigned short family)
{
	unsigned int h = xfrm_dst_hash(daddr, saddr, tmpl->reqid, family);
	struct hlist_node *entry;
	struct xfrm_state *x, *x0;
	int acquire_in_progress = 0;
	int error = 0;
	struct xfrm_state *best = NULL;

	spin_lock_bh(&xfrm_state_lock);
	hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
		if (x->props.family == family &&
		    x->props.reqid == tmpl->reqid &&
		    !(x->props.flags & XFRM_STATE_WILDRECV) &&
		    xfrm_state_addr_check(x, daddr, saddr, family) &&
		    tmpl->mode == x->props.mode &&
		    tmpl->id.proto == x->id.proto &&
		    (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) {
			/* Resolution logic:
			   1. There is a valid state with matching selector.
			      Done.
			   2. Valid state with inappropriate selector. Skip.

			   Entering area of "sysdeps".

			   3. If state is not valid, selector is temporary,
			      it selects only session which triggered
			      previous resolution. Key manager will do
			      something to install a state with proper
			      selector.
			 */
			if (x->km.state == XFRM_STATE_VALID) {
				if (!xfrm_selector_match(&x->sel, fl, x->sel.family) ||
				    !security_xfrm_state_pol_flow_match(x, pol, fl))
					continue;
				if (!best ||
				    best->km.dying > x->km.dying ||
				    (best->km.dying == x->km.dying &&
				     best->curlft.add_time < x->curlft.add_time))
					best = x;
			} else if (x->km.state == XFRM_STATE_ACQ) {
				acquire_in_progress = 1;
			} else if (x->km.state == XFRM_STATE_ERROR ||
				   x->km.state == XFRM_STATE_EXPIRED) {
				if (xfrm_selector_match(&x->sel, fl, x->sel.family) &&
				    security_xfrm_state_pol_flow_match(x, pol, fl))
					error = -ESRCH;
			}
		}
	}

	x = best;
	if (!x && !error && !acquire_in_progress) {
		if (tmpl->id.spi &&
		    (x0 = __xfrm_state_lookup(daddr, tmpl->id.spi,
					      tmpl->id.proto, family)) != NULL) {
			xfrm_state_put(x0);
			error = -EEXIST;
			goto out;
		}
		x = xfrm_state_alloc();
		if (x == NULL) {
			error = -ENOMEM;
			goto out;
		}
		/* Initialize temporary selector matching only
		 * to current session. */
		xfrm_init_tempsel(x, fl, tmpl, daddr, saddr, family);

		error = security_xfrm_state_alloc_acquire(x, pol->security, fl->secid);
		if (error) {
			x->km.state = XFRM_STATE_DEAD;
			xfrm_state_put(x);
			x = NULL;
			goto out;
		}

		if (km_query(x, tmpl, pol) == 0) {
			x->km.state = XFRM_STATE_ACQ;
			hlist_add_head(&x->bydst, xfrm_state_bydst+h);
			h = xfrm_src_hash(daddr, saddr, family);
			hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);
			if (x->id.spi) {
				h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, family);
				hlist_add_head(&x->byspi, xfrm_state_byspi+h);
			}
			x->lft.hard_add_expires_seconds = sysctl_xfrm_acq_expires;
			x->timer.expires = jiffies + sysctl_xfrm_acq_expires*HZ;
			add_timer(&x->timer);
			xfrm_state_num++;
			xfrm_hash_grow_check(x->bydst.next != NULL);
		} else {
			x->km.state = XFRM_STATE_DEAD;
			xfrm_state_put(x);
			x = NULL;
			error = -ESRCH;
		}
	}
out:
	if (x)
		xfrm_state_hold(x);
	else
		*err = acquire_in_progress ? -EAGAIN : error;
	spin_unlock_bh(&xfrm_state_lock);
	return x;
}
struct xfrm_state *
xfrm_stateonly_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
		    unsigned short family, u8 mode, u8 proto, u32 reqid)
{
	unsigned int h = xfrm_dst_hash(daddr, saddr, reqid, family);
	struct xfrm_state *rx = NULL, *x = NULL;
	struct hlist_node *entry;

	spin_lock(&xfrm_state_lock);
	hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
		if (x->props.family == family &&
		    x->props.reqid == reqid &&
		    !(x->props.flags & XFRM_STATE_WILDRECV) &&
		    xfrm_state_addr_check(x, daddr, saddr, family) &&
		    mode == x->props.mode &&
		    proto == x->id.proto &&
		    x->km.state == XFRM_STATE_VALID) {
			rx = x;
			break;
		}
	}

	if (rx)
		xfrm_state_hold(rx);
	spin_unlock(&xfrm_state_lock);

	return rx;
}
EXPORT_SYMBOL(xfrm_stateonly_find);
static void __xfrm_state_insert(struct xfrm_state *x)
{
	unsigned int h;

	x->genid = ++xfrm_state_genid;

	h = xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
			  x->props.reqid, x->props.family);
	hlist_add_head(&x->bydst, xfrm_state_bydst+h);

	h = xfrm_src_hash(&x->id.daddr, &x->props.saddr, x->props.family);
	hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);

	if (x->id.spi) {
		h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto,
				  x->props.family);

		hlist_add_head(&x->byspi, xfrm_state_byspi+h);
	}

	mod_timer(&x->timer, jiffies + HZ);
	if (x->replay_maxage)
		mod_timer(&x->rtimer, jiffies + x->replay_maxage);

	wake_up(&km_waitq);

	xfrm_state_num++;

	xfrm_hash_grow_check(x->bydst.next != NULL);
}
/* xfrm_state_lock is held */
static void __xfrm_state_bump_genids(struct xfrm_state *xnew)
{
	unsigned short family = xnew->props.family;
	u32 reqid = xnew->props.reqid;
	struct xfrm_state *x;
	struct hlist_node *entry;
	unsigned int h;

	h = xfrm_dst_hash(&xnew->id.daddr, &xnew->props.saddr, reqid, family);
	hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
		if (x->props.family == family &&
		    x->props.reqid == reqid &&
		    !xfrm_addr_cmp(&x->id.daddr, &xnew->id.daddr, family) &&
		    !xfrm_addr_cmp(&x->props.saddr, &xnew->props.saddr, family))
			x->genid = xfrm_state_genid;
	}
}

void xfrm_state_insert(struct xfrm_state *x)
{
	spin_lock_bh(&xfrm_state_lock);
	__xfrm_state_bump_genids(x);
	__xfrm_state_insert(x);
	spin_unlock_bh(&xfrm_state_lock);
}
EXPORT_SYMBOL(xfrm_state_insert);
/* xfrm_state_lock is held */
static struct xfrm_state *__find_acq_core(unsigned short family, u8 mode, u32 reqid, u8 proto, xfrm_address_t *daddr, xfrm_address_t *saddr, int create)
{
	unsigned int h = xfrm_dst_hash(daddr, saddr, reqid, family);
	struct hlist_node *entry;
	struct xfrm_state *x;

	hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
		if (x->props.reqid != reqid ||
		    x->props.mode != mode ||
		    x->props.family != family ||
		    x->km.state != XFRM_STATE_ACQ ||
		    x->id.spi != 0 ||
		    x->id.proto != proto)
			continue;

		switch (family) {
		case AF_INET:
			if (x->id.daddr.a4 != daddr->a4 ||
			    x->props.saddr.a4 != saddr->a4)
				continue;
			break;
		case AF_INET6:
			if (!ipv6_addr_equal((struct in6_addr *)x->id.daddr.a6,
					     (struct in6_addr *)daddr) ||
			    !ipv6_addr_equal((struct in6_addr *)
					     x->props.saddr.a6,
					     (struct in6_addr *)saddr))
				continue;
			break;
		}

		xfrm_state_hold(x);
		return x;
	}

	if (!create)
		return NULL;

	x = xfrm_state_alloc();
	if (likely(x)) {
		switch (family) {
		case AF_INET:
			x->sel.daddr.a4 = daddr->a4;
			x->sel.saddr.a4 = saddr->a4;
			x->sel.prefixlen_d = 32;
			x->sel.prefixlen_s = 32;
			x->props.saddr.a4 = saddr->a4;
			x->id.daddr.a4 = daddr->a4;
			break;

		case AF_INET6:
			ipv6_addr_copy((struct in6_addr *)x->sel.daddr.a6,
				       (struct in6_addr *)daddr);
			ipv6_addr_copy((struct in6_addr *)x->sel.saddr.a6,
				       (struct in6_addr *)saddr);
			x->sel.prefixlen_d = 128;
			x->sel.prefixlen_s = 128;
			ipv6_addr_copy((struct in6_addr *)x->props.saddr.a6,
				       (struct in6_addr *)saddr);
			ipv6_addr_copy((struct in6_addr *)x->id.daddr.a6,
				       (struct in6_addr *)daddr);
			break;
		}

		x->km.state = XFRM_STATE_ACQ;
		x->id.proto = proto;
		x->props.family = family;
		x->props.mode = mode;
		x->props.reqid = reqid;
		x->lft.hard_add_expires_seconds = sysctl_xfrm_acq_expires;
		xfrm_state_hold(x);
		x->timer.expires = jiffies + sysctl_xfrm_acq_expires*HZ;
		add_timer(&x->timer);
		hlist_add_head(&x->bydst, xfrm_state_bydst+h);
		h = xfrm_src_hash(daddr, saddr, family);
		hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);

		xfrm_state_num++;

		xfrm_hash_grow_check(x->bydst.next != NULL);
	}

	return x;
}

static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq);
int xfrm_state_add(struct xfrm_state *x)
{
	struct xfrm_state *x1;
	int family;
	int err;
	int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);

	family = x->props.family;

	spin_lock_bh(&xfrm_state_lock);

	x1 = __xfrm_state_locate(x, use_spi, family);
	if (x1) {
		xfrm_state_put(x1);
		x1 = NULL;
		err = -EEXIST;
		goto out;
	}

	if (use_spi && x->km.seq) {
		x1 = __xfrm_find_acq_byseq(x->km.seq);
		if (x1 && ((x1->id.proto != x->id.proto) ||
		    xfrm_addr_cmp(&x1->id.daddr, &x->id.daddr, family))) {
			xfrm_state_put(x1);
			x1 = NULL;
		}
	}

	if (use_spi && !x1)
		x1 = __find_acq_core(family, x->props.mode, x->props.reqid,
				     x->id.proto,
				     &x->id.daddr, &x->props.saddr, 0);

	__xfrm_state_bump_genids(x);
	__xfrm_state_insert(x);
	err = 0;

out:
	spin_unlock_bh(&xfrm_state_lock);

	if (x1) {
		xfrm_state_delete(x1);
		xfrm_state_put(x1);
	}

	return err;
}
EXPORT_SYMBOL(xfrm_state_add);
#ifdef CONFIG_XFRM_MIGRATE
struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, int *errp)
{
	int err = -ENOMEM;
	struct xfrm_state *x = xfrm_state_alloc();
	if (!x)
		goto error;

	memcpy(&x->id, &orig->id, sizeof(x->id));
	memcpy(&x->sel, &orig->sel, sizeof(x->sel));
	memcpy(&x->lft, &orig->lft, sizeof(x->lft));
	x->props.mode = orig->props.mode;
	x->props.replay_window = orig->props.replay_window;
	x->props.reqid = orig->props.reqid;
	x->props.family = orig->props.family;
	x->props.saddr = orig->props.saddr;

	if (orig->aalg) {
		x->aalg = xfrm_algo_clone(orig->aalg);
		if (!x->aalg)
			goto error;
	}
	x->props.aalgo = orig->props.aalgo;

	if (orig->ealg) {
		x->ealg = xfrm_algo_clone(orig->ealg);
		if (!x->ealg)
			goto error;
	}
	x->props.ealgo = orig->props.ealgo;

	if (orig->calg) {
		x->calg = xfrm_algo_clone(orig->calg);
		if (!x->calg)
			goto error;
	}
	x->props.calgo = orig->props.calgo;

	if (orig->encap) {
		x->encap = kmemdup(orig->encap, sizeof(*x->encap), GFP_KERNEL);
		if (!x->encap)
			goto error;
	}

	if (orig->coaddr) {
		x->coaddr = kmemdup(orig->coaddr, sizeof(*x->coaddr),
				    GFP_KERNEL);
		if (!x->coaddr)
			goto error;
	}

	err = xfrm_init_state(x);
	if (err)
		goto error;

	x->props.flags = orig->props.flags;

	x->curlft.add_time = orig->curlft.add_time;
	x->km.state = orig->km.state;
	x->km.seq = orig->km.seq;

	return x;

 error:
	if (errp)
		*errp = err;
	if (x) {
		kfree(x->aalg);
		kfree(x->ealg);
		kfree(x->calg);
		kfree(x->encap);
		kfree(x->coaddr);
	}
	kfree(x);
	return NULL;
}
EXPORT_SYMBOL(xfrm_state_clone);
/* xfrm_state_lock is held */
struct xfrm_state * xfrm_migrate_state_find(struct xfrm_migrate *m)
{
	unsigned int h;
	struct xfrm_state *x;
	struct hlist_node *entry;

	if (m->reqid) {
		h = xfrm_dst_hash(&m->old_daddr, &m->old_saddr,
				  m->reqid, m->old_family);
		hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
			if (x->props.mode != m->mode ||
			    x->id.proto != m->proto)
				continue;
			if (m->reqid && x->props.reqid != m->reqid)
				continue;
			if (xfrm_addr_cmp(&x->id.daddr, &m->old_daddr,
					  m->old_family) ||
			    xfrm_addr_cmp(&x->props.saddr, &m->old_saddr,
					  m->old_family))
				continue;
			xfrm_state_hold(x);
			return x;
		}
	} else {
		h = xfrm_src_hash(&m->old_daddr, &m->old_saddr,
				  m->old_family);
		hlist_for_each_entry(x, entry, xfrm_state_bysrc+h, bysrc) {
			if (x->props.mode != m->mode ||
			    x->id.proto != m->proto)
				continue;
			if (xfrm_addr_cmp(&x->id.daddr, &m->old_daddr,
					  m->old_family) ||
			    xfrm_addr_cmp(&x->props.saddr, &m->old_saddr,
					  m->old_family))
				continue;
			xfrm_state_hold(x);
			return x;
		}
	}

	return NULL;
}
EXPORT_SYMBOL(xfrm_migrate_state_find);
struct xfrm_state * xfrm_state_migrate(struct xfrm_state *x,
				       struct xfrm_migrate *m)
{
	struct xfrm_state *xc;
	int err;

	xc = xfrm_state_clone(x, &err);
	if (!xc)
		return NULL;

	memcpy(&xc->id.daddr, &m->new_daddr, sizeof(xc->id.daddr));
	memcpy(&xc->props.saddr, &m->new_saddr, sizeof(xc->props.saddr));

	/* add state */
	if (!xfrm_addr_cmp(&x->id.daddr, &m->new_daddr, m->new_family)) {
		/* Care is needed when the destination address of the
		   state is updated, as it is part of the lookup triplet. */
		xfrm_state_insert(xc);
	} else {
		if ((err = xfrm_state_add(xc)) < 0)
			goto error;
	}

	return xc;
error:
	kfree(xc);
	return NULL;
}
EXPORT_SYMBOL(xfrm_state_migrate);
#endif
int xfrm_state_update(struct xfrm_state *x)
{
	struct xfrm_state *x1;
	int err;
	int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);

	spin_lock_bh(&xfrm_state_lock);
	x1 = __xfrm_state_locate(x, use_spi, x->props.family);

	err = -ESRCH;
	if (!x1)
		goto out;

	if (xfrm_state_kern(x1)) {
		xfrm_state_put(x1);
		err = -EEXIST;
		goto out;
	}

	if (x1->km.state == XFRM_STATE_ACQ) {
		__xfrm_state_insert(x);
		x = NULL;
	}
	err = 0;

out:
	spin_unlock_bh(&xfrm_state_lock);

	if (err)
		return err;

	if (!x) {
		xfrm_state_delete(x1);
		xfrm_state_put(x1);
		return 0;
	}

	err = -EINVAL;
	spin_lock_bh(&x1->lock);
	if (likely(x1->km.state == XFRM_STATE_VALID)) {
		if (x->encap && x1->encap)
			memcpy(x1->encap, x->encap, sizeof(*x1->encap));
		if (x->coaddr && x1->coaddr) {
			memcpy(x1->coaddr, x->coaddr, sizeof(*x1->coaddr));
		}
		if (!use_spi && memcmp(&x1->sel, &x->sel, sizeof(x1->sel)))
			memcpy(&x1->sel, &x->sel, sizeof(x1->sel));
		memcpy(&x1->lft, &x->lft, sizeof(x1->lft));
		x1->km.dying = 0;

		mod_timer(&x1->timer, jiffies + HZ);
		if (x1->curlft.use_time)
			xfrm_state_check_expire(x1);

		err = 0;
	}
	spin_unlock_bh(&x1->lock);

	xfrm_state_put(x1);

	return err;
}
EXPORT_SYMBOL(xfrm_state_update);
int xfrm_state_check_expire(struct xfrm_state *x)
{
	if (!x->curlft.use_time)
		x->curlft.use_time = get_seconds();

	if (x->km.state != XFRM_STATE_VALID)
		return -EINVAL;

	if (x->curlft.bytes >= x->lft.hard_byte_limit ||
	    x->curlft.packets >= x->lft.hard_packet_limit) {
		x->km.state = XFRM_STATE_EXPIRED;
		mod_timer(&x->timer, jiffies);
		return -EINVAL;
	}

	if (!x->km.dying &&
	    (x->curlft.bytes >= x->lft.soft_byte_limit ||
	     x->curlft.packets >= x->lft.soft_packet_limit)) {
		x->km.dying = 1;
		km_state_expired(x, 0, 0);
	}
	return 0;
}
EXPORT_SYMBOL(xfrm_state_check_expire);
struct xfrm_state *
xfrm_state_lookup(xfrm_address_t *daddr, __be32 spi, u8 proto,
		  unsigned short family)
{
	struct xfrm_state *x;

	spin_lock_bh(&xfrm_state_lock);
	x = __xfrm_state_lookup(daddr, spi, proto, family);
	spin_unlock_bh(&xfrm_state_lock);
	return x;
}
EXPORT_SYMBOL(xfrm_state_lookup);

struct xfrm_state *
xfrm_state_lookup_byaddr(xfrm_address_t *daddr, xfrm_address_t *saddr,
			 u8 proto, unsigned short family)
{
	struct xfrm_state *x;

	spin_lock_bh(&xfrm_state_lock);
	x = __xfrm_state_lookup_byaddr(daddr, saddr, proto, family);
	spin_unlock_bh(&xfrm_state_lock);
	return x;
}
EXPORT_SYMBOL(xfrm_state_lookup_byaddr);
struct xfrm_state *
xfrm_find_acq(u8 mode, u32 reqid, u8 proto,
	      xfrm_address_t *daddr, xfrm_address_t *saddr,
	      int create, unsigned short family)
{
	struct xfrm_state *x;

	spin_lock_bh(&xfrm_state_lock);
	x = __find_acq_core(family, mode, reqid, proto, daddr, saddr, create);
	spin_unlock_bh(&xfrm_state_lock);

	return x;
}
EXPORT_SYMBOL(xfrm_find_acq);
#ifdef CONFIG_XFRM_SUB_POLICY
int
xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n,
	       unsigned short family)
{
	int err = 0;
	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
	if (!afinfo)
		return -EAFNOSUPPORT;

	spin_lock_bh(&xfrm_state_lock);
	if (afinfo->tmpl_sort)
		err = afinfo->tmpl_sort(dst, src, n);
	spin_unlock_bh(&xfrm_state_lock);
	xfrm_state_put_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_tmpl_sort);

int
xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
		unsigned short family)
{
	int err = 0;
	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
	if (!afinfo)
		return -EAFNOSUPPORT;

	spin_lock_bh(&xfrm_state_lock);
	if (afinfo->state_sort)
		err = afinfo->state_sort(dst, src, n);
	spin_unlock_bh(&xfrm_state_lock);
	xfrm_state_put_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_state_sort);
#endif
/* Silly enough, but I'm too lazy to build a resolution list */

static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq)
{
	int i;

	for (i = 0; i <= xfrm_state_hmask; i++) {
		struct hlist_node *entry;
		struct xfrm_state *x;

		hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
			if (x->km.seq == seq &&
			    x->km.state == XFRM_STATE_ACQ) {
				xfrm_state_hold(x);
				return x;
			}
		}
	}
	return NULL;
}

struct xfrm_state *xfrm_find_acq_byseq(u32 seq)
{
	struct xfrm_state *x;

	spin_lock_bh(&xfrm_state_lock);
	x = __xfrm_find_acq_byseq(seq);
	spin_unlock_bh(&xfrm_state_lock);
	return x;
}
EXPORT_SYMBOL(xfrm_find_acq_byseq);
u32 xfrm_get_acqseq(void)
{
	u32 res;
	static u32 acqseq;
	static DEFINE_SPINLOCK(acqseq_lock);

	spin_lock_bh(&acqseq_lock);
	res = (++acqseq ? : ++acqseq);
	spin_unlock_bh(&acqseq_lock);
	return res;
}
EXPORT_SYMBOL(xfrm_get_acqseq);
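/* The "(++acqseq ? : ++acqseq)" idiom increments the static counter and,
 * should it wrap to zero, increments it once more, so zero is never
 * handed out -- a km.seq of 0 is treated as "no sequence number"
 * elsewhere (cf. the x->km.seq test in xfrm_state_add()).
 */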
int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high)
{
	unsigned int h;
	struct xfrm_state *x0;
	int err = -ENOENT;
	__be32 minspi = htonl(low);
	__be32 maxspi = htonl(high);

	spin_lock_bh(&x->lock);
	if (x->km.state == XFRM_STATE_DEAD)
		goto unlock;

	err = 0;
	if (x->id.spi)
		goto unlock;

	err = -ENOENT;

	if (minspi == maxspi) {
		x0 = xfrm_state_lookup(&x->id.daddr, minspi, x->id.proto, x->props.family);
		if (x0) {
			xfrm_state_put(x0);
			goto unlock;
		}
		x->id.spi = minspi;
	} else {
		u32 spi = 0;
		for (h = 0; h < high-low+1; h++) {
			spi = low + net_random()%(high-low+1);
			x0 = xfrm_state_lookup(&x->id.daddr, htonl(spi), x->id.proto, x->props.family);
			if (x0 == NULL) {
				x->id.spi = htonl(spi);
				break;
			}
			xfrm_state_put(x0);
		}
	}
	if (x->id.spi) {
		spin_lock_bh(&xfrm_state_lock);
		h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, x->props.family);
		hlist_add_head(&x->byspi, xfrm_state_byspi+h);
		spin_unlock_bh(&xfrm_state_lock);

		err = 0;
	}

unlock:
	spin_unlock_bh(&x->lock);

	return err;
}
EXPORT_SYMBOL(xfrm_alloc_spi);
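/* Allocation strategy: when low == high the caller asked for one
 * specific SPI, and it is only checked for uniqueness; otherwise up to
 * high-low+1 random probes are made inside [low, high].  Note that the
 * probes draw with replacement, so a free SPI can in principle be
 * missed even when one exists.
 */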
int xfrm_state_walk(u8 proto, int (*func)(struct xfrm_state *, int, void*),
		    void *data)
{
	int i;
	struct xfrm_state *x, *last = NULL;
	struct hlist_node *entry;
	int count = 0;
	int err = 0;

	spin_lock_bh(&xfrm_state_lock);
	for (i = 0; i <= xfrm_state_hmask; i++) {
		hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
			if (!xfrm_id_proto_match(x->id.proto, proto))
				continue;
			if (last) {
				err = func(last, count, data);
				if (err)
					goto out;
			}
			last = x;
			count++;
		}
	}
	if (count == 0) {
		err = -ENOENT;
		goto out;
	}
	err = func(last, 0, data);
out:
	spin_unlock_bh(&xfrm_state_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_state_walk);
void xfrm_replay_notify(struct xfrm_state *x, int event)
{
	struct km_event c;
	/* We send notify messages in case
	 *  1. we updated one of the sequence numbers, and the seqno
	 *     difference is at least x->replay_maxdiff; in this case we
	 *     also update the timeout of our timer function
	 *  2. x->replay_maxage has elapsed since the last update,
	 *     and there were changes
	 *
	 *  The state structure must be locked!
	 */

	switch (event) {
	case XFRM_REPLAY_UPDATE:
		if (x->replay_maxdiff &&
		    (x->replay.seq - x->preplay.seq < x->replay_maxdiff) &&
		    (x->replay.oseq - x->preplay.oseq < x->replay_maxdiff)) {
			if (x->xflags & XFRM_TIME_DEFER)
				event = XFRM_REPLAY_TIMEOUT;
			else
				return;
		}

		break;

	case XFRM_REPLAY_TIMEOUT:
		if ((x->replay.seq == x->preplay.seq) &&
		    (x->replay.bitmap == x->preplay.bitmap) &&
		    (x->replay.oseq == x->preplay.oseq)) {
			x->xflags |= XFRM_TIME_DEFER;
			return;
		}

		break;
	}

	memcpy(&x->preplay, &x->replay, sizeof(struct xfrm_replay_state));
	c.event = XFRM_MSG_NEWAE;
	c.data.aevent = event;
	km_state_notify(x, &c);

	if (x->replay_maxage &&
	    !mod_timer(&x->rtimer, jiffies + x->replay_maxage))
		x->xflags &= ~XFRM_TIME_DEFER;
}
static void xfrm_replay_timer_handler(unsigned long data)
{
	struct xfrm_state *x = (struct xfrm_state*)data;

	spin_lock(&x->lock);

	if (x->km.state == XFRM_STATE_VALID) {
		if (xfrm_aevent_is_on())
			xfrm_replay_notify(x, XFRM_REPLAY_TIMEOUT);
		else
			x->xflags |= XFRM_TIME_DEFER;
	}

	spin_unlock(&x->lock);
}
int xfrm_replay_check(struct xfrm_state *x, __be32 net_seq)
{
	u32 diff;
	u32 seq = ntohl(net_seq);

	if (unlikely(seq == 0))
		return -EINVAL;

	if (likely(seq > x->replay.seq))
		return 0;

	diff = x->replay.seq - seq;
	if (diff >= min_t(unsigned int, x->props.replay_window,
			  sizeof(x->replay.bitmap) * 8)) {
		x->stats.replay_window++;
		return -EINVAL;
	}

	if (x->replay.bitmap & (1U << diff)) {
		x->stats.replay++;
		return -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL(xfrm_replay_check);
void xfrm_replay_advance(struct xfrm_state *x, __be32 net_seq)
{
	u32 diff;
	u32 seq = ntohl(net_seq);

	if (seq > x->replay.seq) {
		diff = seq - x->replay.seq;
		if (diff < x->props.replay_window)
			x->replay.bitmap = ((x->replay.bitmap) << diff) | 1;
		else
			x->replay.bitmap = 1;
		x->replay.seq = seq;
	} else {
		diff = x->replay.seq - seq;
		x->replay.bitmap |= (1U << diff);
	}

	if (xfrm_aevent_is_on())
		xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
}
EXPORT_SYMBOL(xfrm_replay_advance);
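/* Worked replay-window example (x->props.replay_window == 32,
 * x->replay.seq == 100, bitmap bit 0 marking seq 100):
 *   - seq 101 arrives: xfrm_replay_advance() shifts the bitmap left by
 *     one, sets bit 0 and moves replay.seq to 101;
 *   - seq 98 arrives: diff == 3, bit 3 is clear, so xfrm_replay_check()
 *     accepts it and advance sets bit 3;
 *   - seq 98 arrives again: bit 3 is already set and the packet is
 *     rejected as a replay.
 */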
static struct list_head xfrm_km_list = LIST_HEAD_INIT(xfrm_km_list);
static DEFINE_RWLOCK(xfrm_km_lock);

void km_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
{
	struct xfrm_mgr *km;

	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list)
		if (km->notify_policy)
			km->notify_policy(xp, dir, c);
	read_unlock(&xfrm_km_lock);
}

void km_state_notify(struct xfrm_state *x, struct km_event *c)
{
	struct xfrm_mgr *km;
	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list)
		if (km->notify)
			km->notify(x, c);
	read_unlock(&xfrm_km_lock);
}

EXPORT_SYMBOL(km_policy_notify);
EXPORT_SYMBOL(km_state_notify);
void km_state_expired(struct xfrm_state *x, int hard, u32 pid)
{
	struct km_event c;

	c.data.hard = hard;
	c.pid = pid;
	c.event = XFRM_MSG_EXPIRE;
	km_state_notify(x, &c);

	if (hard)
		wake_up(&km_waitq);
}
EXPORT_SYMBOL(km_state_expired);

/*
 * We send to all registered managers regardless of failure;
 * we are happy with one success.
 */
int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol)
{
	int err = -EINVAL, acqret;
	struct xfrm_mgr *km;

	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list) {
		acqret = km->acquire(x, t, pol, XFRM_POLICY_OUT);
		if (!acqret)
			err = acqret;
	}
	read_unlock(&xfrm_km_lock);
	return err;
}
EXPORT_SYMBOL(km_query);
int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport)
{
	int err = -EINVAL;
	struct xfrm_mgr *km;

	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list) {
		if (km->new_mapping)
			err = km->new_mapping(x, ipaddr, sport);
		if (!err)
			break;
	}
	read_unlock(&xfrm_km_lock);
	return err;
}
EXPORT_SYMBOL(km_new_mapping);

void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 pid)
{
	struct km_event c;

	c.data.hard = hard;
	c.pid = pid;
	c.event = XFRM_MSG_POLEXPIRE;
	km_policy_notify(pol, dir, &c);

	if (hard)
		wake_up(&km_waitq);
}
EXPORT_SYMBOL(km_policy_expired);
#ifdef CONFIG_XFRM_MIGRATE
int km_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
	       struct xfrm_migrate *m, int num_migrate)
{
	int err = -EINVAL;
	int ret;
	struct xfrm_mgr *km;

	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list) {
		if (km->migrate) {
			ret = km->migrate(sel, dir, type, m, num_migrate);
			if (!ret)
				err = ret;
		}
	}
	read_unlock(&xfrm_km_lock);
	return err;
}
EXPORT_SYMBOL(km_migrate);
#endif

int km_report(u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr)
{
	int err = -EINVAL;
	int ret;
	struct xfrm_mgr *km;

	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list) {
		if (km->report) {
			ret = km->report(proto, sel, addr);
			if (!ret)
				err = ret;
		}
	}
	read_unlock(&xfrm_km_lock);
	return err;
}
EXPORT_SYMBOL(km_report);
int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
{
	int err;
	u8 *data;
	struct xfrm_mgr *km;
	struct xfrm_policy *pol = NULL;

	if (optlen <= 0 || optlen > PAGE_SIZE)
		return -EMSGSIZE;

	data = kmalloc(optlen, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	err = -EFAULT;
	if (copy_from_user(data, optval, optlen))
		goto out;

	err = -EINVAL;
	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list) {
		pol = km->compile_policy(sk, optname, data,
					 optlen, &err);
		if (err >= 0)
			break;
	}
	read_unlock(&xfrm_km_lock);

	if (err >= 0) {
		xfrm_sk_policy_insert(sk, err, pol);
		xfrm_pol_put(pol);
		err = 0;
	}

out:
	kfree(data);
	return err;
}
EXPORT_SYMBOL(xfrm_user_policy);
int xfrm_register_km(struct xfrm_mgr *km)
{
	write_lock_bh(&xfrm_km_lock);
	list_add_tail(&km->list, &xfrm_km_list);
	write_unlock_bh(&xfrm_km_lock);
	return 0;
}
EXPORT_SYMBOL(xfrm_register_km);

int xfrm_unregister_km(struct xfrm_mgr *km)
{
	write_lock_bh(&xfrm_km_lock);
	list_del(&km->list);
	write_unlock_bh(&xfrm_km_lock);
	return 0;
}
EXPORT_SYMBOL(xfrm_unregister_km);
int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo)
{
	int err = 0;
	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;
	write_lock_bh(&xfrm_state_afinfo_lock);
	if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL))
		err = -ENOBUFS;
	else
		xfrm_state_afinfo[afinfo->family] = afinfo;
	write_unlock_bh(&xfrm_state_afinfo_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_state_register_afinfo);

int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
{
	int err = 0;
	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;
	write_lock_bh(&xfrm_state_afinfo_lock);
	if (likely(xfrm_state_afinfo[afinfo->family] != NULL)) {
		if (unlikely(xfrm_state_afinfo[afinfo->family] != afinfo))
			err = -EINVAL;
		else
			xfrm_state_afinfo[afinfo->family] = NULL;
	}
	write_unlock_bh(&xfrm_state_afinfo_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_state_unregister_afinfo);
static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family)
{
	struct xfrm_state_afinfo *afinfo;
	if (unlikely(family >= NPROTO))
		return NULL;
	read_lock(&xfrm_state_afinfo_lock);
	afinfo = xfrm_state_afinfo[family];
	if (unlikely(!afinfo))
		read_unlock(&xfrm_state_afinfo_lock);
	return afinfo;
}

static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo)
{
	read_unlock(&xfrm_state_afinfo_lock);
}
/* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
void xfrm_state_delete_tunnel(struct xfrm_state *x)
{
	if (x->tunnel) {
		struct xfrm_state *t = x->tunnel;

		if (atomic_read(&t->tunnel_users) == 2)
			xfrm_state_delete(t);
		atomic_dec(&t->tunnel_users);
		xfrm_state_put(t);
		x->tunnel = NULL;
	}
}
EXPORT_SYMBOL(xfrm_state_delete_tunnel);
int xfrm_state_mtu(struct xfrm_state *x, int mtu)
{
	int res;

	spin_lock_bh(&x->lock);
	if (x->km.state == XFRM_STATE_VALID &&
	    x->type && x->type->get_mtu)
		res = x->type->get_mtu(x, mtu);
	else
		res = mtu - x->props.header_len;
	spin_unlock_bh(&x->lock);
	return res;
}
int xfrm_init_state(struct xfrm_state *x)
{
	struct xfrm_state_afinfo *afinfo;
	int family = x->props.family;
	int err;

	err = -EAFNOSUPPORT;
	afinfo = xfrm_state_get_afinfo(family);
	if (!afinfo)
		goto error;

	err = 0;
	if (afinfo->init_flags)
		err = afinfo->init_flags(x);

	xfrm_state_put_afinfo(afinfo);

	if (err)
		goto error;

	err = -EPROTONOSUPPORT;
	x->inner_mode = xfrm_get_mode(x->props.mode, x->sel.family);
	if (x->inner_mode == NULL)
		goto error;

	if (!(x->inner_mode->flags & XFRM_MODE_FLAG_TUNNEL) &&
	    family != x->sel.family)
		goto error;

	x->type = xfrm_get_type(x->id.proto, family);
	if (x->type == NULL)
		goto error;

	err = x->type->init_state(x);
	if (err)
		goto error;

	err = -EPROTONOSUPPORT;
	x->outer_mode = xfrm_get_mode(x->props.mode, family);
	if (x->outer_mode == NULL)
		goto error;

	err = 0;
	x->km.state = XFRM_STATE_VALID;

error:
	return err;
}
EXPORT_SYMBOL(xfrm_init_state);
void __init xfrm_state_init(void)
{
	unsigned int sz;

	sz = sizeof(struct hlist_head) * 8;

	xfrm_state_bydst = xfrm_hash_alloc(sz);
	xfrm_state_bysrc = xfrm_hash_alloc(sz);
	xfrm_state_byspi = xfrm_hash_alloc(sz);
	if (!xfrm_state_bydst || !xfrm_state_bysrc || !xfrm_state_byspi)
		panic("XFRM: Cannot allocate bydst/bysrc/byspi hashes.");
	xfrm_state_hmask = ((sz / sizeof(struct hlist_head)) - 1);

	INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task);
}
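/* With sz == 8 * sizeof(struct hlist_head), each of the three tables
 * starts with 8 buckets and xfrm_state_hmask == 7; xfrm_hash_resize()
 * then doubles them on demand, bounded by xfrm_state_hashmax.
 */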
#ifdef CONFIG_AUDITSYSCALL
static inline void xfrm_audit_common_stateinfo(struct xfrm_state *x,
					       struct audit_buffer *audit_buf)
{
	if (x->security)
		audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
				 x->security->ctx_alg, x->security->ctx_doi,
				 x->security->ctx_str);

	switch (x->props.family) {
	case AF_INET:
		audit_log_format(audit_buf, " src=%u.%u.%u.%u dst=%u.%u.%u.%u",
				 NIPQUAD(x->props.saddr.a4),
				 NIPQUAD(x->id.daddr.a4));
		break;
	case AF_INET6:
		{
			struct in6_addr saddr6, daddr6;

			memcpy(&saddr6, x->props.saddr.a6,
			       sizeof(struct in6_addr));
			memcpy(&daddr6, x->id.daddr.a6,
			       sizeof(struct in6_addr));
			audit_log_format(audit_buf,
					 " src=" NIP6_FMT " dst=" NIP6_FMT,
					 NIP6(saddr6), NIP6(daddr6));
		}
		break;
	}
}
void
xfrm_audit_state_add(struct xfrm_state *x, int result, u32 auid, u32 sid)
{
	struct audit_buffer *audit_buf;
	u32 spi;
	extern int audit_enabled;

	if (audit_enabled == 0)
		return;
	audit_buf = xfrm_audit_start(auid, sid);
	if (audit_buf == NULL)
		return;
	audit_log_format(audit_buf, " op=SAD-add res=%u", result);
	xfrm_audit_common_stateinfo(x, audit_buf);
	spi = ntohl(x->id.spi);
	audit_log_format(audit_buf, " spi=%u(0x%x)", spi, spi);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_state_add);

void
xfrm_audit_state_delete(struct xfrm_state *x, int result, u32 auid, u32 sid)
{
	struct audit_buffer *audit_buf;
	u32 spi;
	extern int audit_enabled;

	if (audit_enabled == 0)
		return;
	audit_buf = xfrm_audit_start(auid, sid);
	if (audit_buf == NULL)
		return;
	audit_log_format(audit_buf, " op=SAD-delete res=%u", result);
	xfrm_audit_common_stateinfo(x, audit_buf);
	spi = ntohl(x->id.spi);
	audit_log_format(audit_buf, " spi=%u(0x%x)", spi, spi);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_state_delete);
#endif /* CONFIG_AUDITSYSCALL */