6 * Kazunori MIYAZAWA @USAGI
7 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
9 * YOSHIFUJI Hideaki @USAGI
10 * Split up af-specific functions
11 * Derek Atkins <derek@ihtfp.com>
12 * Add UDP Encapsulation
16 #include <linux/workqueue.h>
18 #include <linux/pfkeyv2.h>
19 #include <linux/ipsec.h>
20 #include <linux/module.h>
21 #include <linux/cache.h>
22 #include <linux/audit.h>
23 #include <asm/uaccess.h>
25 #include "xfrm_hash.h"
/* Module-wide state tables, sysctls and forward declarations for the
 * IPsec SA (xfrm_state) database.
 * NOTE(review): this excerpt is a sampled dump; original lines are
 * missing between the numbered lines below — verify against the full file. */
28 EXPORT_SYMBOL(xfrm_nl);
/* Async-event (AE) sysctl defaults: notification interval and replay
 * sequence-number threshold. */
30 u32 sysctl_xfrm_aevent_etime __read_mostly = XFRM_AE_ETIME;
31 EXPORT_SYMBOL(sysctl_xfrm_aevent_etime);
33 u32 sysctl_xfrm_aevent_rseqth __read_mostly = XFRM_AE_SEQT_SIZE;
34 EXPORT_SYMBOL(sysctl_xfrm_aevent_rseqth);
/* Lifetime (seconds) of ACQUIRE placeholder states awaiting key-manager
 * resolution. */
36 u32 sysctl_xfrm_acq_expires __read_mostly = 30;
38 /* Each xfrm_state may be linked to two tables:
40 1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl)
41 2. Hash table by (daddr,family,reqid) to find what SAs exist for given
42 destination/tunnel endpoint. (output)
/* Protects the three hash tables and the counters below. */
45 static DEFINE_SPINLOCK(xfrm_state_lock);
47 /* Hash table to find appropriate SA towards given target (endpoint
48 * of tunnel or destination of transport mode) allowed by selector.
50 * Main use is finding SA after policy selected tunnel or transport mode.
51 * Also, it can be used by ah/esp icmp error handler to find offending SA.
/* The three SA lookup tables (by dst, by src, by SPI) share one size mask. */
53 static struct hlist_head *xfrm_state_bydst __read_mostly;
54 static struct hlist_head *xfrm_state_bysrc __read_mostly;
55 static struct hlist_head *xfrm_state_byspi __read_mostly;
56 static unsigned int xfrm_state_hmask __read_mostly;
57 static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;
58 static unsigned int xfrm_state_num;
59 static unsigned int xfrm_state_genid;
61 static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family);
62 static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);
/* Replay auditing compiles to a no-op without CONFIG_AUDITSYSCALL. */
64 #ifdef CONFIG_AUDITSYSCALL
65 static void xfrm_audit_state_replay(struct xfrm_state *x,
66 struct sk_buff *skb, __be32 net_seq);
68 #define xfrm_audit_state_replay(x, s, sq) do { ; } while (0)
69 #endif /* CONFIG_AUDITSYSCALL */
/* Bucket index in the by-destination table for (daddr,saddr,reqid,family). */
71 static inline unsigned int xfrm_dst_hash(xfrm_address_t *daddr,
72 xfrm_address_t *saddr,
74 unsigned short family)
76 return __xfrm_dst_hash(daddr, saddr, reqid, family, xfrm_state_hmask);
/* Bucket index in the by-source table for (daddr,saddr,family). */
79 static inline unsigned int xfrm_src_hash(xfrm_address_t *daddr,
80 xfrm_address_t *saddr,
81 unsigned short family)
83 return __xfrm_src_hash(daddr, saddr, family, xfrm_state_hmask);
/* Bucket index in the by-SPI table for (daddr,spi,proto,family). */
86 static inline unsigned int
87 xfrm_spi_hash(xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family)
89 return __xfrm_spi_hash(daddr, spi, proto, family, xfrm_state_hmask);
/* Re-bucket every state on @list into the three new tables using
 * @nhashmask; used while resizing the SA hash tables. */
92 static void xfrm_hash_transfer(struct hlist_head *list,
93 struct hlist_head *ndsttable,
94 struct hlist_head *nsrctable,
95 struct hlist_head *nspitable,
96 unsigned int nhashmask)
98 struct hlist_node *entry, *tmp;
101 hlist_for_each_entry_safe(x, entry, tmp, list, bydst) {
104 h = __xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
105 x->props.reqid, x->props.family,
107 hlist_add_head(&x->bydst, ndsttable+h);
109 h = __xfrm_src_hash(&x->id.daddr, &x->props.saddr,
112 hlist_add_head(&x->bysrc, nsrctable+h);
115 h = __xfrm_spi_hash(&x->id.daddr, x->id.spi,
116 x->id.proto, x->props.family,
118 hlist_add_head(&x->byspi, nspitable+h);
/* Size in bytes of the next (doubled) hash table. */
123 static unsigned long xfrm_hash_new_size(void)
125 return ((xfrm_state_hmask + 1) << 1) *
126 sizeof(struct hlist_head);
/* Serializes hash-table resizes scheduled from xfrm_hash_grow_check(). */
129 static DEFINE_MUTEX(hash_resize_mutex);
/* Work handler: allocate doubled dst/src/spi tables, rehash every state
 * under xfrm_state_lock, then free the old tables outside the lock. */
131 static void xfrm_hash_resize(struct work_struct *__unused)
133 struct hlist_head *ndst, *nsrc, *nspi, *odst, *osrc, *ospi;
134 unsigned long nsize, osize;
135 unsigned int nhashmask, ohashmask;
138 mutex_lock(&hash_resize_mutex);
140 nsize = xfrm_hash_new_size();
141 ndst = xfrm_hash_alloc(nsize);
/* Allocation failures unwind the tables allocated so far and bail out. */
144 nsrc = xfrm_hash_alloc(nsize);
146 xfrm_hash_free(ndst, nsize);
149 nspi = xfrm_hash_alloc(nsize);
151 xfrm_hash_free(ndst, nsize);
152 xfrm_hash_free(nsrc, nsize);
156 spin_lock_bh(&xfrm_state_lock);
158 nhashmask = (nsize / sizeof(struct hlist_head)) - 1U;
159 for (i = xfrm_state_hmask; i >= 0; i--)
160 xfrm_hash_transfer(xfrm_state_bydst+i, ndst, nsrc, nspi,
/* Swap in the new tables, remembering the old ones so they can be
 * freed after the lock is dropped. */
163 odst = xfrm_state_bydst;
164 osrc = xfrm_state_bysrc;
165 ospi = xfrm_state_byspi;
166 ohashmask = xfrm_state_hmask;
168 xfrm_state_bydst = ndst;
169 xfrm_state_bysrc = nsrc;
170 xfrm_state_byspi = nspi;
171 xfrm_state_hmask = nhashmask;
173 spin_unlock_bh(&xfrm_state_lock);
175 osize = (ohashmask + 1) * sizeof(struct hlist_head);
176 xfrm_hash_free(odst, osize);
177 xfrm_hash_free(osrc, osize);
178 xfrm_hash_free(ospi, osize);
181 mutex_unlock(&hash_resize_mutex);
184 static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize);
/* Key managers sleep here waiting for SA database events. */
186 DECLARE_WAIT_QUEUE_HEAD(km_waitq);
187 EXPORT_SYMBOL(km_waitq);
/* Per-family afinfo registry, guarded by its own rwlock. */
189 static DEFINE_RWLOCK(xfrm_state_afinfo_lock);
190 static struct xfrm_state_afinfo *xfrm_state_afinfo[NPROTO];
/* Deferred destruction: dead states are queued on gc_list and torn
 * down later from the gc work item. */
192 static struct work_struct xfrm_state_gc_work;
193 static HLIST_HEAD(xfrm_state_gc_list);
194 static DEFINE_SPINLOCK(xfrm_state_gc_lock);
196 int __xfrm_state_delete(struct xfrm_state *x);
198 int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
199 void km_state_expired(struct xfrm_state *x, int hard, u32 pid);
/* Take the afinfo write lock and return the registered entry for
 * @family; on a NULL entry the lock is released again.
 * NOTE(review): the failure-return lines are elided from this excerpt. */
201 static struct xfrm_state_afinfo *xfrm_state_lock_afinfo(unsigned int family)
203 struct xfrm_state_afinfo *afinfo;
204 if (unlikely(family >= NPROTO))
206 write_lock_bh(&xfrm_state_afinfo_lock);
207 afinfo = xfrm_state_afinfo[family];
208 if (unlikely(!afinfo))
209 write_unlock_bh(&xfrm_state_afinfo_lock);
/* Drop the write lock taken by xfrm_state_lock_afinfo(). */
213 static void xfrm_state_unlock_afinfo(struct xfrm_state_afinfo *afinfo)
215 write_unlock_bh(&xfrm_state_afinfo_lock);
/* Register a transform type (e.g. AH/ESP) in the family's type map.
 * Returns -EAFNOSUPPORT when the family has no afinfo registered. */
218 int xfrm_register_type(struct xfrm_type *type, unsigned short family)
220 struct xfrm_state_afinfo *afinfo = xfrm_state_lock_afinfo(family);
221 struct xfrm_type **typemap;
224 if (unlikely(afinfo == NULL))
225 return -EAFNOSUPPORT;
226 typemap = afinfo->type_map;
/* Only install when the proto slot is still free. */
228 if (likely(typemap[type->proto] == NULL))
229 typemap[type->proto] = type;
232 xfrm_state_unlock_afinfo(afinfo);
235 EXPORT_SYMBOL(xfrm_register_type);
/* Remove a previously registered transform type; the slot is only
 * cleared when it still holds exactly @type. */
237 int xfrm_unregister_type(struct xfrm_type *type, unsigned short family)
239 struct xfrm_state_afinfo *afinfo = xfrm_state_lock_afinfo(family);
240 struct xfrm_type **typemap;
243 if (unlikely(afinfo == NULL))
244 return -EAFNOSUPPORT;
245 typemap = afinfo->type_map;
247 if (unlikely(typemap[type->proto] != type))
250 typemap[type->proto] = NULL;
251 xfrm_state_unlock_afinfo(afinfo);
254 EXPORT_SYMBOL(xfrm_unregister_type);
/* Look up the transform type for @proto/@family, taking a module ref.
 * On a miss, tries request_module("xfrm-type-...") once and retries. */
256 static struct xfrm_type *xfrm_get_type(u8 proto, unsigned short family)
258 struct xfrm_state_afinfo *afinfo;
259 struct xfrm_type **typemap;
260 struct xfrm_type *type;
261 int modload_attempted = 0;
264 afinfo = xfrm_state_get_afinfo(family);
265 if (unlikely(afinfo == NULL))
267 typemap = afinfo->type_map;
269 type = typemap[proto];
/* A type whose module is unloading counts as absent. */
270 if (unlikely(type && !try_module_get(type->owner)))
272 if (!type && !modload_attempted) {
273 xfrm_state_put_afinfo(afinfo);
274 request_module("xfrm-type-%d-%d", family, proto);
275 modload_attempted = 1;
279 xfrm_state_put_afinfo(afinfo);
/* Release the module reference taken by xfrm_get_type(). */
283 static void xfrm_put_type(struct xfrm_type *type)
285 module_put(type->owner);
/* Register an encapsulation mode for @family; pins the afinfo owner
 * module for as long as the mode stays installed. */
288 int xfrm_register_mode(struct xfrm_mode *mode, int family)
290 struct xfrm_state_afinfo *afinfo;
291 struct xfrm_mode **modemap;
294 if (unlikely(mode->encap >= XFRM_MODE_MAX))
297 afinfo = xfrm_state_lock_afinfo(family);
298 if (unlikely(afinfo == NULL))
299 return -EAFNOSUPPORT;
302 modemap = afinfo->mode_map;
/* Slot already taken: leave the existing registration in place. */
303 if (modemap[mode->encap])
307 if (!try_module_get(afinfo->owner))
310 mode->afinfo = afinfo;
311 modemap[mode->encap] = mode;
315 xfrm_state_unlock_afinfo(afinfo);
318 EXPORT_SYMBOL(xfrm_register_mode);
/* Remove a registered mode and drop the afinfo owner module reference. */
320 int xfrm_unregister_mode(struct xfrm_mode *mode, int family)
322 struct xfrm_state_afinfo *afinfo;
323 struct xfrm_mode **modemap;
326 if (unlikely(mode->encap >= XFRM_MODE_MAX))
329 afinfo = xfrm_state_lock_afinfo(family);
330 if (unlikely(afinfo == NULL))
331 return -EAFNOSUPPORT;
334 modemap = afinfo->mode_map;
335 if (likely(modemap[mode->encap] == mode)) {
336 modemap[mode->encap] = NULL;
337 module_put(mode->afinfo->owner);
341 xfrm_state_unlock_afinfo(afinfo);
344 EXPORT_SYMBOL(xfrm_unregister_mode);
/* Look up the mode object for @encap/@family, taking a module ref;
 * tries request_module("xfrm-mode-...") once on a miss, then retries. */
346 static struct xfrm_mode *xfrm_get_mode(unsigned int encap, int family)
348 struct xfrm_state_afinfo *afinfo;
349 struct xfrm_mode *mode;
350 int modload_attempted = 0;
352 if (unlikely(encap >= XFRM_MODE_MAX))
356 afinfo = xfrm_state_get_afinfo(family);
357 if (unlikely(afinfo == NULL))
360 mode = afinfo->mode_map[encap];
/* A mode whose module is unloading counts as absent. */
361 if (unlikely(mode && !try_module_get(mode->owner)))
363 if (!mode && !modload_attempted) {
364 xfrm_state_put_afinfo(afinfo);
365 request_module("xfrm-mode-%d-%d", family, encap);
366 modload_attempted = 1;
370 xfrm_state_put_afinfo(afinfo);
/* Release the module reference taken by xfrm_get_mode(). */
374 static void xfrm_put_mode(struct xfrm_mode *mode)
376 module_put(mode->owner);
/* Final teardown of a dead state: stop both timers, release mode/type
 * modules and LSM data. Runs only from the GC work queue. */
379 static void xfrm_state_gc_destroy(struct xfrm_state *x)
381 del_timer_sync(&x->timer);
382 del_timer_sync(&x->rtimer);
389 xfrm_put_mode(x->inner_mode);
391 xfrm_put_mode(x->outer_mode);
393 x->type->destructor(x);
394 xfrm_put_type(x->type);
396 security_xfrm_state_free(x);
/* Work handler: detach the pending GC list under the lock, then destroy
 * each queued state outside it. */
400 static void xfrm_state_gc_task(struct work_struct *data)
402 struct xfrm_state *x;
403 struct hlist_node *entry, *tmp;
404 struct hlist_head gc_list;
406 spin_lock_bh(&xfrm_state_gc_lock);
407 gc_list.first = xfrm_state_gc_list.first;
408 INIT_HLIST_HEAD(&xfrm_state_gc_list);
409 spin_unlock_bh(&xfrm_state_gc_lock);
411 hlist_for_each_entry_safe(x, entry, tmp, &gc_list, bydst)
412 xfrm_state_gc_destroy(x);
/* Convert seconds to jiffies, clamped below MAX_SCHEDULE_TIMEOUT. */
417 static inline unsigned long make_jiffies(long secs)
419 if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
420 return MAX_SCHEDULE_TIMEOUT-1;
/* Per-state lifetime timer: enforces hard/soft add/use expiries, sends
 * soft-expiry notifications, and deletes hard-expired states.
 * NOTE(review): the matching spin_lock(&x->lock) and several branch
 * bodies are in lines elided from this excerpt. */
425 static void xfrm_timer_handler(unsigned long data)
427 struct xfrm_state *x = (struct xfrm_state*)data;
428 unsigned long now = get_seconds();
429 long next = LONG_MAX;
434 if (x->km.state == XFRM_STATE_DEAD)
436 if (x->km.state == XFRM_STATE_EXPIRED)
/* Hard limits: remaining time until the state must die. */
438 if (x->lft.hard_add_expires_seconds) {
439 long tmo = x->lft.hard_add_expires_seconds +
440 x->curlft.add_time - now;
446 if (x->lft.hard_use_expires_seconds) {
447 long tmo = x->lft.hard_use_expires_seconds +
448 (x->curlft.use_time ? : now) - now;
/* Soft limits: only warn the key manager; the state stays usable. */
456 if (x->lft.soft_add_expires_seconds) {
457 long tmo = x->lft.soft_add_expires_seconds +
458 x->curlft.add_time - now;
464 if (x->lft.soft_use_expires_seconds) {
465 long tmo = x->lft.soft_use_expires_seconds +
466 (x->curlft.use_time ? : now) - now;
475 km_state_expired(x, 0, 0);
/* Re-arm for the nearest future deadline, if any. */
477 if (next != LONG_MAX)
478 mod_timer(&x->timer, jiffies + make_jiffies(next));
/* An unresolved ACQUIRE (no SPI yet) is marked EXPIRED before delete. */
483 if (x->km.state == XFRM_STATE_ACQ && x->id.spi == 0) {
484 x->km.state = XFRM_STATE_EXPIRED;
490 err = __xfrm_state_delete(x);
491 if (!err && x->id.spi)
492 km_state_expired(x, 1, 0);
494 xfrm_audit_state_delete(x, err ? 0 : 1,
495 audit_get_loginuid(current->audit_context), 0);
498 spin_unlock(&x->lock);
501 static void xfrm_replay_timer_handler(unsigned long data);
/* Allocate and initialize a new xfrm_state: refcount 1, idle timers,
 * and infinite byte/packet lifetime limits by default. */
503 struct xfrm_state *xfrm_state_alloc(void)
505 struct xfrm_state *x;
507 x = kzalloc(sizeof(struct xfrm_state), GFP_ATOMIC);
510 atomic_set(&x->refcnt, 1);
511 atomic_set(&x->tunnel_users, 0);
512 INIT_HLIST_NODE(&x->bydst);
513 INIT_HLIST_NODE(&x->bysrc);
514 INIT_HLIST_NODE(&x->byspi);
515 setup_timer(&x->timer, xfrm_timer_handler, (unsigned long)x);
516 setup_timer(&x->rtimer, xfrm_replay_timer_handler,
518 x->curlft.add_time = get_seconds();
519 x->lft.soft_byte_limit = XFRM_INF;
520 x->lft.soft_packet_limit = XFRM_INF;
521 x->lft.hard_byte_limit = XFRM_INF;
522 x->lft.hard_packet_limit = XFRM_INF;
523 x->replay_maxage = 0;
524 x->replay_maxdiff = 0;
525 spin_lock_init(&x->lock);
529 EXPORT_SYMBOL(xfrm_state_alloc);
/* Last-reference destructor: queue the (already dead) state on the GC
 * list; actual teardown happens in xfrm_state_gc_task(). */
531 void __xfrm_state_destroy(struct xfrm_state *x)
533 BUG_TRAP(x->km.state == XFRM_STATE_DEAD);
535 spin_lock_bh(&xfrm_state_gc_lock);
/* bydst is reused as the GC list linkage once the state is unhashed. */
536 hlist_add_head(&x->bydst, &xfrm_state_gc_list);
537 spin_unlock_bh(&xfrm_state_gc_lock);
538 schedule_work(&xfrm_state_gc_work);
540 EXPORT_SYMBOL(__xfrm_state_destroy);
/* Mark @x dead and unhash it from all three tables; caller holds
 * x->lock. Drops the creation reference from xfrm_state_alloc(). */
542 int __xfrm_state_delete(struct xfrm_state *x)
546 if (x->km.state != XFRM_STATE_DEAD) {
547 x->km.state = XFRM_STATE_DEAD;
548 spin_lock(&xfrm_state_lock);
549 hlist_del(&x->bydst);
550 hlist_del(&x->bysrc);
552 hlist_del(&x->byspi);
554 spin_unlock(&xfrm_state_lock);
556 /* All xfrm_state objects are created by xfrm_state_alloc.
557 * The xfrm_state_alloc call gives a reference, and that
558 * is what we are dropping here.
566 EXPORT_SYMBOL(__xfrm_state_delete);
/* Locked wrapper around __xfrm_state_delete(). */
568 int xfrm_state_delete(struct xfrm_state *x)
572 spin_lock_bh(&x->lock);
573 err = __xfrm_state_delete(x);
574 spin_unlock_bh(&x->lock);
578 EXPORT_SYMBOL(xfrm_state_delete);
580 #ifdef CONFIG_SECURITY_NETWORK_XFRM
/* LSM pre-check for a flush: fail if any state matching @proto may not
 * be deleted by the current security context (the refusal is audited). */
582 xfrm_state_flush_secctx_check(u8 proto, struct xfrm_audit *audit_info)
586 for (i = 0; i <= xfrm_state_hmask; i++) {
587 struct hlist_node *entry;
588 struct xfrm_state *x;
590 hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
591 if (xfrm_id_proto_match(x->id.proto, proto) &&
592 (err = security_xfrm_state_delete(x)) != 0) {
593 xfrm_audit_state_delete(x, 0,
594 audit_info->loginuid,
/* Stub used when CONFIG_SECURITY_NETWORK_XFRM is off. */
605 xfrm_state_flush_secctx_check(u8 proto, struct xfrm_audit *audit_info)
/* Delete every non-kernel-owned SA matching @proto, auditing each one.
 * The table lock is dropped around each individual delete. */
611 int xfrm_state_flush(u8 proto, struct xfrm_audit *audit_info)
615 spin_lock_bh(&xfrm_state_lock);
616 err = xfrm_state_flush_secctx_check(proto, audit_info);
620 for (i = 0; i <= xfrm_state_hmask; i++) {
621 struct hlist_node *entry;
622 struct xfrm_state *x;
624 hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
625 if (!xfrm_state_kern(x) &&
626 xfrm_id_proto_match(x->id.proto, proto)) {
628 spin_unlock_bh(&xfrm_state_lock);
630 err = xfrm_state_delete(x);
631 xfrm_audit_state_delete(x, err ? 0 : 1,
632 audit_info->loginuid,
636 spin_lock_bh(&xfrm_state_lock);
644 spin_unlock_bh(&xfrm_state_lock);
648 EXPORT_SYMBOL(xfrm_state_flush);
/* Snapshot SAD counters (count, hash mask, hash max) for userspace. */
650 void xfrm_sad_getinfo(struct xfrmk_sadinfo *si)
652 spin_lock_bh(&xfrm_state_lock);
653 si->sadcnt = xfrm_state_num;
654 si->sadhcnt = xfrm_state_hmask;
655 si->sadhmcnt = xfrm_state_hashmax;
656 spin_unlock_bh(&xfrm_state_lock);
658 EXPORT_SYMBOL(xfrm_sad_getinfo);
/* Fill a temporary selector on @x from the flow/template via the
 * family-specific init_tempsel hook. */
661 xfrm_init_tempsel(struct xfrm_state *x, struct flowi *fl,
662 struct xfrm_tmpl *tmpl,
663 xfrm_address_t *daddr, xfrm_address_t *saddr,
664 unsigned short family)
666 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
669 afinfo->init_tempsel(x, fl, tmpl, daddr, saddr);
670 xfrm_state_put_afinfo(afinfo);
/* Find an SA by (daddr,spi,proto,family) in the by-SPI table.
 * Caller holds xfrm_state_lock. */
674 static struct xfrm_state *__xfrm_state_lookup(xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family)
676 unsigned int h = xfrm_spi_hash(daddr, spi, proto, family);
677 struct xfrm_state *x;
678 struct hlist_node *entry;
680 hlist_for_each_entry(x, entry, xfrm_state_byspi+h, byspi) {
681 if (x->props.family != family ||
683 x->id.proto != proto)
/* Address comparison is per-family: a4 for IPv4, in6 for IPv6. */
688 if (x->id.daddr.a4 != daddr->a4)
692 if (!ipv6_addr_equal((struct in6_addr *)daddr,
/* Find an SA by (daddr,saddr,proto,family) in the by-source table. */
706 static struct xfrm_state *__xfrm_state_lookup_byaddr(xfrm_address_t *daddr, xfrm_address_t *saddr, u8 proto, unsigned short family)
708 unsigned int h = xfrm_src_hash(daddr, saddr, family);
709 struct xfrm_state *x;
710 struct hlist_node *entry;
712 hlist_for_each_entry(x, entry, xfrm_state_bysrc+h, bysrc) {
713 if (x->props.family != family ||
714 x->id.proto != proto)
719 if (x->id.daddr.a4 != daddr->a4 ||
720 x->props.saddr.a4 != saddr->a4)
724 if (!ipv6_addr_equal((struct in6_addr *)daddr,
727 !ipv6_addr_equal((struct in6_addr *)saddr,
/* Locate a duplicate of @x: by SPI when @use_spi, else by addresses. */
741 static inline struct xfrm_state *
742 __xfrm_state_locate(struct xfrm_state *x, int use_spi, int family)
745 return __xfrm_state_lookup(&x->id.daddr, x->id.spi,
746 x->id.proto, family);
748 return __xfrm_state_lookup_byaddr(&x->id.daddr,
750 x->id.proto, family);
/* Schedule a hash-table resize when chains collide and the table is
 * both below hashmax and holds more states than buckets. */
753 static void xfrm_hash_grow_check(int have_hash_collision)
755 if (have_hash_collision &&
756 (xfrm_state_hmask + 1) < xfrm_state_hashmax &&
757 xfrm_state_num > xfrm_state_hmask)
758 schedule_work(&xfrm_hash_work);
/* Core output-path SA resolution for (daddr,saddr,flow,template,policy):
 * pick the best VALID matching state, or create an ACQUIRE placeholder
 * and ask the key manager via km_query() when nothing usable exists.
 * NOTE(review): several original lines are elided from this excerpt. */
762 xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
763 struct flowi *fl, struct xfrm_tmpl *tmpl,
764 struct xfrm_policy *pol, int *err,
765 unsigned short family)
768 struct hlist_node *entry;
769 struct xfrm_state *x, *x0;
770 int acquire_in_progress = 0;
772 struct xfrm_state *best = NULL;
774 spin_lock_bh(&xfrm_state_lock);
775 h = xfrm_dst_hash(daddr, saddr, tmpl->reqid, family);
776 hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
777 if (x->props.family == family &&
778 x->props.reqid == tmpl->reqid &&
779 !(x->props.flags & XFRM_STATE_WILDRECV) &&
780 xfrm_state_addr_check(x, daddr, saddr, family) &&
781 tmpl->mode == x->props.mode &&
782 tmpl->id.proto == x->id.proto &&
783 (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) {
785 1. There is a valid state with matching selector.
787 2. Valid state with inappropriate selector. Skip.
789 Entering area of "sysdeps".
791 3. If state is not valid, selector is temporary,
792 it selects only session which triggered
793 previous resolution. Key manager will do
794 something to install a state with proper
/* Among VALID matches keep the freshest, least-dying candidate. */
797 if (x->km.state == XFRM_STATE_VALID) {
798 if (!xfrm_selector_match(&x->sel, fl, x->sel.family) ||
799 !security_xfrm_state_pol_flow_match(x, pol, fl))
802 best->km.dying > x->km.dying ||
803 (best->km.dying == x->km.dying &&
804 best->curlft.add_time < x->curlft.add_time))
806 } else if (x->km.state == XFRM_STATE_ACQ) {
807 acquire_in_progress = 1;
808 } else if (x->km.state == XFRM_STATE_ERROR ||
809 x->km.state == XFRM_STATE_EXPIRED) {
810 if (xfrm_selector_match(&x->sel, fl, x->sel.family) &&
811 security_xfrm_state_pol_flow_match(x, pol, fl))
/* No usable state: create an ACQUIRE entry unless one is pending. */
818 if (!x && !error && !acquire_in_progress) {
820 (x0 = __xfrm_state_lookup(daddr, tmpl->id.spi,
821 tmpl->id.proto, family)) != NULL) {
826 x = xfrm_state_alloc();
831 /* Initialize temporary selector matching only
832 * to current session. */
833 xfrm_init_tempsel(x, fl, tmpl, daddr, saddr, family);
835 error = security_xfrm_state_alloc_acquire(x, pol->security, fl->secid);
837 x->km.state = XFRM_STATE_DEAD;
/* Key manager accepted: hash the ACQ state into all tables and arm
 * its expiry timer with sysctl_xfrm_acq_expires. */
843 if (km_query(x, tmpl, pol) == 0) {
844 x->km.state = XFRM_STATE_ACQ;
845 hlist_add_head(&x->bydst, xfrm_state_bydst+h);
846 h = xfrm_src_hash(daddr, saddr, family);
847 hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);
849 h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, family);
850 hlist_add_head(&x->byspi, xfrm_state_byspi+h);
852 x->lft.hard_add_expires_seconds = sysctl_xfrm_acq_expires;
853 x->timer.expires = jiffies + sysctl_xfrm_acq_expires*HZ;
854 add_timer(&x->timer);
856 xfrm_hash_grow_check(x->bydst.next != NULL);
858 x->km.state = XFRM_STATE_DEAD;
868 *err = acquire_in_progress ? -EAGAIN : error;
869 spin_unlock_bh(&xfrm_state_lock);
/* Lookup-only variant of xfrm_state_find(): return a VALID state
 * matching the tuple, never creating an ACQUIRE placeholder. */
874 xfrm_stateonly_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
875 unsigned short family, u8 mode, u8 proto, u32 reqid)
878 struct xfrm_state *rx = NULL, *x = NULL;
879 struct hlist_node *entry;
881 spin_lock(&xfrm_state_lock);
882 h = xfrm_dst_hash(daddr, saddr, reqid, family);
883 hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
884 if (x->props.family == family &&
885 x->props.reqid == reqid &&
886 !(x->props.flags & XFRM_STATE_WILDRECV) &&
887 xfrm_state_addr_check(x, daddr, saddr, family) &&
888 mode == x->props.mode &&
889 proto == x->id.proto &&
890 x->km.state == XFRM_STATE_VALID) {
898 spin_unlock(&xfrm_state_lock);
903 EXPORT_SYMBOL(xfrm_stateonly_find);
/* Hash @x into all three tables, assign it a fresh genid, and arm its
 * timers. Caller holds xfrm_state_lock. */
905 static void __xfrm_state_insert(struct xfrm_state *x)
909 x->genid = ++xfrm_state_genid;
911 h = xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
912 x->props.reqid, x->props.family);
913 hlist_add_head(&x->bydst, xfrm_state_bydst+h);
915 h = xfrm_src_hash(&x->id.daddr, &x->props.saddr, x->props.family);
916 hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);
919 h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto,
922 hlist_add_head(&x->byspi, xfrm_state_byspi+h);
925 mod_timer(&x->timer, jiffies + HZ);
926 if (x->replay_maxage)
927 mod_timer(&x->rtimer, jiffies + x->replay_maxage);
933 xfrm_hash_grow_check(x->bydst.next != NULL);
936 /* xfrm_state_lock is held */
/* Propagate the current genid onto every state sharing @xnew's
 * (family,reqid,daddr,saddr) key, invalidating their cached bundles. */
937 static void __xfrm_state_bump_genids(struct xfrm_state *xnew)
939 unsigned short family = xnew->props.family;
940 u32 reqid = xnew->props.reqid;
941 struct xfrm_state *x;
942 struct hlist_node *entry;
945 h = xfrm_dst_hash(&xnew->id.daddr, &xnew->props.saddr, reqid, family);
946 hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
947 if (x->props.family == family &&
948 x->props.reqid == reqid &&
949 !xfrm_addr_cmp(&x->id.daddr, &xnew->id.daddr, family) &&
950 !xfrm_addr_cmp(&x->props.saddr, &xnew->props.saddr, family))
951 x->genid = xfrm_state_genid;
/* Public insert: bump sibling genids then hash the state, locked. */
955 void xfrm_state_insert(struct xfrm_state *x)
957 spin_lock_bh(&xfrm_state_lock);
958 __xfrm_state_bump_genids(x);
959 __xfrm_state_insert(x);
960 spin_unlock_bh(&xfrm_state_lock);
962 EXPORT_SYMBOL(xfrm_state_insert);
964 /* xfrm_state_lock is held */
/* Find an existing ACQUIRE state for the tuple; when absent and @create
 * is set, allocate one with a host-width selector and arm its expiry. */
965 static struct xfrm_state *__find_acq_core(unsigned short family, u8 mode, u32 reqid, u8 proto, xfrm_address_t *daddr, xfrm_address_t *saddr, int create)
967 unsigned int h = xfrm_dst_hash(daddr, saddr, reqid, family);
968 struct hlist_node *entry;
969 struct xfrm_state *x;
971 hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
972 if (x->props.reqid != reqid ||
973 x->props.mode != mode ||
974 x->props.family != family ||
975 x->km.state != XFRM_STATE_ACQ ||
977 x->id.proto != proto)
982 if (x->id.daddr.a4 != daddr->a4 ||
983 x->props.saddr.a4 != saddr->a4)
987 if (!ipv6_addr_equal((struct in6_addr *)x->id.daddr.a6,
988 (struct in6_addr *)daddr) ||
989 !ipv6_addr_equal((struct in6_addr *)
991 (struct in6_addr *)saddr))
1003 x = xfrm_state_alloc();
/* New ACQ state: selector covers exactly this host pair (/32 or /128). */
1007 x->sel.daddr.a4 = daddr->a4;
1008 x->sel.saddr.a4 = saddr->a4;
1009 x->sel.prefixlen_d = 32;
1010 x->sel.prefixlen_s = 32;
1011 x->props.saddr.a4 = saddr->a4;
1012 x->id.daddr.a4 = daddr->a4;
1016 ipv6_addr_copy((struct in6_addr *)x->sel.daddr.a6,
1017 (struct in6_addr *)daddr);
1018 ipv6_addr_copy((struct in6_addr *)x->sel.saddr.a6,
1019 (struct in6_addr *)saddr);
1020 x->sel.prefixlen_d = 128;
1021 x->sel.prefixlen_s = 128;
1022 ipv6_addr_copy((struct in6_addr *)x->props.saddr.a6,
1023 (struct in6_addr *)saddr);
1024 ipv6_addr_copy((struct in6_addr *)x->id.daddr.a6,
1025 (struct in6_addr *)daddr);
1029 x->km.state = XFRM_STATE_ACQ;
1030 x->id.proto = proto;
1031 x->props.family = family;
1032 x->props.mode = mode;
1033 x->props.reqid = reqid;
/* Placeholder expires after sysctl_xfrm_acq_expires seconds. */
1034 x->lft.hard_add_expires_seconds = sysctl_xfrm_acq_expires;
1036 x->timer.expires = jiffies + sysctl_xfrm_acq_expires*HZ;
1037 add_timer(&x->timer);
1038 hlist_add_head(&x->bydst, xfrm_state_bydst+h);
1039 h = xfrm_src_hash(daddr, saddr, family);
1040 hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);
1044 xfrm_hash_grow_check(x->bydst.next != NULL);
1050 static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq);
/* Add a new SA: reject exact duplicates, then locate a matching ACQUIRE
 * placeholder (by km.seq or by tuple) and delete it after inserting @x. */
1052 int xfrm_state_add(struct xfrm_state *x)
1054 struct xfrm_state *x1;
1057 int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);
1059 family = x->props.family;
1061 spin_lock_bh(&xfrm_state_lock);
1063 x1 = __xfrm_state_locate(x, use_spi, family);
/* A seq match that disagrees on proto/daddr is not ours. */
1071 if (use_spi && x->km.seq) {
1072 x1 = __xfrm_find_acq_byseq(x->km.seq);
1073 if (x1 && ((x1->id.proto != x->id.proto) ||
1074 xfrm_addr_cmp(&x1->id.daddr, &x->id.daddr, family))) {
1081 x1 = __find_acq_core(family, x->props.mode, x->props.reqid,
1083 &x->id.daddr, &x->props.saddr, 0);
1085 __xfrm_state_bump_genids(x);
1086 __xfrm_state_insert(x);
1090 spin_unlock_bh(&xfrm_state_lock);
/* The superseded ACQ state (if any) is deleted outside the table lock. */
1093 xfrm_state_delete(x1);
1099 EXPORT_SYMBOL(xfrm_state_add);
1101 #ifdef CONFIG_XFRM_MIGRATE
/* Deep-copy @orig into a fresh state for migration: id, selector,
 * lifetimes, algorithms, encap and care-of address. Error paths (elided
 * here) set *errp and free the partial clone. */
1102 struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, int *errp)
1105 struct xfrm_state *x = xfrm_state_alloc();
1109 memcpy(&x->id, &orig->id, sizeof(x->id));
1110 memcpy(&x->sel, &orig->sel, sizeof(x->sel));
1111 memcpy(&x->lft, &orig->lft, sizeof(x->lft));
1112 x->props.mode = orig->props.mode;
1113 x->props.replay_window = orig->props.replay_window;
1114 x->props.reqid = orig->props.reqid;
1115 x->props.family = orig->props.family;
1116 x->props.saddr = orig->props.saddr;
/* Each algorithm blob is cloned separately. */
1119 x->aalg = xfrm_algo_clone(orig->aalg);
1123 x->props.aalgo = orig->props.aalgo;
1126 x->ealg = xfrm_algo_clone(orig->ealg);
1130 x->props.ealgo = orig->props.ealgo;
1133 x->calg = xfrm_algo_clone(orig->calg);
1137 x->props.calgo = orig->props.calgo;
1140 x->encap = kmemdup(orig->encap, sizeof(*x->encap), GFP_KERNEL);
1146 x->coaddr = kmemdup(orig->coaddr, sizeof(*x->coaddr),
1152 err = xfrm_init_state(x);
1156 x->props.flags = orig->props.flags;
1158 x->curlft.add_time = orig->curlft.add_time;
1159 x->km.state = orig->km.state;
1160 x->km.seq = orig->km.seq;
1177 EXPORT_SYMBOL(xfrm_state_clone);
1179 /* xfrm_state_lock is held */
/* Find the state being migrated: first try the by-dst table with the
 * old addresses, then fall back to a by-src scan. */
1180 struct xfrm_state * xfrm_migrate_state_find(struct xfrm_migrate *m)
1183 struct xfrm_state *x;
1184 struct hlist_node *entry;
1187 h = xfrm_dst_hash(&m->old_daddr, &m->old_saddr,
1188 m->reqid, m->old_family);
1189 hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
1190 if (x->props.mode != m->mode ||
1191 x->id.proto != m->proto)
1193 if (m->reqid && x->props.reqid != m->reqid)
1195 if (xfrm_addr_cmp(&x->id.daddr, &m->old_daddr,
1197 xfrm_addr_cmp(&x->props.saddr, &m->old_saddr,
1204 h = xfrm_src_hash(&m->old_daddr, &m->old_saddr,
1206 hlist_for_each_entry(x, entry, xfrm_state_bysrc+h, bysrc) {
1207 if (x->props.mode != m->mode ||
1208 x->id.proto != m->proto)
1210 if (xfrm_addr_cmp(&x->id.daddr, &m->old_daddr,
1212 xfrm_addr_cmp(&x->props.saddr, &m->old_saddr,
1222 EXPORT_SYMBOL(xfrm_migrate_state_find);
/* Clone @x and retarget the clone at the migration's new addresses;
 * insert directly when only the source address changed. */
1224 struct xfrm_state * xfrm_state_migrate(struct xfrm_state *x,
1225 struct xfrm_migrate *m)
1227 struct xfrm_state *xc;
1230 xc = xfrm_state_clone(x, &err);
1234 memcpy(&xc->id.daddr, &m->new_daddr, sizeof(xc->id.daddr));
1235 memcpy(&xc->props.saddr, &m->new_saddr, sizeof(xc->props.saddr));
1238 if (!xfrm_addr_cmp(&x->id.daddr, &m->new_daddr, m->new_family)) {
1239 /* a care is needed when the destination address of the
1240 state is to be updated as it is a part of triplet */
1241 xfrm_state_insert(xc);
1243 if ((err = xfrm_state_add(xc)) < 0)
1252 EXPORT_SYMBOL(xfrm_state_migrate);
/* Replace/refresh an existing SA with the attributes of @x: copies
 * encap, coaddr, selector and lifetimes into the resident state x1. */
1255 int xfrm_state_update(struct xfrm_state *x)
1257 struct xfrm_state *x1;
1259 int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);
1261 spin_lock_bh(&xfrm_state_lock);
1262 x1 = __xfrm_state_locate(x, use_spi, x->props.family);
/* Kernel-owned states may not be replaced from userspace. */
1268 if (xfrm_state_kern(x1)) {
/* An ACQUIRE placeholder is simply superseded by inserting @x. */
1274 if (x1->km.state == XFRM_STATE_ACQ) {
1275 __xfrm_state_insert(x);
1281 spin_unlock_bh(&xfrm_state_lock);
1287 xfrm_state_delete(x1);
1293 spin_lock_bh(&x1->lock);
1294 if (likely(x1->km.state == XFRM_STATE_VALID)) {
1295 if (x->encap && x1->encap)
1296 memcpy(x1->encap, x->encap, sizeof(*x1->encap));
1297 if (x->coaddr && x1->coaddr) {
1298 memcpy(x1->coaddr, x->coaddr, sizeof(*x1->coaddr));
1300 if (!use_spi && memcmp(&x1->sel, &x->sel, sizeof(x1->sel)))
1301 memcpy(&x1->sel, &x->sel, sizeof(x1->sel));
1302 memcpy(&x1->lft, &x->lft, sizeof(x1->lft));
/* Kick the timer so new lifetimes are evaluated promptly. */
1305 mod_timer(&x1->timer, jiffies + HZ);
1306 if (x1->curlft.use_time)
1307 xfrm_state_check_expire(x1);
1311 spin_unlock_bh(&x1->lock);
1317 EXPORT_SYMBOL(xfrm_state_update);
/* Check byte/packet lifetime limits: a hard limit expires the state and
 * fires its timer; a soft limit only notifies the key manager. */
1319 int xfrm_state_check_expire(struct xfrm_state *x)
1321 if (!x->curlft.use_time)
1322 x->curlft.use_time = get_seconds();
1324 if (x->km.state != XFRM_STATE_VALID)
1327 if (x->curlft.bytes >= x->lft.hard_byte_limit ||
1328 x->curlft.packets >= x->lft.hard_packet_limit) {
1329 x->km.state = XFRM_STATE_EXPIRED;
1330 mod_timer(&x->timer, jiffies);
1335 (x->curlft.bytes >= x->lft.soft_byte_limit ||
1336 x->curlft.packets >= x->lft.soft_packet_limit)) {
1338 km_state_expired(x, 0, 0);
1342 EXPORT_SYMBOL(xfrm_state_check_expire);
/* Locked wrapper around __xfrm_state_lookup() (lookup by SPI). */
1345 xfrm_state_lookup(xfrm_address_t *daddr, __be32 spi, u8 proto,
1346 unsigned short family)
1348 struct xfrm_state *x;
1350 spin_lock_bh(&xfrm_state_lock);
1351 x = __xfrm_state_lookup(daddr, spi, proto, family);
1352 spin_unlock_bh(&xfrm_state_lock);
1355 EXPORT_SYMBOL(xfrm_state_lookup);
/* Locked wrapper around __xfrm_state_lookup_byaddr(). */
1358 xfrm_state_lookup_byaddr(xfrm_address_t *daddr, xfrm_address_t *saddr,
1359 u8 proto, unsigned short family)
1361 struct xfrm_state *x;
1363 spin_lock_bh(&xfrm_state_lock);
1364 x = __xfrm_state_lookup_byaddr(daddr, saddr, proto, family);
1365 spin_unlock_bh(&xfrm_state_lock);
1368 EXPORT_SYMBOL(xfrm_state_lookup_byaddr);
/* Locked wrapper around __find_acq_core(). */
1371 xfrm_find_acq(u8 mode, u32 reqid, u8 proto,
1372 xfrm_address_t *daddr, xfrm_address_t *saddr,
1373 int create, unsigned short family)
1375 struct xfrm_state *x;
1377 spin_lock_bh(&xfrm_state_lock);
1378 x = __find_acq_core(family, mode, reqid, proto, daddr, saddr, create);
1379 spin_unlock_bh(&xfrm_state_lock);
1383 EXPORT_SYMBOL(xfrm_find_acq);
1385 #ifdef CONFIG_XFRM_SUB_POLICY
/* Sort templates via the family's tmpl_sort hook (sub-policy support). */
1387 xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n,
1388 unsigned short family)
1391 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
1393 return -EAFNOSUPPORT;
1395 spin_lock_bh(&xfrm_state_lock);
1396 if (afinfo->tmpl_sort)
1397 err = afinfo->tmpl_sort(dst, src, n);
1398 spin_unlock_bh(&xfrm_state_lock);
1399 xfrm_state_put_afinfo(afinfo);
1402 EXPORT_SYMBOL(xfrm_tmpl_sort);
/* Sort states via the family's state_sort hook (sub-policy support). */
1405 xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
1406 unsigned short family)
1409 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
1411 return -EAFNOSUPPORT;
1413 spin_lock_bh(&xfrm_state_lock);
1414 if (afinfo->state_sort)
1415 err = afinfo->state_sort(dst, src, n);
1416 spin_unlock_bh(&xfrm_state_lock);
1417 xfrm_state_put_afinfo(afinfo);
1420 EXPORT_SYMBOL(xfrm_state_sort);
1423 /* Silly enough, but I'm lazy to build resolution list */
/* Linear scan of the whole by-dst table for an ACQUIRE state whose
 * km.seq equals @seq. Caller holds xfrm_state_lock. */
1425 static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq)
1429 for (i = 0; i <= xfrm_state_hmask; i++) {
1430 struct hlist_node *entry;
1431 struct xfrm_state *x;
1433 hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
1434 if (x->km.seq == seq &&
1435 x->km.state == XFRM_STATE_ACQ) {
/* Locked wrapper around __xfrm_find_acq_byseq(). */
1444 struct xfrm_state *xfrm_find_acq_byseq(u32 seq)
1446 struct xfrm_state *x;
1448 spin_lock_bh(&xfrm_state_lock);
1449 x = __xfrm_find_acq_byseq(seq);
1450 spin_unlock_bh(&xfrm_state_lock);
1453 EXPORT_SYMBOL(xfrm_find_acq_byseq);
/* Hand out the next non-zero ACQUIRE sequence number (wraps past 0). */
1455 u32 xfrm_get_acqseq(void)
1459 static DEFINE_SPINLOCK(acqseq_lock);
1461 spin_lock_bh(&acqseq_lock);
1462 res = (++acqseq ? : ++acqseq);
1463 spin_unlock_bh(&acqseq_lock);
1466 EXPORT_SYMBOL(xfrm_get_acqseq);
/* Assign an SPI to @x from [low,high]: exact value when low == high,
 * otherwise random probing for an unused one; on success the state is
 * hashed into the by-SPI table. */
1468 int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high)
1471 struct xfrm_state *x0;
1473 __be32 minspi = htonl(low);
1474 __be32 maxspi = htonl(high);
1476 spin_lock_bh(&x->lock);
1477 if (x->km.state == XFRM_STATE_DEAD)
1486 if (minspi == maxspi) {
1487 x0 = xfrm_state_lookup(&x->id.daddr, minspi, x->id.proto, x->props.family);
/* Random search: up to (high-low+1) probes for a free SPI. */
1495 for (h=0; h<high-low+1; h++) {
1496 spi = low + net_random()%(high-low+1);
1497 x0 = xfrm_state_lookup(&x->id.daddr, htonl(spi), x->id.proto, x->props.family);
1499 x->id.spi = htonl(spi);
1506 spin_lock_bh(&xfrm_state_lock);
1507 h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, x->props.family);
1508 hlist_add_head(&x->byspi, xfrm_state_byspi+h);
1509 spin_unlock_bh(&xfrm_state_lock);
1515 spin_unlock_bh(&x->lock);
1519 EXPORT_SYMBOL(xfrm_alloc_spi);
/* Iterate all states matching @proto, invoking @func on each; a final
 * call with count 0 signals completion to the callback.
 * NOTE(review): the counting/last-tracking lines are elided here. */
1521 int xfrm_state_walk(u8 proto, int (*func)(struct xfrm_state *, int, void*),
1525 struct xfrm_state *x, *last = NULL;
1526 struct hlist_node *entry;
1530 spin_lock_bh(&xfrm_state_lock);
1531 for (i = 0; i <= xfrm_state_hmask; i++) {
1532 hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
1533 if (!xfrm_id_proto_match(x->id.proto, proto))
1536 err = func(last, count, data);
1548 err = func(last, 0, data);
1550 spin_unlock_bh(&xfrm_state_lock);
1553 EXPORT_SYMBOL(xfrm_state_walk);
/* Emit an XFRM_MSG_NEWAE notification for replay-state changes on @x,
 * rate-limited per the policy described in the original comment below.
 * NOTE(review): SOURCE is truncated -- the switch header, the struct
 * km_event declaration and several returns are not visible. */
1556 void xfrm_replay_notify(struct xfrm_state *x, int event)
1559 /* we send notify messages in case
1560 * 1. we updated on of the sequence numbers, and the seqno difference
1561 * is at least x->replay_maxdiff, in this case we also update the
1562 * timeout of our timer function
1563 * 2. if x->replay_maxage has elapsed since last update,
1564 * and there were changes
1566 * The state structure must be locked!
/* Sequence update: suppress the event unless the seq delta reached
 * replay_maxdiff; a deferred timer converts it to a TIMEOUT event. */
1570 case XFRM_REPLAY_UPDATE:
1571 if (x->replay_maxdiff &&
1572 (x->replay.seq - x->preplay.seq < x->replay_maxdiff) &&
1573 (x->replay.oseq - x->preplay.oseq < x->replay_maxdiff)) {
1574 if (x->xflags & XFRM_TIME_DEFER)
1575 event = XFRM_REPLAY_TIMEOUT;
/* Timer expiry with no change since last notify: defer instead. */
1582 case XFRM_REPLAY_TIMEOUT:
1583 if ((x->replay.seq == x->preplay.seq) &&
1584 (x->replay.bitmap == x->preplay.bitmap) &&
1585 (x->replay.oseq == x->preplay.oseq)) {
1586 x->xflags |= XFRM_TIME_DEFER;
/* Snapshot current replay state as "previously notified". */
1593 memcpy(&x->preplay, &x->replay, sizeof(struct xfrm_replay_state));
1594 c.event = XFRM_MSG_NEWAE;
1595 c.data.aevent = event;
1596 km_state_notify(x, &c);
/* Re-arm the aging timer; clear the defer flag only if it was not
 * already pending. */
1598 if (x->replay_maxage &&
1599 !mod_timer(&x->rtimer, jiffies + x->replay_maxage))
1600 x->xflags &= ~XFRM_TIME_DEFER;
/* rtimer callback: for a still-valid state, either send the deferred
 * replay notification (if aevents are enabled) or mark the state so
 * the next update sends one. */
1603 static void xfrm_replay_timer_handler(unsigned long data)
1605 struct xfrm_state *x = (struct xfrm_state*)data;
1607 spin_lock(&x->lock);
1609 if (x->km.state == XFRM_STATE_VALID) {
1610 if (xfrm_aevent_is_on())
1611 xfrm_replay_notify(x, XFRM_REPLAY_TIMEOUT);
/* No listener right now: remember that a notify is owed. */
1613 x->xflags |= XFRM_TIME_DEFER;
1616 spin_unlock(&x->lock);
/* Anti-replay check for inbound sequence number @net_seq on @x.
 * Rejects seq 0, seqs older than the replay window, and seqs already
 * marked in the bitmap; a replayed packet is audited.  NOTE(review):
 * return statements and the audit path for the "too old" case are
 * truncated from this view; presumably 0 = accept, negative = reject. */
1619 int xfrm_replay_check(struct xfrm_state *x,
1620 struct sk_buff *skb, __be32 net_seq)
1623 u32 seq = ntohl(net_seq);
1625 if (unlikely(seq == 0))
/* Anything newer than the highest seen seq is automatically fresh. */
1628 if (likely(seq > x->replay.seq))
1631 diff = x->replay.seq - seq;
/* Outside the window (capped at the bitmap width in bits): too old. */
1632 if (diff >= min_t(unsigned int, x->props.replay_window,
1633 sizeof(x->replay.bitmap) * 8)) {
1634 x->stats.replay_window++;
/* Bit already set => this seq was seen before: replay. */
1638 if (x->replay.bitmap & (1U << diff)) {
1645 xfrm_audit_state_replay(x, skb, net_seq);
1648 EXPORT_SYMBOL(xfrm_replay_check);
/* Record @net_seq as received: slide the replay window forward for a
 * new high-water seq, or set the corresponding bit for an in-window
 * seq, then emit an aevent if anyone is listening. */
1650 void xfrm_replay_advance(struct xfrm_state *x, __be32 net_seq)
1653 u32 seq = ntohl(net_seq);
1655 if (seq > x->replay.seq) {
1656 diff = seq - x->replay.seq;
/* Shift the window; if we jumped past it entirely, only the new
 * seq's bit survives. */
1657 if (diff < x->props.replay_window)
1658 x->replay.bitmap = ((x->replay.bitmap) << diff) | 1;
1660 x->replay.bitmap = 1;
1661 x->replay.seq = seq;
/* In-window (older) seq: just mark its bit. */
1663 diff = x->replay.seq - seq;
1664 x->replay.bitmap |= (1U << diff);
1667 if (xfrm_aevent_is_on())
1668 xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
1670 EXPORT_SYMBOL(xfrm_replay_advance);
1672 static LIST_HEAD(xfrm_km_list);
1673 static DEFINE_RWLOCK(xfrm_km_lock);
/* Broadcast a policy event @c to every registered key manager that
 * implements notify_policy, under the km list read lock. */
1675 void km_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
1677 struct xfrm_mgr *km;
1679 read_lock(&xfrm_km_lock);
1680 list_for_each_entry(km, &xfrm_km_list, list)
1681 if (km->notify_policy)
1682 km->notify_policy(xp, dir, c);
1683 read_unlock(&xfrm_km_lock);
/* Broadcast a state event @c to every registered key manager.
 * NOTE(review): the per-manager callback invocation is truncated from
 * this view; presumably km->notify(x, c) guarded like km_policy_notify. */
1686 void km_state_notify(struct xfrm_state *x, struct km_event *c)
1688 struct xfrm_mgr *km;
1689 read_lock(&xfrm_km_lock);
1690 list_for_each_entry(km, &xfrm_km_list, list)
1693 read_unlock(&xfrm_km_lock);
1696 EXPORT_SYMBOL(km_policy_notify);
1697 EXPORT_SYMBOL(km_state_notify);
/* Notify key managers that SA @x has (hard or soft) expired via an
 * XFRM_MSG_EXPIRE event.  NOTE(review): the km_event setup using
 * @hard and @pid is truncated from this view. */
1699 void km_state_expired(struct xfrm_state *x, int hard, u32 pid)
1705 c.event = XFRM_MSG_EXPIRE;
1706 km_state_notify(x, &c);
1712 EXPORT_SYMBOL(km_state_expired);
/* (comment fragment from the original:) */
1714 * We send to all registered managers regardless of failure
1715 * We are happy with one success
/* Ask every registered key manager to acquire an SA for template @t /
 * policy @pol; any single manager succeeding makes the call succeed.
 * NOTE(review): the error-accumulation after acquire() is truncated
 * from this view. */
1717 int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol)
1719 int err = -EINVAL, acqret;
1720 struct xfrm_mgr *km;
1722 read_lock(&xfrm_km_lock);
1723 list_for_each_entry(km, &xfrm_km_list, list) {
1724 acqret = km->acquire(x, t, pol, XFRM_POLICY_OUT);
1728 read_unlock(&xfrm_km_lock);
1731 EXPORT_SYMBOL(km_query);
/* Inform key managers of a NAT-T address/port remapping for @x.
 * Only managers implementing new_mapping are called; the combined
 * error handling between iterations is truncated from this view. */
1733 int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport)
1736 struct xfrm_mgr *km;
1738 read_lock(&xfrm_km_lock);
1739 list_for_each_entry(km, &xfrm_km_list, list) {
1740 if (km->new_mapping)
1741 err = km->new_mapping(x, ipaddr, sport);
1745 read_unlock(&xfrm_km_lock);
1748 EXPORT_SYMBOL(km_new_mapping);
/* Notify key managers that policy @pol expired via XFRM_MSG_POLEXPIRE.
 * NOTE(review): the km_event setup using @hard and @pid is truncated
 * from this view. */
1750 void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 pid)
1756 c.event = XFRM_MSG_POLEXPIRE;
1757 km_policy_notify(pol, dir, &c);
1762 EXPORT_SYMBOL(km_policy_expired);
1764 #ifdef CONFIG_XFRM_MIGRATE
/* Ask key managers to migrate @num_migrate endpoints described by @m
 * for selector @sel.  Presumably only managers implementing ->migrate
 * are called (the guard and error handling are truncated here). */
1765 int km_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
1766 struct xfrm_migrate *m, int num_migrate)
1770 struct xfrm_mgr *km;
1772 read_lock(&xfrm_km_lock);
1773 list_for_each_entry(km, &xfrm_km_list, list) {
1775 ret = km->migrate(sel, dir, type, m, num_migrate);
1780 read_unlock(&xfrm_km_lock);
1783 EXPORT_SYMBOL(km_migrate);
/* Forward a report event (@proto, @sel, @addr) to registered key
 * managers.  The ->report guard and error handling are truncated from
 * this view. */
1786 int km_report(u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr)
1790 struct xfrm_mgr *km;
1792 read_lock(&xfrm_km_lock);
1793 list_for_each_entry(km, &xfrm_km_list, list) {
1795 ret = km->report(proto, sel, addr);
1800 read_unlock(&xfrm_km_lock);
1803 EXPORT_SYMBOL(km_report);
/* setsockopt() path: copy a user-supplied policy blob (bounded by
 * PAGE_SIZE), let each key manager try to compile it, and insert the
 * resulting per-socket policy.  NOTE(review): error returns, the
 * kfree(data) cleanup and the compile_policy loop exit condition are
 * truncated from this view. */
1805 int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
1809 struct xfrm_mgr *km;
1810 struct xfrm_policy *pol = NULL;
/* Reject empty or oversized option buffers before allocating. */
1812 if (optlen <= 0 || optlen > PAGE_SIZE)
1815 data = kmalloc(optlen, GFP_KERNEL);
1820 if (copy_from_user(data, optval, optlen))
/* First manager that can parse this format wins. */
1824 read_lock(&xfrm_km_lock);
1825 list_for_each_entry(km, &xfrm_km_list, list) {
1826 pol = km->compile_policy(sk, optname, data,
1831 read_unlock(&xfrm_km_lock);
1834 xfrm_sk_policy_insert(sk, err, pol);
1843 EXPORT_SYMBOL(xfrm_user_policy);
/* Register a key manager by appending it to the global km list under
 * the write lock. */
1845 int xfrm_register_km(struct xfrm_mgr *km)
1847 write_lock_bh(&xfrm_km_lock);
1848 list_add_tail(&km->list, &xfrm_km_list);
1849 write_unlock_bh(&xfrm_km_lock);
1852 EXPORT_SYMBOL(xfrm_register_km);
/* Unregister a key manager by unlinking it from the global km list
 * under the write lock. */
1854 int xfrm_unregister_km(struct xfrm_mgr *km)
1856 write_lock_bh(&xfrm_km_lock);
1857 list_del(&km->list);
1858 write_unlock_bh(&xfrm_km_lock);
1861 EXPORT_SYMBOL(xfrm_unregister_km);
/* Register per-address-family state ops.  Rejects NULL and
 * out-of-range families; refuses to overwrite an existing entry
 * (truncated branch presumably returns -EEXIST or similar). */
1863 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo)
1866 if (unlikely(afinfo == NULL))
1868 if (unlikely(afinfo->family >= NPROTO))
1869 return -EAFNOSUPPORT;
1870 write_lock_bh(&xfrm_state_afinfo_lock);
1871 if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL))
1874 xfrm_state_afinfo[afinfo->family] = afinfo;
1875 write_unlock_bh(&xfrm_state_afinfo_lock);
1878 EXPORT_SYMBOL(xfrm_state_register_afinfo);
/* Unregister per-address-family state ops; only clears the slot if it
 * actually holds @afinfo (guards against unregistering someone else's
 * entry). */
1880 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
1883 if (unlikely(afinfo == NULL))
1885 if (unlikely(afinfo->family >= NPROTO))
1886 return -EAFNOSUPPORT;
1887 write_lock_bh(&xfrm_state_afinfo_lock);
1888 if (likely(xfrm_state_afinfo[afinfo->family] != NULL)) {
1889 if (unlikely(xfrm_state_afinfo[afinfo->family] != afinfo))
1892 xfrm_state_afinfo[afinfo->family] = NULL;
1894 write_unlock_bh(&xfrm_state_afinfo_lock);
1897 EXPORT_SYMBOL(xfrm_state_unregister_afinfo);
/* Look up the afinfo for @family and return it with the read lock
 * HELD on success (released by xfrm_state_put_afinfo); on a missing
 * entry the lock is dropped here.  Returns NULL for bad family or no
 * entry. */
1899 static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family)
1901 struct xfrm_state_afinfo *afinfo;
1902 if (unlikely(family >= NPROTO))
1904 read_lock(&xfrm_state_afinfo_lock);
1905 afinfo = xfrm_state_afinfo[family];
1906 if (unlikely(!afinfo))
1907 read_unlock(&xfrm_state_afinfo_lock);
/* Release the read lock taken by a successful xfrm_state_get_afinfo(). */
1911 static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo)
1913 read_unlock(&xfrm_state_afinfo_lock);
1916 /* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
/* Drop @x's reference on its tunnel state @t: delete @t when we hold
 * the last user reference besides @t's own (tunnel_users == 2), then
 * decrement the user count. */
1917 void xfrm_state_delete_tunnel(struct xfrm_state *x)
1920 struct xfrm_state *t = x->tunnel;
1922 if (atomic_read(&t->tunnel_users) == 2)
1923 xfrm_state_delete(t);
1924 atomic_dec(&t->tunnel_users);
1929 EXPORT_SYMBOL(xfrm_state_delete_tunnel);
/* Compute the effective payload MTU for @x: delegate to the
 * transform type's get_mtu() when the state is valid and provides
 * one, otherwise subtract the protocol header overhead. */
1931 int xfrm_state_mtu(struct xfrm_state *x, int mtu)
1935 spin_lock_bh(&x->lock);
1936 if (x->km.state == XFRM_STATE_VALID &&
1937 x->type && x->type->get_mtu)
1938 res = x->type->get_mtu(x, mtu);
1940 res = mtu - x->props.header_len;
1941 spin_unlock_bh(&x->lock);
/* Fully initialize SA @x: run family-specific init_flags, resolve the
 * inner/outer modes and the transform type, run its init_state(), and
 * finally mark the state VALID.  NOTE(review): error-path gotos and
 * the return are truncated from this view. */
1945 int xfrm_init_state(struct xfrm_state *x)
1947 struct xfrm_state_afinfo *afinfo;
1948 int family = x->props.family;
1951 err = -EAFNOSUPPORT;
1952 afinfo = xfrm_state_get_afinfo(family);
/* Family-specific flag validation/initialization, if provided. */
1957 if (afinfo->init_flags)
1958 err = afinfo->init_flags(x);
1960 xfrm_state_put_afinfo(afinfo);
1965 err = -EPROTONOSUPPORT;
/* Inner mode is keyed on the selector family. */
1966 x->inner_mode = xfrm_get_mode(x->props.mode, x->sel.family);
1967 if (x->inner_mode == NULL)
/* Non-tunnel modes cannot translate between address families. */
1970 if (!(x->inner_mode->flags & XFRM_MODE_FLAG_TUNNEL) &&
1971 family != x->sel.family)
1974 x->type = xfrm_get_type(x->id.proto, family);
1975 if (x->type == NULL)
1978 err = x->type->init_state(x);
/* Outer mode is keyed on the state's own family. */
1982 x->outer_mode = xfrm_get_mode(x->props.mode, family);
1983 if (x->outer_mode == NULL)
1986 x->km.state = XFRM_STATE_VALID;
1992 EXPORT_SYMBOL(xfrm_init_state);
/* Boot-time init: allocate the three state hash tables (8 buckets
 * each to start; the tables grow elsewhere), derive the hash mask,
 * and set up the GC work item.  Panics on allocation failure since
 * IPsec cannot function without the tables. */
1994 void __init xfrm_state_init(void)
1998 sz = sizeof(struct hlist_head) * 8;
2000 xfrm_state_bydst = xfrm_hash_alloc(sz);
2001 xfrm_state_bysrc = xfrm_hash_alloc(sz);
2002 xfrm_state_byspi = xfrm_hash_alloc(sz);
2003 if (!xfrm_state_bydst || !xfrm_state_bysrc || !xfrm_state_byspi)
2004 panic("XFRM: Cannot allocate bydst/bysrc/byspi hashes.");
2005 xfrm_state_hmask = ((sz / sizeof(struct hlist_head)) - 1);
2007 INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task);
2010 #ifdef CONFIG_AUDITSYSCALL
/* Append SA identity (security context, src/dst addresses, SPI) to an
 * audit record.  NOTE(review): the NULL-check on ctx and the switch
 * case labels (AF_INET/AF_INET6) are truncated from this view. */
2011 static inline void xfrm_audit_helper_sainfo(struct xfrm_state *x,
2012 struct audit_buffer *audit_buf)
2014 struct xfrm_sec_ctx *ctx = x->security;
2015 u32 spi = ntohl(x->id.spi);
2018 audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
2019 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);
2021 switch(x->props.family) {
/* IPv4 form of the address pair. */
2023 audit_log_format(audit_buf,
2024 " src=" NIPQUAD_FMT " dst=" NIPQUAD_FMT,
2025 NIPQUAD(x->props.saddr.a4),
2026 NIPQUAD(x->id.daddr.a4));
/* IPv6 form of the address pair. */
2029 audit_log_format(audit_buf,
2030 " src=" NIP6_FMT " dst=" NIP6_FMT,
2031 NIP6(*(struct in6_addr *)x->props.saddr.a6),
2032 NIP6(*(struct in6_addr *)x->id.daddr.a6));
2036 audit_log_format(audit_buf, " spi=%u(0x%x)", spi, spi);
/* Append the packet's src/dst addresses (and flow label for IPv6) to
 * an audit record, keyed on @family.  NOTE(review): the iph4
 * declaration/assignment, switch labels, and remaining flow_lbl
 * operands are truncated from this view. */
2039 static inline void xfrm_audit_helper_pktinfo(struct sk_buff *skb, u16 family,
2040 struct audit_buffer *audit_buf)
2043 struct ipv6hdr *iph6;
/* IPv4: log the header's address pair. */
2048 audit_log_format(audit_buf,
2049 " src=" NIPQUAD_FMT " dst=" NIPQUAD_FMT,
2050 NIPQUAD(iph4->saddr),
2051 NIPQUAD(iph4->daddr));
/* IPv6: also log the 20-bit flow label. */
2054 iph6 = ipv6_hdr(skb);
2055 audit_log_format(audit_buf,
2056 " src=" NIP6_FMT " dst=" NIP6_FMT
2057 " flowlbl=0x%x%x%x",
2060 iph6->flow_lbl[0] & 0x0f,
/* Emit an "SAD-add" audit record for SA @x with the caller's audit
 * uid/secid and the operation @result.  Silently returns if the audit
 * buffer cannot be started. */
2067 void xfrm_audit_state_add(struct xfrm_state *x, int result,
2068 u32 auid, u32 secid)
2070 struct audit_buffer *audit_buf;
2072 audit_buf = xfrm_audit_start("SAD-add");
2073 if (audit_buf == NULL)
2075 xfrm_audit_helper_usrinfo(auid, secid, audit_buf);
2076 xfrm_audit_helper_sainfo(x, audit_buf);
2077 audit_log_format(audit_buf, " res=%u", result);
2078 audit_log_end(audit_buf);
2080 EXPORT_SYMBOL_GPL(xfrm_audit_state_add);
/* Emit an "SAD-delete" audit record for SA @x; mirrors
 * xfrm_audit_state_add() except for the operation name. */
2082 void xfrm_audit_state_delete(struct xfrm_state *x, int result,
2083 u32 auid, u32 secid)
2085 struct audit_buffer *audit_buf;
2087 audit_buf = xfrm_audit_start("SAD-delete");
2088 if (audit_buf == NULL)
2090 xfrm_audit_helper_usrinfo(auid, secid, audit_buf);
2091 xfrm_audit_helper_sainfo(x, audit_buf);
2092 audit_log_format(audit_buf, " res=%u", result);
2093 audit_log_end(audit_buf);
2095 EXPORT_SYMBOL_GPL(xfrm_audit_state_delete);
/* Audit an outbound sequence-number overflow on SA @x: log the packet
 * addresses and the SPI (no seqno -- it is implied by the event). */
2097 void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
2098 struct sk_buff *skb)
2100 struct audit_buffer *audit_buf;
2103 audit_buf = xfrm_audit_start("SA-replay-overflow");
2104 if (audit_buf == NULL)
2106 xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
2107 /* don't record the sequence number because it's inherent in this kind
2108 * of audit message */
2109 spi = ntohl(x->id.spi);
2110 audit_log_format(audit_buf, " spi=%u(0x%x)", spi, spi);
2111 audit_log_end(audit_buf);
2113 EXPORT_SYMBOL_GPL(xfrm_audit_state_replay_overflow);
/* Audit a detected replayed packet on SA @x: log packet addresses,
 * the SPI, and the offending sequence number. */
2115 static void xfrm_audit_state_replay(struct xfrm_state *x,
2116 struct sk_buff *skb, __be32 net_seq)
2118 struct audit_buffer *audit_buf;
2121 audit_buf = xfrm_audit_start("SA-replayed-pkt");
2122 if (audit_buf == NULL)
2124 xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
2125 spi = ntohl(x->id.spi);
2126 audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
2127 spi, spi, ntohl(net_seq));
2128 audit_log_end(audit_buf);
/* Audit an SA-lookup miss when only the packet (no SPI/seq) is
 * available -- logs just the address pair. */
2131 void xfrm_audit_state_notfound_simple(struct sk_buff *skb, u16 family)
2133 struct audit_buffer *audit_buf;
2135 audit_buf = xfrm_audit_start("SA-notfound");
2136 if (audit_buf == NULL)
2138 xfrm_audit_helper_pktinfo(skb, family, audit_buf);
2139 audit_log_end(audit_buf);
2141 EXPORT_SYMBOL_GPL(xfrm_audit_state_notfound_simple);
/* Audit an SA-lookup miss including the packet's SPI and sequence
 * number (both supplied in network byte order). */
2143 void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family,
2144 __be32 net_spi, __be32 net_seq)
2146 struct audit_buffer *audit_buf;
2149 audit_buf = xfrm_audit_start("SA-notfound");
2150 if (audit_buf == NULL)
2152 xfrm_audit_helper_pktinfo(skb, family, audit_buf);
2153 spi = ntohl(net_spi);
2154 audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
2155 spi, spi, ntohl(net_seq));
2156 audit_log_end(audit_buf);
2158 EXPORT_SYMBOL_GPL(xfrm_audit_state_notfound);
/* Audit an integrity-check (ICV) failure on SA @x.  SPI/seqno are
 * re-parsed from the packet with xfrm_parse_spi() and only logged
 * when parsing succeeds. */
2160 void xfrm_audit_state_icvfail(struct xfrm_state *x,
2161 struct sk_buff *skb, u8 proto)
2163 struct audit_buffer *audit_buf;
2167 audit_buf = xfrm_audit_start("SA-icv-failure");
2168 if (audit_buf == NULL)
2170 xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
2171 if (xfrm_parse_spi(skb, proto, &net_spi, &net_seq) == 0) {
2172 u32 spi = ntohl(net_spi);
2173 audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
2174 spi, spi, ntohl(net_seq));
2176 audit_log_end(audit_buf);
2178 EXPORT_SYMBOL_GPL(xfrm_audit_state_icvfail);
2179 #endif /* CONFIG_AUDITSYSCALL */