/*
 * xfrm_state.c
 *
 * Changes:
 *	Mitsuru KANDA @USAGI
 *	Kazunori MIYAZAWA @USAGI
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *		IPv6 support
 *	YOSHIFUJI Hideaki @USAGI
 *		Split up af-specific functions
 *	Derek Atkins <derek@ihtfp.com>
 *		Add UDP Encapsulation
 *
 */

#include <linux/workqueue.h>
#include <net/xfrm.h>
#include <linux/pfkeyv2.h>
#include <linux/ipsec.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/audit.h>
#include <asm/uaccess.h>

#include "xfrm_hash.h"

struct sock *xfrm_nl;
EXPORT_SYMBOL(xfrm_nl);

u32 sysctl_xfrm_aevent_etime __read_mostly = XFRM_AE_ETIME;
EXPORT_SYMBOL(sysctl_xfrm_aevent_etime);

u32 sysctl_xfrm_aevent_rseqth __read_mostly = XFRM_AE_SEQT_SIZE;
EXPORT_SYMBOL(sysctl_xfrm_aevent_rseqth);

u32 sysctl_xfrm_acq_expires __read_mostly = 30;

/* Each xfrm_state may be linked to two tables:

   1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl)
   2. Hash table by (daddr,family,reqid) to find what SAs exist for given
      destination/tunnel endpoint. (output)
 */

static DEFINE_SPINLOCK(xfrm_state_lock);

/* Hash table to find appropriate SA towards given target (endpoint
 * of tunnel or destination of transport mode) allowed by selector.
 *
 * Main use is finding SA after policy selected tunnel or transport mode.
 * Also, it can be used by ah/esp icmp error handler to find offending SA.
 */
static struct hlist_head *xfrm_state_bydst __read_mostly;
static struct hlist_head *xfrm_state_bysrc __read_mostly;
static struct hlist_head *xfrm_state_byspi __read_mostly;
static unsigned int xfrm_state_hmask __read_mostly;
static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;
static unsigned int xfrm_state_num;
static unsigned int xfrm_state_genid;
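
/* All three tables are sized together and share xfrm_state_hmask.  They
 * start small (see xfrm_state_init() at the bottom of this file) and are
 * grown by the xfrm_hash_work worker once chains begin to collide;
 * xfrm_hash_grow_check() below decides when that happens.
 */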

static inline unsigned int xfrm_dst_hash(xfrm_address_t *daddr,
                                         xfrm_address_t *saddr,
                                         u32 reqid,
                                         unsigned short family)
{
        return __xfrm_dst_hash(daddr, saddr, reqid, family, xfrm_state_hmask);
}

static inline unsigned int xfrm_src_hash(xfrm_address_t *daddr,
                                         xfrm_address_t *saddr,
                                         unsigned short family)
{
        return __xfrm_src_hash(daddr, saddr, family, xfrm_state_hmask);
}

static inline unsigned int
xfrm_spi_hash(xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family)
{
        return __xfrm_spi_hash(daddr, spi, proto, family, xfrm_state_hmask);
}
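
/* Every state is linked into all three tables, so rehashing can walk just
 * the bydst chains and relink each entry into the new bydst, bysrc and
 * byspi tables in one pass.  Entries without an SPI (larval ACQ states)
 * are only ever linked into bydst and bysrc.
 */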

static void xfrm_hash_transfer(struct hlist_head *list,
                               struct hlist_head *ndsttable,
                               struct hlist_head *nsrctable,
                               struct hlist_head *nspitable,
                               unsigned int nhashmask)
{
        struct hlist_node *entry, *tmp;
        struct xfrm_state *x;

        hlist_for_each_entry_safe(x, entry, tmp, list, bydst) {
                unsigned int h;

                h = __xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
                                    x->props.reqid, x->props.family,
                                    nhashmask);
                hlist_add_head(&x->bydst, ndsttable+h);

                h = __xfrm_src_hash(&x->id.daddr, &x->props.saddr,
                                    x->props.family,
                                    nhashmask);
                hlist_add_head(&x->bysrc, nsrctable+h);

                if (x->id.spi) {
                        h = __xfrm_spi_hash(&x->id.daddr, x->id.spi,
                                            x->id.proto, x->props.family,
                                            nhashmask);
                        hlist_add_head(&x->byspi, nspitable+h);
                }
        }
}

static unsigned long xfrm_hash_new_size(void)
{
        return ((xfrm_state_hmask + 1) << 1) *
                sizeof(struct hlist_head);
}

static DEFINE_MUTEX(hash_resize_mutex);
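
/* Table growth happens in three steps: allocate all three replacement
 * tables up front (backing out on partial failure), relink every state
 * while holding xfrm_state_lock so lookups never see a half-built table,
 * then free the old tables once the lock has been dropped.
 */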

static void xfrm_hash_resize(struct work_struct *__unused)
{
        struct hlist_head *ndst, *nsrc, *nspi, *odst, *osrc, *ospi;
        unsigned long nsize, osize;
        unsigned int nhashmask, ohashmask;
        int i;

        mutex_lock(&hash_resize_mutex);

        nsize = xfrm_hash_new_size();
        ndst = xfrm_hash_alloc(nsize);
        if (!ndst)
                goto out_unlock;
        nsrc = xfrm_hash_alloc(nsize);
        if (!nsrc) {
                xfrm_hash_free(ndst, nsize);
                goto out_unlock;
        }
        nspi = xfrm_hash_alloc(nsize);
        if (!nspi) {
                xfrm_hash_free(ndst, nsize);
                xfrm_hash_free(nsrc, nsize);
                goto out_unlock;
        }

        spin_lock_bh(&xfrm_state_lock);

        nhashmask = (nsize / sizeof(struct hlist_head)) - 1U;
        for (i = xfrm_state_hmask; i >= 0; i--)
                xfrm_hash_transfer(xfrm_state_bydst+i, ndst, nsrc, nspi,
                                   nhashmask);

        odst = xfrm_state_bydst;
        osrc = xfrm_state_bysrc;
        ospi = xfrm_state_byspi;
        ohashmask = xfrm_state_hmask;

        xfrm_state_bydst = ndst;
        xfrm_state_bysrc = nsrc;
        xfrm_state_byspi = nspi;
        xfrm_state_hmask = nhashmask;

        spin_unlock_bh(&xfrm_state_lock);

        osize = (ohashmask + 1) * sizeof(struct hlist_head);
        xfrm_hash_free(odst, osize);
        xfrm_hash_free(osrc, osize);
        xfrm_hash_free(ospi, osize);

out_unlock:
        mutex_unlock(&hash_resize_mutex);
}

static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize);

DECLARE_WAIT_QUEUE_HEAD(km_waitq);
EXPORT_SYMBOL(km_waitq);

static DEFINE_RWLOCK(xfrm_state_afinfo_lock);
static struct xfrm_state_afinfo *xfrm_state_afinfo[NPROTO];

static struct work_struct xfrm_state_gc_work;
static HLIST_HEAD(xfrm_state_gc_list);
static DEFINE_SPINLOCK(xfrm_state_gc_lock);

int __xfrm_state_delete(struct xfrm_state *x);

int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
void km_state_expired(struct xfrm_state *x, int hard, u32 pid);
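
/* States are never freed synchronously: the final put moves them onto
 * xfrm_state_gc_list and the real teardown runs from a workqueue.  That
 * is needed because xfrm_state_gc_destroy() calls del_timer_sync() and
 * the type destructors, which may sleep and therefore cannot run in the
 * (possibly atomic) context that dropped the last reference.
 */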

static void xfrm_state_gc_destroy(struct xfrm_state *x)
{
        del_timer_sync(&x->timer);
        del_timer_sync(&x->rtimer);
        kfree(x->aalg);
        kfree(x->ealg);
        kfree(x->calg);
        kfree(x->encap);
        kfree(x->coaddr);
        if (x->mode)
                xfrm_put_mode(x->mode);
        if (x->type) {
                x->type->destructor(x);
                xfrm_put_type(x->type);
        }
        security_xfrm_state_free(x);
        kfree(x);
}

static void xfrm_state_gc_task(struct work_struct *data)
{
        struct xfrm_state *x;
        struct hlist_node *entry, *tmp;
        struct hlist_head gc_list;

        spin_lock_bh(&xfrm_state_gc_lock);
        gc_list.first = xfrm_state_gc_list.first;
        INIT_HLIST_HEAD(&xfrm_state_gc_list);
        spin_unlock_bh(&xfrm_state_gc_lock);

        hlist_for_each_entry_safe(x, entry, tmp, &gc_list, bydst)
                xfrm_state_gc_destroy(x);

        wake_up(&km_waitq);
}

static inline unsigned long make_jiffies(long secs)
{
        if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
                return MAX_SCHEDULE_TIMEOUT-1;
        else
                return secs*HZ;
}
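
/* The single per-state timer below implements all four lifetimes: hard
 * expiry (add/use) kills the SA outright, while soft expiry only warns
 * the key manager via km_state_expired(x, 0, 0) so a replacement can be
 * negotiated before the hard limit hits.  make_jiffies() clamps the
 * seconds-to-jiffies conversion so that a long lifetime (e.g. 30 days
 * with HZ=1000 on a 32-bit box) cannot overflow MAX_SCHEDULE_TIMEOUT.
 */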

static void xfrm_timer_handler(unsigned long data)
{
        struct xfrm_state *x = (struct xfrm_state*)data;
        unsigned long now = get_seconds();
        long next = LONG_MAX;
        int warn = 0;
        int err = 0;

        spin_lock(&x->lock);
        if (x->km.state == XFRM_STATE_DEAD)
                goto out;
        if (x->km.state == XFRM_STATE_EXPIRED)
                goto expired;
        if (x->lft.hard_add_expires_seconds) {
                long tmo = x->lft.hard_add_expires_seconds +
                        x->curlft.add_time - now;
                if (tmo <= 0)
                        goto expired;
                if (tmo < next)
                        next = tmo;
        }
        if (x->lft.hard_use_expires_seconds) {
                long tmo = x->lft.hard_use_expires_seconds +
                        (x->curlft.use_time ? : now) - now;
                if (tmo <= 0)
                        goto expired;
                if (tmo < next)
                        next = tmo;
        }
        if (x->km.dying)
                goto resched;
        if (x->lft.soft_add_expires_seconds) {
                long tmo = x->lft.soft_add_expires_seconds +
                        x->curlft.add_time - now;
                if (tmo <= 0)
                        warn = 1;
                else if (tmo < next)
                        next = tmo;
        }
        if (x->lft.soft_use_expires_seconds) {
                long tmo = x->lft.soft_use_expires_seconds +
                        (x->curlft.use_time ? : now) - now;
                if (tmo <= 0)
                        warn = 1;
                else if (tmo < next)
                        next = tmo;
        }

        x->km.dying = warn;
        if (warn)
                km_state_expired(x, 0, 0);
resched:
        if (next != LONG_MAX)
                mod_timer(&x->timer, jiffies + make_jiffies(next));

        goto out;

expired:
        if (x->km.state == XFRM_STATE_ACQ && x->id.spi == 0) {
                x->km.state = XFRM_STATE_EXPIRED;
                wake_up(&km_waitq);
                next = 2;
                goto resched;
        }

        err = __xfrm_state_delete(x);
        if (!err && x->id.spi)
                km_state_expired(x, 1, 0);

        xfrm_audit_log(audit_get_loginuid(current->audit_context), 0,
                       AUDIT_MAC_IPSEC_DELSA, err ? 0 : 1, NULL, x);

out:
        spin_unlock(&x->lock);
}

static void xfrm_replay_timer_handler(unsigned long data);

struct xfrm_state *xfrm_state_alloc(void)
{
        struct xfrm_state *x;

        x = kzalloc(sizeof(struct xfrm_state), GFP_ATOMIC);

        if (x) {
                atomic_set(&x->refcnt, 1);
                atomic_set(&x->tunnel_users, 0);
                INIT_HLIST_NODE(&x->bydst);
                INIT_HLIST_NODE(&x->bysrc);
                INIT_HLIST_NODE(&x->byspi);
                init_timer(&x->timer);
                x->timer.function = xfrm_timer_handler;
                x->timer.data     = (unsigned long)x;
                init_timer(&x->rtimer);
                x->rtimer.function = xfrm_replay_timer_handler;
                x->rtimer.data     = (unsigned long)x;
                x->curlft.add_time = get_seconds();
                x->lft.soft_byte_limit = XFRM_INF;
                x->lft.soft_packet_limit = XFRM_INF;
                x->lft.hard_byte_limit = XFRM_INF;
                x->lft.hard_packet_limit = XFRM_INF;
                x->replay_maxage = 0;
                x->replay_maxdiff = 0;
                spin_lock_init(&x->lock);
        }
        return x;
}
EXPORT_SYMBOL(xfrm_state_alloc);

void __xfrm_state_destroy(struct xfrm_state *x)
{
        BUG_TRAP(x->km.state == XFRM_STATE_DEAD);

        spin_lock_bh(&xfrm_state_gc_lock);
        hlist_add_head(&x->bydst, &xfrm_state_gc_list);
        spin_unlock_bh(&xfrm_state_gc_lock);
        schedule_work(&xfrm_state_gc_work);
}
EXPORT_SYMBOL(__xfrm_state_destroy);

int __xfrm_state_delete(struct xfrm_state *x)
{
        int err = -ESRCH;

        if (x->km.state != XFRM_STATE_DEAD) {
                x->km.state = XFRM_STATE_DEAD;
                spin_lock(&xfrm_state_lock);
                hlist_del(&x->bydst);
                hlist_del(&x->bysrc);
                if (x->id.spi)
                        hlist_del(&x->byspi);
                xfrm_state_num--;
                spin_unlock(&xfrm_state_lock);

                /* All xfrm_state objects are created by xfrm_state_alloc.
                 * The xfrm_state_alloc call gives a reference, and that
                 * is what we are dropping here.
                 */
                __xfrm_state_put(x);
                err = 0;
        }

        return err;
}
EXPORT_SYMBOL(__xfrm_state_delete);

int xfrm_state_delete(struct xfrm_state *x)
{
        int err;

        spin_lock_bh(&x->lock);
        err = __xfrm_state_delete(x);
        spin_unlock_bh(&x->lock);

        return err;
}
EXPORT_SYMBOL(xfrm_state_delete);

#ifdef CONFIG_SECURITY_NETWORK_XFRM
static inline int
xfrm_state_flush_secctx_check(u8 proto, struct xfrm_audit *audit_info)
{
        int i, err = 0;

        for (i = 0; i <= xfrm_state_hmask; i++) {
                struct hlist_node *entry;
                struct xfrm_state *x;

                hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
                        if (xfrm_id_proto_match(x->id.proto, proto) &&
                            (err = security_xfrm_state_delete(x)) != 0) {
                                xfrm_audit_log(audit_info->loginuid,
                                               audit_info->secid,
                                               AUDIT_MAC_IPSEC_DELSA,
                                               0, NULL, x);
                                return err;
                        }
                }
        }

        return err;
}
#else
static inline int
xfrm_state_flush_secctx_check(u8 proto, struct xfrm_audit *audit_info)
{
        return 0;
}
#endif
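
/* Flushing must call xfrm_state_delete() without xfrm_state_lock held
 * (deletion takes x->lock and may call back into the key managers), so
 * the loop below grabs a reference, drops the table lock around each
 * deletion, and then restarts the current chain from its head, since
 * the chain may have changed in the meantime.
 */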

int xfrm_state_flush(u8 proto, struct xfrm_audit *audit_info)
{
        int i, err = 0;

        spin_lock_bh(&xfrm_state_lock);
        err = xfrm_state_flush_secctx_check(proto, audit_info);
        if (err)
                goto out;

        for (i = 0; i <= xfrm_state_hmask; i++) {
                struct hlist_node *entry;
                struct xfrm_state *x;
restart:
                hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
                        if (!xfrm_state_kern(x) &&
                            xfrm_id_proto_match(x->id.proto, proto)) {
                                xfrm_state_hold(x);
                                spin_unlock_bh(&xfrm_state_lock);

                                err = xfrm_state_delete(x);
                                xfrm_audit_log(audit_info->loginuid,
                                               audit_info->secid,
                                               AUDIT_MAC_IPSEC_DELSA,
                                               err ? 0 : 1, NULL, x);
                                xfrm_state_put(x);

                                spin_lock_bh(&xfrm_state_lock);
                                goto restart;
                        }
                }
        }
        err = 0;

out:
        spin_unlock_bh(&xfrm_state_lock);
        wake_up(&km_waitq);
        return err;
}
EXPORT_SYMBOL(xfrm_state_flush);

void xfrm_sad_getinfo(struct xfrmk_sadinfo *si)
{
        spin_lock_bh(&xfrm_state_lock);
        si->sadcnt = xfrm_state_num;
        si->sadhcnt = xfrm_state_hmask;
        si->sadhmcnt = xfrm_state_hashmax;
        spin_unlock_bh(&xfrm_state_lock);
}
EXPORT_SYMBOL(xfrm_sad_getinfo);

static int
xfrm_init_tempsel(struct xfrm_state *x, struct flowi *fl,
                  struct xfrm_tmpl *tmpl,
                  xfrm_address_t *daddr, xfrm_address_t *saddr,
                  unsigned short family)
{
        struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
        if (!afinfo)
                return -1;
        afinfo->init_tempsel(x, fl, tmpl, daddr, saddr);
        xfrm_state_put_afinfo(afinfo);
        return 0;
}

static struct xfrm_state *__xfrm_state_lookup(xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family)
{
        unsigned int h = xfrm_spi_hash(daddr, spi, proto, family);
        struct xfrm_state *x;
        struct hlist_node *entry;

        hlist_for_each_entry(x, entry, xfrm_state_byspi+h, byspi) {
                if (x->props.family != family ||
                    x->id.spi       != spi ||
                    x->id.proto     != proto)
                        continue;

                switch (family) {
                case AF_INET:
                        if (x->id.daddr.a4 != daddr->a4)
                                continue;
                        break;
                case AF_INET6:
                        if (!ipv6_addr_equal((struct in6_addr *)daddr,
                                             (struct in6_addr *)
                                             x->id.daddr.a6))
                                continue;
                        break;
                }

                xfrm_state_hold(x);
                return x;
        }

        return NULL;
}

static struct xfrm_state *__xfrm_state_lookup_byaddr(xfrm_address_t *daddr, xfrm_address_t *saddr, u8 proto, unsigned short family)
{
        unsigned int h = xfrm_src_hash(daddr, saddr, family);
        struct xfrm_state *x;
        struct hlist_node *entry;

        hlist_for_each_entry(x, entry, xfrm_state_bysrc+h, bysrc) {
                if (x->props.family != family ||
                    x->id.proto     != proto)
                        continue;

                switch (family) {
                case AF_INET:
                        if (x->id.daddr.a4 != daddr->a4 ||
                            x->props.saddr.a4 != saddr->a4)
                                continue;
                        break;
                case AF_INET6:
                        if (!ipv6_addr_equal((struct in6_addr *)daddr,
                                             (struct in6_addr *)
                                             x->id.daddr.a6) ||
                            !ipv6_addr_equal((struct in6_addr *)saddr,
                                             (struct in6_addr *)
                                             x->props.saddr.a6))
                                continue;
                        break;
                }

                xfrm_state_hold(x);
                return x;
        }

        return NULL;
}

static inline struct xfrm_state *
__xfrm_state_locate(struct xfrm_state *x, int use_spi, int family)
{
        if (use_spi)
                return __xfrm_state_lookup(&x->id.daddr, x->id.spi,
                                           x->id.proto, family);
        else
                return __xfrm_state_lookup_byaddr(&x->id.daddr,
                                                  &x->props.saddr,
                                                  x->id.proto, family);
}

static void xfrm_hash_grow_check(int have_hash_collision)
{
        if (have_hash_collision &&
            (xfrm_state_hmask + 1) < xfrm_state_hashmax &&
            xfrm_state_num > xfrm_state_hmask)
                schedule_work(&xfrm_hash_work);
}
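
/* xfrm_state_find() is the output-path resolver: given a flow and a
 * policy template it either returns the best matching VALID state or
 * starts an acquire.  In the latter case it plants a larval
 * XFRM_STATE_ACQ entry and asks the registered key managers (via
 * km_query) to negotiate a real SA; the larval entry times out after
 * sysctl_xfrm_acq_expires seconds if nothing resolves it.
 */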

struct xfrm_state *
xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
                struct flowi *fl, struct xfrm_tmpl *tmpl,
                struct xfrm_policy *pol, int *err,
                unsigned short family)
{
        unsigned int h = xfrm_dst_hash(daddr, saddr, tmpl->reqid, family);
        struct hlist_node *entry;
        struct xfrm_state *x, *x0;
        int acquire_in_progress = 0;
        int error = 0;
        struct xfrm_state *best = NULL;

        spin_lock_bh(&xfrm_state_lock);
        hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
                if (x->props.family == family &&
                    x->props.reqid == tmpl->reqid &&
                    !(x->props.flags & XFRM_STATE_WILDRECV) &&
                    xfrm_state_addr_check(x, daddr, saddr, family) &&
                    tmpl->mode == x->props.mode &&
                    tmpl->id.proto == x->id.proto &&
                    (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) {
                        /* Resolution logic:
                           1. There is a valid state with matching selector.
                              Done.
                           2. Valid state with inappropriate selector. Skip.

                           Entering area of "sysdeps".

                           3. If state is not valid, selector is temporary,
                              it selects only session which triggered
                              previous resolution. Key manager will do
                              something to install a state with proper
                              selector.
                         */
                        if (x->km.state == XFRM_STATE_VALID) {
                                if (!xfrm_selector_match(&x->sel, fl, x->sel.family) ||
                                    !security_xfrm_state_pol_flow_match(x, pol, fl))
                                        continue;
                                if (!best ||
                                    best->km.dying > x->km.dying ||
                                    (best->km.dying == x->km.dying &&
                                     best->curlft.add_time < x->curlft.add_time))
                                        best = x;
                        } else if (x->km.state == XFRM_STATE_ACQ) {
                                acquire_in_progress = 1;
                        } else if (x->km.state == XFRM_STATE_ERROR ||
                                   x->km.state == XFRM_STATE_EXPIRED) {
                                if (xfrm_selector_match(&x->sel, fl, x->sel.family) &&
                                    security_xfrm_state_pol_flow_match(x, pol, fl))
                                        error = -ESRCH;
                        }
                }
        }

        x = best;
        if (!x && !error && !acquire_in_progress) {
                if (tmpl->id.spi &&
                    (x0 = __xfrm_state_lookup(daddr, tmpl->id.spi,
                                              tmpl->id.proto, family)) != NULL) {
                        xfrm_state_put(x0);
                        error = -EEXIST;
                        goto out;
                }
                x = xfrm_state_alloc();
                if (x == NULL) {
                        error = -ENOMEM;
                        goto out;
                }
                /* Initialize temporary selector matching only
                 * to current session. */
                xfrm_init_tempsel(x, fl, tmpl, daddr, saddr, family);

                error = security_xfrm_state_alloc_acquire(x, pol->security, fl->secid);
                if (error) {
                        x->km.state = XFRM_STATE_DEAD;
                        xfrm_state_put(x);
                        x = NULL;
                        goto out;
                }

                if (km_query(x, tmpl, pol) == 0) {
                        x->km.state = XFRM_STATE_ACQ;
                        hlist_add_head(&x->bydst, xfrm_state_bydst+h);
                        h = xfrm_src_hash(daddr, saddr, family);
                        hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);
                        if (x->id.spi) {
                                h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, family);
                                hlist_add_head(&x->byspi, xfrm_state_byspi+h);
                        }
                        x->lft.hard_add_expires_seconds = sysctl_xfrm_acq_expires;
                        x->timer.expires = jiffies + sysctl_xfrm_acq_expires*HZ;
                        add_timer(&x->timer);
                        xfrm_state_num++;
                        xfrm_hash_grow_check(x->bydst.next != NULL);
                } else {
                        x->km.state = XFRM_STATE_DEAD;
                        xfrm_state_put(x);
                        x = NULL;
                        error = -ESRCH;
                }
        }
out:
        if (x)
                xfrm_state_hold(x);
        else
                *err = acquire_in_progress ? -EAGAIN : error;
        spin_unlock_bh(&xfrm_state_lock);
        return x;
}

struct xfrm_state *
xfrm_stateonly_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
                    unsigned short family, u8 mode, u8 proto, u32 reqid)
{
        unsigned int h = xfrm_dst_hash(daddr, saddr, reqid, family);
        struct xfrm_state *rx = NULL, *x = NULL;
        struct hlist_node *entry;

        spin_lock(&xfrm_state_lock);
        hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
                if (x->props.family == family &&
                    x->props.reqid == reqid &&
                    !(x->props.flags & XFRM_STATE_WILDRECV) &&
                    xfrm_state_addr_check(x, daddr, saddr, family) &&
                    mode == x->props.mode &&
                    proto == x->id.proto &&
                    x->km.state == XFRM_STATE_VALID) {
                        rx = x;
                        break;
                }
        }

        if (rx)
                xfrm_state_hold(rx);
        spin_unlock(&xfrm_state_lock);

        return rx;
}
EXPORT_SYMBOL(xfrm_stateonly_find);
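
/* Both insert helpers below run with xfrm_state_lock held;
 * xfrm_state_insert() is the public wrapper that takes it.
 */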

static void __xfrm_state_insert(struct xfrm_state *x)
{
        unsigned int h;

        x->genid = ++xfrm_state_genid;

        h = xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
                          x->props.reqid, x->props.family);
        hlist_add_head(&x->bydst, xfrm_state_bydst+h);

        h = xfrm_src_hash(&x->id.daddr, &x->props.saddr, x->props.family);
        hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);

        if (x->id.spi) {
                h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto,
                                  x->props.family);

                hlist_add_head(&x->byspi, xfrm_state_byspi+h);
        }

        mod_timer(&x->timer, jiffies + HZ);
        if (x->replay_maxage)
                mod_timer(&x->rtimer, jiffies + x->replay_maxage);

        wake_up(&km_waitq);

        xfrm_state_num++;

        xfrm_hash_grow_check(x->bydst.next != NULL);
}

/* xfrm_state_lock is held */
static void __xfrm_state_bump_genids(struct xfrm_state *xnew)
{
        unsigned short family = xnew->props.family;
        u32 reqid = xnew->props.reqid;
        struct xfrm_state *x;
        struct hlist_node *entry;
        unsigned int h;

        h = xfrm_dst_hash(&xnew->id.daddr, &xnew->props.saddr, reqid, family);
        hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
                if (x->props.family == family &&
                    x->props.reqid == reqid &&
                    !xfrm_addr_cmp(&x->id.daddr, &xnew->id.daddr, family) &&
                    !xfrm_addr_cmp(&x->props.saddr, &xnew->props.saddr, family))
                        x->genid = xfrm_state_genid;
        }
}

void xfrm_state_insert(struct xfrm_state *x)
{
        spin_lock_bh(&xfrm_state_lock);
        __xfrm_state_bump_genids(x);
        __xfrm_state_insert(x);
        spin_unlock_bh(&xfrm_state_lock);
}
EXPORT_SYMBOL(xfrm_state_insert);

/* xfrm_state_lock is held */
static struct xfrm_state *__find_acq_core(unsigned short family, u8 mode, u32 reqid, u8 proto, xfrm_address_t *daddr, xfrm_address_t *saddr, int create)
{
        unsigned int h = xfrm_dst_hash(daddr, saddr, reqid, family);
        struct hlist_node *entry;
        struct xfrm_state *x;

        hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
                if (x->props.reqid  != reqid ||
                    x->props.mode   != mode ||
                    x->props.family != family ||
                    x->km.state     != XFRM_STATE_ACQ ||
                    x->id.spi       != 0 ||
                    x->id.proto     != proto)
                        continue;

                switch (family) {
                case AF_INET:
                        if (x->id.daddr.a4    != daddr->a4 ||
                            x->props.saddr.a4 != saddr->a4)
                                continue;
                        break;
                case AF_INET6:
                        if (!ipv6_addr_equal((struct in6_addr *)x->id.daddr.a6,
                                             (struct in6_addr *)daddr) ||
                            !ipv6_addr_equal((struct in6_addr *)
                                             x->props.saddr.a6,
                                             (struct in6_addr *)saddr))
                                continue;
                        break;
                }

                xfrm_state_hold(x);
                return x;
        }

        if (!create)
                return NULL;

        x = xfrm_state_alloc();
        if (likely(x)) {
                switch (family) {
                case AF_INET:
                        x->sel.daddr.a4 = daddr->a4;
                        x->sel.saddr.a4 = saddr->a4;
                        x->sel.prefixlen_d = 32;
                        x->sel.prefixlen_s = 32;
                        x->props.saddr.a4 = saddr->a4;
                        x->id.daddr.a4 = daddr->a4;
                        break;

                case AF_INET6:
                        ipv6_addr_copy((struct in6_addr *)x->sel.daddr.a6,
                                       (struct in6_addr *)daddr);
                        ipv6_addr_copy((struct in6_addr *)x->sel.saddr.a6,
                                       (struct in6_addr *)saddr);
                        x->sel.prefixlen_d = 128;
                        x->sel.prefixlen_s = 128;
                        ipv6_addr_copy((struct in6_addr *)x->props.saddr.a6,
                                       (struct in6_addr *)saddr);
                        ipv6_addr_copy((struct in6_addr *)x->id.daddr.a6,
                                       (struct in6_addr *)daddr);
                        break;
                }

                x->km.state = XFRM_STATE_ACQ;
                x->id.proto = proto;
                x->props.family = family;
                x->props.mode = mode;
                x->props.reqid = reqid;
                x->lft.hard_add_expires_seconds = sysctl_xfrm_acq_expires;
                xfrm_state_hold(x);
                x->timer.expires = jiffies + sysctl_xfrm_acq_expires*HZ;
                add_timer(&x->timer);
                hlist_add_head(&x->bydst, xfrm_state_bydst+h);
                h = xfrm_src_hash(daddr, saddr, family);
                hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);
                wake_up(&km_waitq);

                xfrm_state_num++;

                xfrm_hash_grow_check(x->bydst.next != NULL);
        }

        return x;
}

static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq);

int xfrm_state_add(struct xfrm_state *x)
{
        struct xfrm_state *x1;
        int family;
        int err;
        int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);

        family = x->props.family;

        spin_lock_bh(&xfrm_state_lock);

        x1 = __xfrm_state_locate(x, use_spi, family);
        if (x1) {
                xfrm_state_put(x1);
                x1 = NULL;
                err = -EEXIST;
                goto out;
        }

        if (use_spi && x->km.seq) {
                x1 = __xfrm_find_acq_byseq(x->km.seq);
                if (x1 && ((x1->id.proto != x->id.proto) ||
                    xfrm_addr_cmp(&x1->id.daddr, &x->id.daddr, family))) {
                        xfrm_state_put(x1);
                        x1 = NULL;
                }
        }

        if (use_spi && !x1)
                x1 = __find_acq_core(family, x->props.mode, x->props.reqid,
                                     x->id.proto,
                                     &x->id.daddr, &x->props.saddr, 0);

        __xfrm_state_bump_genids(x);
        __xfrm_state_insert(x);
        err = 0;

out:
        spin_unlock_bh(&xfrm_state_lock);

        if (x1) {
                xfrm_state_delete(x1);
                xfrm_state_put(x1);
        }

        return err;
}
EXPORT_SYMBOL(xfrm_state_add);
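
/* State migration (CONFIG_XFRM_MIGRATE) backs the PF_KEY/netlink MIGRATE
 * operation, used for example by Mobile IPv6: an existing SA is cloned,
 * the clone is given the new endpoint addresses, and the old state is
 * torn down by the caller once the clone is in place.
 */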

#ifdef CONFIG_XFRM_MIGRATE
struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, int *errp)
{
        int err = -ENOMEM;
        struct xfrm_state *x = xfrm_state_alloc();
        if (!x)
                goto error;

        memcpy(&x->id, &orig->id, sizeof(x->id));
        memcpy(&x->sel, &orig->sel, sizeof(x->sel));
        memcpy(&x->lft, &orig->lft, sizeof(x->lft));
        x->props.mode = orig->props.mode;
        x->props.replay_window = orig->props.replay_window;
        x->props.reqid = orig->props.reqid;
        x->props.family = orig->props.family;
        x->props.saddr = orig->props.saddr;

        if (orig->aalg) {
                x->aalg = xfrm_algo_clone(orig->aalg);
                if (!x->aalg)
                        goto error;
        }
        x->props.aalgo = orig->props.aalgo;

        if (orig->ealg) {
                x->ealg = xfrm_algo_clone(orig->ealg);
                if (!x->ealg)
                        goto error;
        }
        x->props.ealgo = orig->props.ealgo;

        if (orig->calg) {
                x->calg = xfrm_algo_clone(orig->calg);
                if (!x->calg)
                        goto error;
        }
        x->props.calgo = orig->props.calgo;

        if (orig->encap) {
                x->encap = kmemdup(orig->encap, sizeof(*x->encap), GFP_KERNEL);
                if (!x->encap)
                        goto error;
        }

        if (orig->coaddr) {
                x->coaddr = kmemdup(orig->coaddr, sizeof(*x->coaddr),
                                    GFP_KERNEL);
                if (!x->coaddr)
                        goto error;
        }

        err = xfrm_init_state(x);
        if (err)
                goto error;

        x->props.flags = orig->props.flags;

        x->curlft.add_time = orig->curlft.add_time;
        x->km.state = orig->km.state;
        x->km.seq = orig->km.seq;

        return x;

 error:
        if (errp)
                *errp = err;
        if (x) {
                kfree(x->aalg);
                kfree(x->ealg);
                kfree(x->calg);
                kfree(x->encap);
                kfree(x->coaddr);
        }
        kfree(x);
        return NULL;
}
EXPORT_SYMBOL(xfrm_state_clone);

/* xfrm_state_lock is held */
struct xfrm_state * xfrm_migrate_state_find(struct xfrm_migrate *m)
{
        unsigned int h;
        struct xfrm_state *x;
        struct hlist_node *entry;

        if (m->reqid) {
                h = xfrm_dst_hash(&m->old_daddr, &m->old_saddr,
                                  m->reqid, m->old_family);
                hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
                        if (x->props.mode != m->mode ||
                            x->id.proto != m->proto)
                                continue;
                        if (m->reqid && x->props.reqid != m->reqid)
                                continue;
                        if (xfrm_addr_cmp(&x->id.daddr, &m->old_daddr,
                                          m->old_family) ||
                            xfrm_addr_cmp(&x->props.saddr, &m->old_saddr,
                                          m->old_family))
                                continue;
                        xfrm_state_hold(x);
                        return x;
                }
        } else {
                h = xfrm_src_hash(&m->old_daddr, &m->old_saddr,
                                  m->old_family);
                hlist_for_each_entry(x, entry, xfrm_state_bysrc+h, bysrc) {
                        if (x->props.mode != m->mode ||
                            x->id.proto != m->proto)
                                continue;
                        if (xfrm_addr_cmp(&x->id.daddr, &m->old_daddr,
                                          m->old_family) ||
                            xfrm_addr_cmp(&x->props.saddr, &m->old_saddr,
                                          m->old_family))
                                continue;
                        xfrm_state_hold(x);
                        return x;
                }
        }

        return NULL;
}
EXPORT_SYMBOL(xfrm_migrate_state_find);

struct xfrm_state * xfrm_state_migrate(struct xfrm_state *x,
                                       struct xfrm_migrate *m)
{
        struct xfrm_state *xc;
        int err;

        xc = xfrm_state_clone(x, &err);
        if (!xc)
                return NULL;

        memcpy(&xc->id.daddr, &m->new_daddr, sizeof(xc->id.daddr));
        memcpy(&xc->props.saddr, &m->new_saddr, sizeof(xc->props.saddr));

        /* add state */
        if (!xfrm_addr_cmp(&x->id.daddr, &m->new_daddr, m->new_family)) {
                /* care is needed when the destination address of the
                   state is to be updated, as it is part of the lookup
                   triplet */
                xfrm_state_insert(xc);
        } else {
                if ((err = xfrm_state_add(xc)) < 0)
                        goto error;
        }

        return xc;
error:
        kfree(xc);
        return NULL;
}
EXPORT_SYMBOL(xfrm_state_migrate);
#endif
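
/* xfrm_state_update() replaces the mutable parts of an existing SA
 * (lifetimes, selector, NAT-T encapsulation, care-of address) in place,
 * keeping its (daddr, spi, proto) identity.  If the existing entry is
 * still a larval ACQ state, the new state simply takes its place.
 */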

int xfrm_state_update(struct xfrm_state *x)
{
        struct xfrm_state *x1;
        int err;
        int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);

        spin_lock_bh(&xfrm_state_lock);
        x1 = __xfrm_state_locate(x, use_spi, x->props.family);

        err = -ESRCH;
        if (!x1)
                goto out;

        if (xfrm_state_kern(x1)) {
                xfrm_state_put(x1);
                err = -EEXIST;
                goto out;
        }

        if (x1->km.state == XFRM_STATE_ACQ) {
                __xfrm_state_insert(x);
                x = NULL;
        }
        err = 0;

out:
        spin_unlock_bh(&xfrm_state_lock);

        if (err)
                return err;

        if (!x) {
                xfrm_state_delete(x1);
                xfrm_state_put(x1);
                return 0;
        }

        err = -EINVAL;
        spin_lock_bh(&x1->lock);
        if (likely(x1->km.state == XFRM_STATE_VALID)) {
                if (x->encap && x1->encap)
                        memcpy(x1->encap, x->encap, sizeof(*x1->encap));
                if (x->coaddr && x1->coaddr) {
                        memcpy(x1->coaddr, x->coaddr, sizeof(*x1->coaddr));
                }
                if (!use_spi && memcmp(&x1->sel, &x->sel, sizeof(x1->sel)))
                        memcpy(&x1->sel, &x->sel, sizeof(x1->sel));
                memcpy(&x1->lft, &x->lft, sizeof(x1->lft));
                x1->km.dying = 0;

                mod_timer(&x1->timer, jiffies + HZ);
                if (x1->curlft.use_time)
                        xfrm_state_check_expire(x1);

                err = 0;
        }
        spin_unlock_bh(&x1->lock);

        xfrm_state_put(x1);

        return err;
}
EXPORT_SYMBOL(xfrm_state_update);

int xfrm_state_check_expire(struct xfrm_state *x)
{
        if (!x->curlft.use_time)
                x->curlft.use_time = get_seconds();

        if (x->km.state != XFRM_STATE_VALID)
                return -EINVAL;

        if (x->curlft.bytes >= x->lft.hard_byte_limit ||
            x->curlft.packets >= x->lft.hard_packet_limit) {
                x->km.state = XFRM_STATE_EXPIRED;
                mod_timer(&x->timer, jiffies);
                return -EINVAL;
        }

        if (!x->km.dying &&
            (x->curlft.bytes >= x->lft.soft_byte_limit ||
             x->curlft.packets >= x->lft.soft_packet_limit)) {
                x->km.dying = 1;
                km_state_expired(x, 0, 0);
        }
        return 0;
}
EXPORT_SYMBOL(xfrm_state_check_expire);

static int xfrm_state_check_space(struct xfrm_state *x, struct sk_buff *skb)
{
        int nhead = x->props.header_len + LL_RESERVED_SPACE(skb->dst->dev)
                - skb_headroom(skb);

        if (nhead > 0)
                return pskb_expand_head(skb, nhead, 0, GFP_ATOMIC);

        /* Check tail too... */
        return 0;
}

int xfrm_state_check(struct xfrm_state *x, struct sk_buff *skb)
{
        int err = xfrm_state_check_expire(x);
        if (err < 0)
                goto err;
        err = xfrm_state_check_space(x, skb);
err:
        return err;
}
EXPORT_SYMBOL(xfrm_state_check);

struct xfrm_state *
xfrm_state_lookup(xfrm_address_t *daddr, __be32 spi, u8 proto,
                  unsigned short family)
{
        struct xfrm_state *x;

        spin_lock_bh(&xfrm_state_lock);
        x = __xfrm_state_lookup(daddr, spi, proto, family);
        spin_unlock_bh(&xfrm_state_lock);
        return x;
}
EXPORT_SYMBOL(xfrm_state_lookup);

struct xfrm_state *
xfrm_state_lookup_byaddr(xfrm_address_t *daddr, xfrm_address_t *saddr,
                         u8 proto, unsigned short family)
{
        struct xfrm_state *x;

        spin_lock_bh(&xfrm_state_lock);
        x = __xfrm_state_lookup_byaddr(daddr, saddr, proto, family);
        spin_unlock_bh(&xfrm_state_lock);
        return x;
}
EXPORT_SYMBOL(xfrm_state_lookup_byaddr);

struct xfrm_state *
xfrm_find_acq(u8 mode, u32 reqid, u8 proto,
              xfrm_address_t *daddr, xfrm_address_t *saddr,
              int create, unsigned short family)
{
        struct xfrm_state *x;

        spin_lock_bh(&xfrm_state_lock);
        x = __find_acq_core(family, mode, reqid, proto, daddr, saddr, create);
        spin_unlock_bh(&xfrm_state_lock);

        return x;
}
EXPORT_SYMBOL(xfrm_find_acq);

#ifdef CONFIG_XFRM_SUB_POLICY
int
xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n,
               unsigned short family)
{
        int err = 0;
        struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
        if (!afinfo)
                return -EAFNOSUPPORT;

        spin_lock_bh(&xfrm_state_lock);
        if (afinfo->tmpl_sort)
                err = afinfo->tmpl_sort(dst, src, n);
        spin_unlock_bh(&xfrm_state_lock);
        xfrm_state_put_afinfo(afinfo);
        return err;
}
EXPORT_SYMBOL(xfrm_tmpl_sort);

int
xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
                unsigned short family)
{
        int err = 0;
        struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
        if (!afinfo)
                return -EAFNOSUPPORT;

        spin_lock_bh(&xfrm_state_lock);
        if (afinfo->state_sort)
                err = afinfo->state_sort(dst, src, n);
        spin_unlock_bh(&xfrm_state_lock);
        xfrm_state_put_afinfo(afinfo);
        return err;
}
EXPORT_SYMBOL(xfrm_state_sort);
#endif

/* Silly enough, but I'm too lazy to build a resolution list */

static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq)
{
        int i;

        for (i = 0; i <= xfrm_state_hmask; i++) {
                struct hlist_node *entry;
                struct xfrm_state *x;

                hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
                        if (x->km.seq == seq &&
                            x->km.state == XFRM_STATE_ACQ) {
                                xfrm_state_hold(x);
                                return x;
                        }
                }
        }
        return NULL;
}

struct xfrm_state *xfrm_find_acq_byseq(u32 seq)
{
        struct xfrm_state *x;

        spin_lock_bh(&xfrm_state_lock);
        x = __xfrm_find_acq_byseq(seq);
        spin_unlock_bh(&xfrm_state_lock);
        return x;
}
EXPORT_SYMBOL(xfrm_find_acq_byseq);
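
/* xfrm_get_acqseq() below hands out sequence numbers for acquire
 * requests.  The "(++acqseq ? : ++acqseq)" idiom increments a second
 * time on wrap-around so that 0 is never returned; a seq of 0 is
 * treated elsewhere (e.g. in xfrm_state_add()) as "unset".
 */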

u32 xfrm_get_acqseq(void)
{
        u32 res;
        static u32 acqseq;
        static DEFINE_SPINLOCK(acqseq_lock);

        spin_lock_bh(&acqseq_lock);
        res = (++acqseq ? : ++acqseq);
        spin_unlock_bh(&acqseq_lock);
        return res;
}
EXPORT_SYMBOL(xfrm_get_acqseq);

void
xfrm_alloc_spi(struct xfrm_state *x, __be32 minspi, __be32 maxspi)
{
        unsigned int h;
        struct xfrm_state *x0;

        if (x->id.spi)
                return;

        if (minspi == maxspi) {
                x0 = xfrm_state_lookup(&x->id.daddr, minspi, x->id.proto, x->props.family);
                if (x0) {
                        xfrm_state_put(x0);
                        return;
                }
                x->id.spi = minspi;
        } else {
                u32 spi = 0;
                u32 low = ntohl(minspi);
                u32 high = ntohl(maxspi);
                for (h=0; h<high-low+1; h++) {
                        spi = low + net_random()%(high-low+1);
                        x0 = xfrm_state_lookup(&x->id.daddr, htonl(spi), x->id.proto, x->props.family);
                        if (x0 == NULL) {
                                x->id.spi = htonl(spi);
                                break;
                        }
                        xfrm_state_put(x0);
                }
        }
        if (x->id.spi) {
                spin_lock_bh(&xfrm_state_lock);
                h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, x->props.family);
                hlist_add_head(&x->byspi, xfrm_state_byspi+h);
                spin_unlock_bh(&xfrm_state_lock);
                wake_up(&km_waitq);
        }
}
EXPORT_SYMBOL(xfrm_alloc_spi);
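
/* xfrm_state_walk() below visits every SA matching @proto while holding
 * xfrm_state_lock, so the callback must not sleep or re-enter state code.
 * A minimal sketch of a caller (count_one/count_sa are made-up names, not
 * part of this file):
 *
 *	static int count_one(struct xfrm_state *x, int count, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;
 *	}
 *
 *	int count_sa(void)
 *	{
 *		int n = 0;
 *		xfrm_state_walk(IPSEC_PROTO_ANY, count_one, &n);
 *		return n;
 *	}
 *
 * Note the walker invokes func() with a lag of one entry and calls it a
 * final time with count == 0, so the consumer can tell which entry is
 * the last one.
 */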

int xfrm_state_walk(u8 proto, int (*func)(struct xfrm_state *, int, void*),
                    void *data)
{
        int i;
        struct xfrm_state *x, *last = NULL;
        struct hlist_node *entry;
        int count = 0;
        int err = 0;

        spin_lock_bh(&xfrm_state_lock);
        for (i = 0; i <= xfrm_state_hmask; i++) {
                hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
                        if (!xfrm_id_proto_match(x->id.proto, proto))
                                continue;
                        if (last) {
                                err = func(last, count, data);
                                if (err)
                                        goto out;
                        }
                        last = x;
                        count++;
                }
        }
        if (count == 0) {
                err = -ENOENT;
                goto out;
        }
        err = func(last, 0, data);
out:
        spin_unlock_bh(&xfrm_state_lock);
        return err;
}
EXPORT_SYMBOL(xfrm_state_walk);

void xfrm_replay_notify(struct xfrm_state *x, int event)
{
        struct km_event c;
        /* We send notify messages in case
         *  1. we updated one of the sequence numbers, and the seqno
         *     difference is at least x->replay_maxdiff; in this case we
         *     also update the timeout of our timer function
         *  2. if x->replay_maxage has elapsed since the last update,
         *     and there were changes
         *
         *  The state structure must be locked!
         */

        switch (event) {
        case XFRM_REPLAY_UPDATE:
                if (x->replay_maxdiff &&
                    (x->replay.seq - x->preplay.seq < x->replay_maxdiff) &&
                    (x->replay.oseq - x->preplay.oseq < x->replay_maxdiff)) {
                        if (x->xflags & XFRM_TIME_DEFER)
                                event = XFRM_REPLAY_TIMEOUT;
                        else
                                return;
                }

                break;

        case XFRM_REPLAY_TIMEOUT:
                if ((x->replay.seq == x->preplay.seq) &&
                    (x->replay.bitmap == x->preplay.bitmap) &&
                    (x->replay.oseq == x->preplay.oseq)) {
                        x->xflags |= XFRM_TIME_DEFER;
                        return;
                }

                break;
        }

        memcpy(&x->preplay, &x->replay, sizeof(struct xfrm_replay_state));
        c.event = XFRM_MSG_NEWAE;
        c.data.aevent = event;
        km_state_notify(x, &c);

        if (x->replay_maxage &&
            !mod_timer(&x->rtimer, jiffies + x->replay_maxage))
                x->xflags &= ~XFRM_TIME_DEFER;
}
EXPORT_SYMBOL(xfrm_replay_notify);

static void xfrm_replay_timer_handler(unsigned long data)
{
        struct xfrm_state *x = (struct xfrm_state*)data;

        spin_lock(&x->lock);

        if (x->km.state == XFRM_STATE_VALID) {
                if (xfrm_aevent_is_on())
                        xfrm_replay_notify(x, XFRM_REPLAY_TIMEOUT);
                else
                        x->xflags |= XFRM_TIME_DEFER;
        }

        spin_unlock(&x->lock);
}
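
/* The anti-replay window is a bitmap anchored at the highest sequence
 * number seen (replay.seq); bit N set means "replay.seq - N has been
 * received".  Worked example with a 4-bit window, replay.seq = 10 and
 * bitmap = 0101: when seq 11 arrives, xfrm_replay_advance() shifts to
 * bitmap = 1011 and replay.seq = 11; a replayed seq 10 then hits bit 1
 * in xfrm_replay_check() and is rejected.
 */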

int xfrm_replay_check(struct xfrm_state *x, __be32 net_seq)
{
        u32 diff;
        u32 seq = ntohl(net_seq);

        if (unlikely(seq == 0))
                return -EINVAL;

        if (likely(seq > x->replay.seq))
                return 0;

        diff = x->replay.seq - seq;
        if (diff >= min_t(unsigned int, x->props.replay_window,
                          sizeof(x->replay.bitmap) * 8)) {
                x->stats.replay_window++;
                return -EINVAL;
        }

        if (x->replay.bitmap & (1U << diff)) {
                x->stats.replay++;
                return -EINVAL;
        }
        return 0;
}
EXPORT_SYMBOL(xfrm_replay_check);

void xfrm_replay_advance(struct xfrm_state *x, __be32 net_seq)
{
        u32 diff;
        u32 seq = ntohl(net_seq);

        if (seq > x->replay.seq) {
                diff = seq - x->replay.seq;
                if (diff < x->props.replay_window)
                        x->replay.bitmap = ((x->replay.bitmap) << diff) | 1;
                else
                        x->replay.bitmap = 1;
                x->replay.seq = seq;
        } else {
                diff = x->replay.seq - seq;
                x->replay.bitmap |= (1U << diff);
        }

        if (xfrm_aevent_is_on())
                xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
}
EXPORT_SYMBOL(xfrm_replay_advance);

static struct list_head xfrm_km_list = LIST_HEAD_INIT(xfrm_km_list);
static DEFINE_RWLOCK(xfrm_km_lock);
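
/* Key managers (af_key for PF_KEY sockets, xfrm_user for netlink)
 * register an xfrm_mgr through xfrm_register_km().  The km_* helpers
 * below broadcast events to every registered manager; per-manager
 * callbacks are optional, hence the NULL checks.
 */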

void km_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
{
        struct xfrm_mgr *km;

        read_lock(&xfrm_km_lock);
        list_for_each_entry(km, &xfrm_km_list, list)
                if (km->notify_policy)
                        km->notify_policy(xp, dir, c);
        read_unlock(&xfrm_km_lock);
}

void km_state_notify(struct xfrm_state *x, struct km_event *c)
{
        struct xfrm_mgr *km;
        read_lock(&xfrm_km_lock);
        list_for_each_entry(km, &xfrm_km_list, list)
                if (km->notify)
                        km->notify(x, c);
        read_unlock(&xfrm_km_lock);
}

EXPORT_SYMBOL(km_policy_notify);
EXPORT_SYMBOL(km_state_notify);

void km_state_expired(struct xfrm_state *x, int hard, u32 pid)
{
        struct km_event c;

        c.data.hard = hard;
        c.pid = pid;
        c.event = XFRM_MSG_EXPIRE;
        km_state_notify(x, &c);

        if (hard)
                wake_up(&km_waitq);
}
EXPORT_SYMBOL(km_state_expired);

/*
 * We send to all registered managers regardless of failure.
 * We are happy with one success.
 */
int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol)
{
        int err = -EINVAL, acqret;
        struct xfrm_mgr *km;

        read_lock(&xfrm_km_lock);
        list_for_each_entry(km, &xfrm_km_list, list) {
                acqret = km->acquire(x, t, pol, XFRM_POLICY_OUT);
                if (!acqret)
                        err = acqret;
        }
        read_unlock(&xfrm_km_lock);
        return err;
}
EXPORT_SYMBOL(km_query);

int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport)
{
        int err = -EINVAL;
        struct xfrm_mgr *km;

        read_lock(&xfrm_km_lock);
        list_for_each_entry(km, &xfrm_km_list, list) {
                if (km->new_mapping)
                        err = km->new_mapping(x, ipaddr, sport);
                if (!err)
                        break;
        }
        read_unlock(&xfrm_km_lock);
        return err;
}
EXPORT_SYMBOL(km_new_mapping);

void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 pid)
{
        struct km_event c;

        c.data.hard = hard;
        c.pid = pid;
        c.event = XFRM_MSG_POLEXPIRE;
        km_policy_notify(pol, dir, &c);

        if (hard)
                wake_up(&km_waitq);
}
EXPORT_SYMBOL(km_policy_expired);

int km_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
               struct xfrm_migrate *m, int num_migrate)
{
        int err = -EINVAL;
        int ret;
        struct xfrm_mgr *km;

        read_lock(&xfrm_km_lock);
        list_for_each_entry(km, &xfrm_km_list, list) {
                if (km->migrate) {
                        ret = km->migrate(sel, dir, type, m, num_migrate);
                        if (!ret)
                                err = ret;
                }
        }
        read_unlock(&xfrm_km_lock);
        return err;
}
EXPORT_SYMBOL(km_migrate);

int km_report(u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr)
{
        int err = -EINVAL;
        int ret;
        struct xfrm_mgr *km;

        read_lock(&xfrm_km_lock);
        list_for_each_entry(km, &xfrm_km_list, list) {
                if (km->report) {
                        ret = km->report(proto, sel, addr);
                        if (!ret)
                                err = ret;
                }
        }
        read_unlock(&xfrm_km_lock);
        return err;
}
EXPORT_SYMBOL(km_report);

int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
{
        int err;
        u8 *data;
        struct xfrm_mgr *km;
        struct xfrm_policy *pol = NULL;

        if (optlen <= 0 || optlen > PAGE_SIZE)
                return -EMSGSIZE;

        data = kmalloc(optlen, GFP_KERNEL);
        if (!data)
                return -ENOMEM;

        err = -EFAULT;
        if (copy_from_user(data, optval, optlen))
                goto out;

        err = -EINVAL;
        read_lock(&xfrm_km_lock);
        list_for_each_entry(km, &xfrm_km_list, list) {
                pol = km->compile_policy(sk, optname, data,
                                         optlen, &err);
                if (err >= 0)
                        break;
        }
        read_unlock(&xfrm_km_lock);

        if (err >= 0) {
                xfrm_sk_policy_insert(sk, err, pol);
                xfrm_pol_put(pol);
                err = 0;
        }

out:
        kfree(data);
        return err;
}
EXPORT_SYMBOL(xfrm_user_policy);

int xfrm_register_km(struct xfrm_mgr *km)
{
        write_lock_bh(&xfrm_km_lock);
        list_add_tail(&km->list, &xfrm_km_list);
        write_unlock_bh(&xfrm_km_lock);
        return 0;
}
EXPORT_SYMBOL(xfrm_register_km);

int xfrm_unregister_km(struct xfrm_mgr *km)
{
        write_lock_bh(&xfrm_km_lock);
        list_del(&km->list);
        write_unlock_bh(&xfrm_km_lock);
        return 0;
}
EXPORT_SYMBOL(xfrm_unregister_km);

int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo)
{
        int err = 0;
        if (unlikely(afinfo == NULL))
                return -EINVAL;
        if (unlikely(afinfo->family >= NPROTO))
                return -EAFNOSUPPORT;
        write_lock_bh(&xfrm_state_afinfo_lock);
        if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL))
                err = -ENOBUFS;
        else
                xfrm_state_afinfo[afinfo->family] = afinfo;
        write_unlock_bh(&xfrm_state_afinfo_lock);
        return err;
}
EXPORT_SYMBOL(xfrm_state_register_afinfo);

int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
{
        int err = 0;
        if (unlikely(afinfo == NULL))
                return -EINVAL;
        if (unlikely(afinfo->family >= NPROTO))
                return -EAFNOSUPPORT;
        write_lock_bh(&xfrm_state_afinfo_lock);
        if (likely(xfrm_state_afinfo[afinfo->family] != NULL)) {
                if (unlikely(xfrm_state_afinfo[afinfo->family] != afinfo))
                        err = -EINVAL;
                else
                        xfrm_state_afinfo[afinfo->family] = NULL;
        }
        write_unlock_bh(&xfrm_state_afinfo_lock);
        return err;
}
EXPORT_SYMBOL(xfrm_state_unregister_afinfo);

struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned short family)
{
        struct xfrm_state_afinfo *afinfo;
        if (unlikely(family >= NPROTO))
                return NULL;
        read_lock(&xfrm_state_afinfo_lock);
        afinfo = xfrm_state_afinfo[family];
        if (unlikely(!afinfo))
                read_unlock(&xfrm_state_afinfo_lock);
        return afinfo;
}

void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo)
{
        read_unlock(&xfrm_state_afinfo_lock);
}

EXPORT_SYMBOL(xfrm_state_get_afinfo);
EXPORT_SYMBOL(xfrm_state_put_afinfo);

/* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
void xfrm_state_delete_tunnel(struct xfrm_state *x)
{
        if (x->tunnel) {
                struct xfrm_state *t = x->tunnel;

                if (atomic_read(&t->tunnel_users) == 2)
                        xfrm_state_delete(t);
                atomic_dec(&t->tunnel_users);
                xfrm_state_put(t);
                x->tunnel = NULL;
        }
}
EXPORT_SYMBOL(xfrm_state_delete_tunnel);
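
/* xfrm_state_mtu() converts a link MTU into the payload MTU left after
 * this transform's overhead.  Types such as ESP supply get_mtu() to
 * account for padding alignment and the trailing ICV; otherwise a plain
 * header_len subtraction is assumed to be close enough.
 */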

int xfrm_state_mtu(struct xfrm_state *x, int mtu)
{
        int res;

        spin_lock_bh(&x->lock);
        if (x->km.state == XFRM_STATE_VALID &&
            x->type && x->type->get_mtu)
                res = x->type->get_mtu(x, mtu);
        else
                res = mtu - x->props.header_len;
        spin_unlock_bh(&x->lock);
        return res;
}

int xfrm_init_state(struct xfrm_state *x)
{
        struct xfrm_state_afinfo *afinfo;
        int family = x->props.family;
        int err;

        err = -EAFNOSUPPORT;
        afinfo = xfrm_state_get_afinfo(family);
        if (!afinfo)
                goto error;

        err = 0;
        if (afinfo->init_flags)
                err = afinfo->init_flags(x);

        xfrm_state_put_afinfo(afinfo);

        if (err)
                goto error;

        err = -EPROTONOSUPPORT;
        x->type = xfrm_get_type(x->id.proto, family);
        if (x->type == NULL)
                goto error;

        err = x->type->init_state(x);
        if (err)
                goto error;

        x->mode = xfrm_get_mode(x->props.mode, family);
        if (x->mode == NULL)
                goto error;

        x->km.state = XFRM_STATE_VALID;

error:
        return err;
}

EXPORT_SYMBOL(xfrm_init_state);

void __init xfrm_state_init(void)
{
        unsigned int sz;

        sz = sizeof(struct hlist_head) * 8;

        xfrm_state_bydst = xfrm_hash_alloc(sz);
        xfrm_state_bysrc = xfrm_hash_alloc(sz);
        xfrm_state_byspi = xfrm_hash_alloc(sz);
        if (!xfrm_state_bydst || !xfrm_state_bysrc || !xfrm_state_byspi)
                panic("XFRM: Cannot allocate bydst/bysrc/byspi hashes.");
        xfrm_state_hmask = ((sz / sizeof(struct hlist_head)) - 1);

        INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task);
}