6 * Kazunori MIYAZAWA @USAGI
7 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
9 * YOSHIFUJI Hideaki @USAGI
10 * Split up af-specific functions
11 * Derek Atkins <derek@ihtfp.com>
12 * Add UDP Encapsulation
16 #include <linux/workqueue.h>
18 #include <linux/pfkeyv2.h>
19 #include <linux/ipsec.h>
20 #include <linux/module.h>
21 #include <asm/uaccess.h>
23 /* Each xfrm_state may be linked to two tables:
25 1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl)
26 2. Hash table by daddr to find what SAs exist for given
27 destination/tunnel endpoint. (output)
/* Protects xfrm_state_bydst/xfrm_state_byspi and all list operations
 * performed on them throughout this file. */
30 static DEFINE_SPINLOCK(xfrm_state_lock);
32 /* Hash table to find appropriate SA towards given target (endpoint
33 * of tunnel or destination of transport mode) allowed by selector.
35 * Main use is finding SA after policy selected tunnel or transport mode.
36 * Also, it can be used by ah/esp icmp error handler to find offending SA.
38 static struct list_head xfrm_state_bydst[XFRM_DST_HSIZE];
39 static struct list_head xfrm_state_byspi[XFRM_DST_HSIZE];
/* Wait queue for key-manager listeners; exported for use by KM modules. */
41 DECLARE_WAIT_QUEUE_HEAD(km_waitq);
42 EXPORT_SYMBOL(km_waitq);
/* Guards the per-family afinfo registration array below. */
44 static DEFINE_RWLOCK(xfrm_state_afinfo_lock);
45 static struct xfrm_state_afinfo *xfrm_state_afinfo[NPROTO];
/* Deferred-destruction machinery: dead states are queued on
 * xfrm_state_gc_list (under xfrm_state_gc_lock) and freed from the
 * xfrm_state_gc_work work item. */
47 static struct work_struct xfrm_state_gc_work;
48 static struct list_head xfrm_state_gc_list = LIST_HEAD_INIT(xfrm_state_gc_list);
49 static DEFINE_SPINLOCK(xfrm_state_gc_lock);
/* Flag asking the GC task to also flush cached dst bundles; set when a
 * deleted state still has DST references attached. */
51 static int xfrm_state_gc_flush_bundles;
/* Forward declarations for helpers defined later in this file. */
53 static void __xfrm_state_delete(struct xfrm_state *x);
55 static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned short family);
56 static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);
58 static int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
59 static void km_state_expired(struct xfrm_state *x, int hard);
/* Final teardown of one dead xfrm_state, called from the GC work task:
 * stop its pending timer, run the transform type's destructor and drop
 * the reference on the type module.
 * NOTE(review): several original lines are missing from this chunk
 * (e.g. the del_timer() consequence and the final kfree), so the full
 * teardown sequence is not visible here — confirm against the complete file. */
61 static void xfrm_state_gc_destroy(struct xfrm_state *x)
63 if (del_timer(&x->timer))
74 x->type->destructor(x);
75 xfrm_put_type(x->type);
/* Work-queue handler that frees queued dead states.  Splices the global
 * GC list onto a private list under the GC lock (so new deaths can keep
 * queueing), then destroys each entry outside the lock. */
80 static void xfrm_state_gc_task(void *data)
83 struct list_head *entry, *tmp;
84 struct list_head gc_list = LIST_HEAD_INIT(gc_list);
/* If a deleted state still had dst bundles attached, flush them first
 * (the flush call itself falls in a gap of this chunk). */
86 if (xfrm_state_gc_flush_bundles) {
87 xfrm_state_gc_flush_bundles = 0;
/* Take the whole pending list in one shot; the lock is only held for
 * the splice, keeping the expensive destruction unlocked. */
91 spin_lock_bh(&xfrm_state_gc_lock);
92 list_splice_init(&xfrm_state_gc_list, &gc_list);
93 spin_unlock_bh(&xfrm_state_gc_lock);
/* _safe variant: each entry is unlinked/freed while iterating. */
95 list_for_each_safe(entry, tmp, &gc_list) {
96 x = list_entry(entry, struct xfrm_state, bydst);
97 xfrm_state_gc_destroy(x);
/* Convert a timeout in seconds to jiffies, clamping at
 * MAX_SCHEDULE_TIMEOUT-1 so the secs*HZ multiplication (in the
 * not-visible else branch) cannot overflow a long. */
102 static inline unsigned long make_jiffies(long secs)
104 if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
105 return MAX_SCHEDULE_TIMEOUT-1;
/* Per-state lifetime timer.  Walks the four second-based lifetime limits
 * (hard/soft x add/use), computes the time remaining for each, fires
 * soft (hard=0) or hard (hard=1) expiry notifications via
 * km_state_expired(), and re-arms the timer for the nearest future
 * deadline.  Runs with x->lock held (unlock visible at the end; the
 * matching spin_lock sits in a gap of this chunk). */
110 static void xfrm_timer_handler(unsigned long data)
112 struct xfrm_state *x = (struct xfrm_state*)data;
/* Wall-clock seconds; lifetimes here are tracked in xtime seconds,
 * not jiffies. */
113 unsigned long now = (unsigned long)xtime.tv_sec;
/* Nearest next deadline in seconds; LONG_MAX == "no re-arm needed". */
114 long next = LONG_MAX;
118 if (x->km.state == XFRM_STATE_DEAD)
120 if (x->km.state == XFRM_STATE_EXPIRED)
/* Hard add-time limit: measured from when the SA was created. */
122 if (x->lft.hard_add_expires_seconds) {
123 long tmo = x->lft.hard_add_expires_seconds +
124 x->curlft.add_time - now;
/* Hard use-time limit: measured from first use; "?: now" means an
 * unused SA counts as if first used right now. */
130 if (x->lft.hard_use_expires_seconds) {
131 long tmo = x->lft.hard_use_expires_seconds +
132 (x->curlft.use_time ? : now) - now;
/* Soft limits mirror the hard ones but only warn the key manager. */
140 if (x->lft.soft_add_expires_seconds) {
141 long tmo = x->lft.soft_add_expires_seconds +
142 x->curlft.add_time - now;
148 if (x->lft.soft_use_expires_seconds) {
149 long tmo = x->lft.soft_use_expires_seconds +
150 (x->curlft.use_time ? : now) - now;
/* Soft expiry: notify KM (hard=0) so it can start renegotiation. */
158 km_state_expired(x, 0);
/* Re-arm for the earliest pending deadline; mod_timer() returning 0
 * means the timer was not pending (refcount handling is in a gap). */
160 if (next != LONG_MAX &&
161 !mod_timer(&x->timer, jiffies + make_jiffies(next)))
/* An ACQ state that never got an SPI assigned has failed resolution:
 * mark it expired outright. */
166 if (x->km.state == XFRM_STATE_ACQ && x->id.spi == 0) {
167 x->km.state = XFRM_STATE_EXPIRED;
/* Hard expiry: notify KM (hard=1) and remove the state. */
173 km_state_expired(x, 1);
174 __xfrm_state_delete(x);
177 spin_unlock(&x->lock);
/* Allocate and zero-initialize a new xfrm_state with one reference.
 * GFP_ATOMIC because callers may hold spinlocks.  Sets up the per-state
 * timer (handler gets the state pointer via timer.data), stamps the
 * creation time, and defaults all byte/packet lifetime limits to
 * "infinite" (XFRM_INF).  Returns NULL on allocation failure (the NULL
 * check itself sits in a gap of this chunk). */
181 struct xfrm_state *xfrm_state_alloc(void)
183 struct xfrm_state *x;
185 x = kmalloc(sizeof(struct xfrm_state), GFP_ATOMIC);
188 memset(x, 0, sizeof(struct xfrm_state));
/* One reference for the caller; no tunnel users yet. */
189 atomic_set(&x->refcnt, 1);
190 atomic_set(&x->tunnel_users, 0);
/* Not yet hashed anywhere; self-linked list heads. */
191 INIT_LIST_HEAD(&x->bydst);
192 INIT_LIST_HEAD(&x->byspi);
193 init_timer(&x->timer);
194 x->timer.function = xfrm_timer_handler;
195 x->timer.data = (unsigned long)x;
/* Creation time in wall-clock seconds, used by lifetime accounting. */
196 x->curlft.add_time = (unsigned long)xtime.tv_sec;
197 x->lft.soft_byte_limit = XFRM_INF;
198 x->lft.soft_packet_limit = XFRM_INF;
199 x->lft.hard_byte_limit = XFRM_INF;
200 x->lft.hard_packet_limit = XFRM_INF;
201 spin_lock_init(&x->lock);
205 EXPORT_SYMBOL(xfrm_state_alloc);
/* Called when the last reference to a state is dropped: queue it on the
 * GC list and kick the GC work item, which performs the real teardown
 * in process context.  The state must already be DEAD (BUG_TRAP).
 * Reuses the now-unhashed bydst list node as the GC-list linkage. */
207 void __xfrm_state_destroy(struct xfrm_state *x)
209 BUG_TRAP(x->km.state == XFRM_STATE_DEAD);
211 spin_lock_bh(&xfrm_state_gc_lock);
212 list_add(&x->bydst, &xfrm_state_gc_list);
213 spin_unlock_bh(&xfrm_state_gc_lock);
214 schedule_work(&xfrm_state_gc_work);
216 EXPORT_SYMBOL(__xfrm_state_destroy);
/* Remove a state from the system (caller holds x->lock).  Idempotent:
 * does nothing if already DEAD.  Unhashes from both tables (the
 * list_del calls sit in gaps of this chunk), dropping the reference
 * each hash chain held, cancels the timer (dropping its reference),
 * and finally drops the original allocation reference.
 * NOTE(review): the refcount choreography here depends on exact
 * statement order — do not reorder. */
218 static void __xfrm_state_delete(struct xfrm_state *x)
220 if (x->km.state != XFRM_STATE_DEAD) {
221 x->km.state = XFRM_STATE_DEAD;
222 spin_lock(&xfrm_state_lock);
/* Drop the reference held by the bydst hash chain. */
224 atomic_dec(&x->refcnt);
/* Drop the reference held by the byspi hash chain. */
227 atomic_dec(&x->refcnt);
229 spin_unlock(&xfrm_state_lock);
/* A pending timer holds a reference; release it if we cancelled. */
230 if (del_timer(&x->timer))
231 atomic_dec(&x->refcnt);
233 /* The number two in this test is the reference
234 * mentioned in the comment below plus the reference
235 * our caller holds. A larger value means that
236 * there are DSTs attached to this xfrm_state.
238 if (atomic_read(&x->refcnt) > 2) {
239 xfrm_state_gc_flush_bundles = 1;
240 schedule_work(&xfrm_state_gc_work);
243 /* All xfrm_state objects are created by xfrm_state_alloc.
244 * The xfrm_state_alloc call gives a reference, and that
245 * is what we are dropping here.
247 atomic_dec(&x->refcnt);
/* Public entry point for deleting a state: takes the per-state lock
 * (BH-safe, since timers and softirqs also touch the state) around the
 * lockless worker above. */
251 void xfrm_state_delete(struct xfrm_state *x)
253 spin_lock_bh(&x->lock);
254 __xfrm_state_delete(x);
255 spin_unlock_bh(&x->lock);
257 EXPORT_SYMBOL(xfrm_state_delete);
/* Delete every non-kernel-owned state matching @proto (or all states
 * when proto == IPSEC_PROTO_ANY).  Walks each bydst hash bucket; to
 * call xfrm_state_delete() (which sleeps on locks of its own) the
 * global table lock is dropped around each deletion and re-taken,
 * which is why the bucket scan restarts (restart label is in a gap). */
259 void xfrm_state_flush(u8 proto)
262 struct xfrm_state *x;
264 spin_lock_bh(&xfrm_state_lock);
265 for (i = 0; i < XFRM_DST_HSIZE; i++) {
267 list_for_each_entry(x, xfrm_state_bydst+i, bydst) {
/* Skip states owned by the kernel itself (e.g. tunnel anchors). */
268 if (!xfrm_state_kern(x) &&
269 (proto == IPSEC_PROTO_ANY || x->id.proto == proto)) {
271 spin_unlock_bh(&xfrm_state_lock);
273 xfrm_state_delete(x);
276 spin_lock_bh(&xfrm_state_lock);
281 spin_unlock_bh(&xfrm_state_lock);
284 EXPORT_SYMBOL(xfrm_state_flush);
/* Fill in a temporary selector on state @x that matches only the flow
 * @fl which triggered acquisition, by delegating to the address-family
 * specific init_tempsel hook.  The afinfo reference is held only for
 * the duration of the call. */
287 xfrm_init_tempsel(struct xfrm_state *x, struct flowi *fl,
288 struct xfrm_tmpl *tmpl,
289 xfrm_address_t *daddr, xfrm_address_t *saddr,
290 unsigned short family)
292 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
295 afinfo->init_tempsel(x, fl, tmpl, daddr, saddr);
296 xfrm_state_put_afinfo(afinfo);
/* Core SA resolution: find (or begin acquiring) a state matching policy
 * template @tmpl for flow @fl.  Scans the bydst hash bucket for a state
 * whose family/reqid/addresses/mode/proto/spi all match the template;
 * among VALID matches the freshest non-dying one wins ("best").  If
 * nothing usable exists and no acquisition is already in flight, a
 * temporary ACQ state is allocated, hashed, given an XFRM_ACQ_EXPIRES
 * lifetime, and the key manager is asked (km_query) to negotiate a real
 * SA.  On exit *err is 0, -EAGAIN (acquire in progress), or an error.
 * Runs under xfrm_state_lock. */
301 xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
302 struct flowi *fl, struct xfrm_tmpl *tmpl,
303 struct xfrm_policy *pol, int *err,
304 unsigned short family)
306 unsigned h = xfrm_dst_hash(daddr, family);
307 struct xfrm_state *x, *x0;
308 int acquire_in_progress = 0;
310 struct xfrm_state *best = NULL;
311 struct xfrm_state_afinfo *afinfo;
313 afinfo = xfrm_state_get_afinfo(family);
314 if (afinfo == NULL) {
315 *err = -EAFNOSUPPORT;
319 spin_lock_bh(&xfrm_state_lock);
320 list_for_each_entry(x, xfrm_state_bydst+h, bydst) {
321 if (x->props.family == family &&
322 x->props.reqid == tmpl->reqid &&
323 xfrm_state_addr_check(x, daddr, saddr, family) &&
324 tmpl->mode == x->props.mode &&
325 tmpl->id.proto == x->id.proto &&
/* tmpl->id.spi == 0 means "any SPI acceptable". */
326 (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) {
328 1. There is a valid state with matching selector.
330 2. Valid state with inappropriate selector. Skip.
332 Entering area of "sysdeps".
334 3. If state is not valid, selector is temporary,
335 it selects only session which triggered
336 previous resolution. Key manager will do
337 something to install a state with proper
340 if (x->km.state == XFRM_STATE_VALID) {
341 if (!xfrm_selector_match(&x->sel, fl, family))
/* Prefer a non-dying state; among equals, the most
 * recently added one. */
344 best->km.dying > x->km.dying ||
345 (best->km.dying == x->km.dying &&
346 best->curlft.add_time < x->curlft.add_time))
348 } else if (x->km.state == XFRM_STATE_ACQ) {
349 acquire_in_progress = 1;
350 } else if (x->km.state == XFRM_STATE_ERROR ||
351 x->km.state == XFRM_STATE_EXPIRED) {
352 if (xfrm_selector_match(&x->sel, fl, family))
/* No usable state and nothing pending: start an acquisition,
 * unless a state with this exact SPI already exists. */
359 if (!x && !error && !acquire_in_progress) {
361 (x0 = afinfo->state_lookup(daddr, tmpl->id.spi,
362 tmpl->id.proto)) != NULL) {
367 x = xfrm_state_alloc();
372 /* Initialize temporary selector matching only
373 * to current session. */
374 xfrm_init_tempsel(x, fl, tmpl, daddr, saddr, family);
376 if (km_query(x, tmpl, pol) == 0) {
377 x->km.state = XFRM_STATE_ACQ;
378 list_add_tail(&x->bydst, xfrm_state_bydst+h);
/* Only hash by SPI if the template pinned one. */
381 h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, family);
382 list_add(&x->byspi, xfrm_state_byspi+h);
/* Give the key manager a bounded window to resolve the SA. */
385 x->lft.hard_add_expires_seconds = XFRM_ACQ_EXPIRES;
387 x->timer.expires = jiffies + XFRM_ACQ_EXPIRES*HZ;
388 add_timer(&x->timer);
/* km_query failed: abandon the temporary state. */
390 x->km.state = XFRM_STATE_DEAD;
400 *err = acquire_in_progress ? -EAGAIN : error;
401 spin_unlock_bh(&xfrm_state_lock);
402 xfrm_state_put_afinfo(afinfo);
/* Hash a fully-specified state into both tables (caller holds
 * xfrm_state_lock) and arm its timer to fire in ~1s so lifetime
 * accounting starts promptly.  Reference increments for the hash
 * chains and timer sit in gaps of this chunk. */
406 static void __xfrm_state_insert(struct xfrm_state *x)
408 unsigned h = xfrm_dst_hash(&x->id.daddr, x->props.family);
410 list_add(&x->bydst, xfrm_state_bydst+h);
413 h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, x->props.family);
415 list_add(&x->byspi, xfrm_state_byspi+h);
/* mod_timer() == 0 means the timer was not already pending. */
418 if (!mod_timer(&x->timer, jiffies + HZ))
/* Public wrapper: insert a state into the hash tables under the global
 * state lock. */
424 void xfrm_state_insert(struct xfrm_state *x)
426 spin_lock_bh(&xfrm_state_lock);
427 __xfrm_state_insert(x);
428 spin_unlock_bh(&xfrm_state_lock);
430 EXPORT_SYMBOL(xfrm_state_insert);
432 static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq);
/* Add a new state, failing if an identical (daddr,spi,proto) state
 * already exists.  Also locates any matching ACQ placeholder — by KM
 * sequence number or by acquire parameters — so it can be deleted once
 * the real state is in place (deletion happens after the lock is
 * dropped, near the end).  Error-path details sit in gaps of this
 * chunk.  Returns 0 on success or a negative errno. */
434 int xfrm_state_add(struct xfrm_state *x)
436 struct xfrm_state_afinfo *afinfo;
437 struct xfrm_state *x1;
441 family = x->props.family;
442 afinfo = xfrm_state_get_afinfo(family);
443 if (unlikely(afinfo == NULL))
444 return -EAFNOSUPPORT;
446 spin_lock_bh(&xfrm_state_lock);
/* Duplicate check: exact (daddr, spi, proto) already present? */
448 x1 = afinfo->state_lookup(&x->id.daddr, x->id.spi, x->id.proto);
/* An ACQ state found by sequence must match our destination, else it
 * is unrelated and must not be consumed. */
457 x1 = __xfrm_find_acq_byseq(x->km.seq);
458 if (x1 && xfrm_addr_cmp(&x1->id.daddr, &x->id.daddr, family)) {
/* Fall back to matching the ACQ placeholder by its parameters
 * (create == 0: lookup only). */
465 x1 = afinfo->find_acq(
466 x->props.mode, x->props.reqid, x->id.proto,
467 &x->id.daddr, &x->props.saddr, 0);
469 __xfrm_state_insert(x);
473 spin_unlock_bh(&xfrm_state_lock);
474 xfrm_state_put_afinfo(afinfo);
/* Retire the ACQ placeholder now that the real SA is installed. */
477 xfrm_state_delete(x1);
483 EXPORT_SYMBOL(xfrm_state_add);
/* Update an existing state in place.  Looks up the installed state x1
 * by (daddr,spi,proto); kernel-owned states are refused.  If x1 is an
 * ACQ placeholder, @x is inserted as the real state instead.  For a
 * VALID x1, only the encapsulation template and lifetime limits are
 * copied over, its timer is kicked, and expiry is re-checked if the
 * state has been used.  Returns 0 on success or a negative errno
 * (specific codes sit in gaps of this chunk). */
485 int xfrm_state_update(struct xfrm_state *x)
487 struct xfrm_state_afinfo *afinfo;
488 struct xfrm_state *x1;
491 afinfo = xfrm_state_get_afinfo(x->props.family);
492 if (unlikely(afinfo == NULL))
493 return -EAFNOSUPPORT;
495 spin_lock_bh(&xfrm_state_lock);
496 x1 = afinfo->state_lookup(&x->id.daddr, x->id.spi, x->id.proto);
/* Kernel-owned (tunnel anchor) states may not be replaced from here. */
502 if (xfrm_state_kern(x1)) {
/* Acquisition placeholder: install @x as the real state. */
508 if (x1->km.state == XFRM_STATE_ACQ) {
509 __xfrm_state_insert(x);
515 spin_unlock_bh(&xfrm_state_lock);
516 xfrm_state_put_afinfo(afinfo);
/* When @x replaced an ACQ placeholder, delete the placeholder. */
522 xfrm_state_delete(x1);
/* Otherwise mutate the live state under its own lock. */
528 spin_lock_bh(&x1->lock);
529 if (likely(x1->km.state == XFRM_STATE_VALID)) {
/* Copy NAT-T encapsulation only if both sides have one allocated. */
530 if (x->encap && x1->encap)
531 memcpy(x1->encap, x->encap, sizeof(*x1->encap));
532 memcpy(&x1->lft, &x->lft, sizeof(x1->lft));
/* Re-arm the timer so the new limits take effect soon. */
535 if (!mod_timer(&x1->timer, jiffies + HZ))
537 if (x1->curlft.use_time)
538 xfrm_state_check_expire(x1);
542 spin_unlock_bh(&x1->lock);
548 EXPORT_SYMBOL(xfrm_state_update);
/* Check byte/packet lifetime limits on use of a state.  Records first
 * use time.  On hard limit breach: notify KM (hard=1) and schedule the
 * timer to finish expiry shortly; the error return sits in a gap of
 * this chunk.  On soft limit breach: notify KM (hard=0) only.
 * Returns 0 when the state remains usable. */
550 int xfrm_state_check_expire(struct xfrm_state *x)
552 if (!x->curlft.use_time)
553 x->curlft.use_time = (unsigned long)xtime.tv_sec;
555 if (x->km.state != XFRM_STATE_VALID)
/* Hard byte/packet limit reached: state must stop being used. */
558 if (x->curlft.bytes >= x->lft.hard_byte_limit ||
559 x->curlft.packets >= x->lft.hard_packet_limit) {
560 km_state_expired(x, 1);
561 if (!mod_timer(&x->timer, jiffies + XFRM_ACQ_EXPIRES*HZ))
/* Soft limit reached: warn the key manager so it can rekey. */
567 (x->curlft.bytes >= x->lft.soft_byte_limit ||
568 x->curlft.packets >= x->lft.soft_packet_limit))
569 km_state_expired(x, 0);
572 EXPORT_SYMBOL(xfrm_state_check_expire);
/* Ensure the skb has enough headroom for this transform's header plus
 * the output device's link-layer reserve; expand the head if not.
 * (The headroom comparison itself sits in a gap of this chunk.) */
574 static int xfrm_state_check_space(struct xfrm_state *x, struct sk_buff *skb)
576 int nhead = x->props.header_len + LL_RESERVED_SPACE(skb->dst->dev)
580 return pskb_expand_head(skb, nhead, 0, GFP_ATOMIC);
582 /* Check tail too... */
/* Output-path check: verify lifetime limits, then verify/ensure skb
 * headroom.  Returns 0 if the packet may proceed, else an error from
 * either check. */
586 int xfrm_state_check(struct xfrm_state *x, struct sk_buff *skb)
588 int err = xfrm_state_check_expire(x);
591 err = xfrm_state_check_space(x, skb);
595 EXPORT_SYMBOL(xfrm_state_check);
/* Look up a state by (daddr, spi, proto) via the per-family hook, under
 * the global state lock.  Returns NULL if the family is unsupported or
 * no state matches; the lookup hook takes its own reference on the
 * returned state. */
598 xfrm_state_lookup(xfrm_address_t *daddr, u32 spi, u8 proto,
599 unsigned short family)
601 struct xfrm_state *x;
602 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
606 spin_lock_bh(&xfrm_state_lock);
607 x = afinfo->state_lookup(daddr, spi, proto);
608 spin_unlock_bh(&xfrm_state_lock);
609 xfrm_state_put_afinfo(afinfo);
612 EXPORT_SYMBOL(xfrm_state_lookup);
/* Find an ACQ (acquisition-in-progress) state matching the given
 * parameters, optionally creating one when @create is non-zero, via
 * the per-family find_acq hook.  Serialized by xfrm_state_lock. */
615 xfrm_find_acq(u8 mode, u32 reqid, u8 proto,
616 xfrm_address_t *daddr, xfrm_address_t *saddr,
617 int create, unsigned short family)
619 struct xfrm_state *x;
620 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
624 spin_lock_bh(&xfrm_state_lock);
625 x = afinfo->find_acq(mode, reqid, proto, daddr, saddr, create);
626 spin_unlock_bh(&xfrm_state_lock);
627 xfrm_state_put_afinfo(afinfo);
630 EXPORT_SYMBOL(xfrm_find_acq);
632 /* Silly enough, but I'm lazy to build resolution list */
/* Linear scan of every bydst bucket for an ACQ state whose key-manager
 * sequence number equals @seq.  Caller must hold xfrm_state_lock.
 * O(total states) by design — see the author's note above. */
634 static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq)
637 struct xfrm_state *x;
639 for (i = 0; i < XFRM_DST_HSIZE; i++) {
640 list_for_each_entry(x, xfrm_state_bydst+i, bydst) {
641 if (x->km.seq == seq && x->km.state == XFRM_STATE_ACQ) {
/* Locked wrapper around __xfrm_find_acq_byseq(). */
650 struct xfrm_state *xfrm_find_acq_byseq(u32 seq)
652 struct xfrm_state *x;
654 spin_lock_bh(&xfrm_state_lock);
655 x = __xfrm_find_acq_byseq(seq);
656 spin_unlock_bh(&xfrm_state_lock);
659 EXPORT_SYMBOL(xfrm_find_acq_byseq);
/* Hand out the next key-manager acquire sequence number.  The
 * "++acqseq ? : ++acqseq" idiom skips 0, which is reserved as
 * "no sequence".  Serialized by a local static spinlock. */
661 u32 xfrm_get_acqseq(void)
665 static DEFINE_SPINLOCK(acqseq_lock);
667 spin_lock_bh(&acqseq_lock);
668 res = (++acqseq ? : ++acqseq);
669 spin_unlock_bh(&acqseq_lock);
672 EXPORT_SYMBOL(xfrm_get_acqseq);
/* Assign an SPI to state @x from the inclusive range [minspi, maxspi]
 * (network byte order).  minspi == maxspi requests that exact SPI; it
 * is granted only if no existing state already uses it.  Otherwise up
 * to maxspi-minspi+1 random probes are made for an unused value.  On
 * success the state is hashed into the byspi table.  Reference drops
 * for colliding lookups sit in gaps of this chunk. */
675 xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi)
678 struct xfrm_state *x0;
/* Caller asked for one specific SPI. */
683 if (minspi == maxspi) {
684 x0 = xfrm_state_lookup(&x->id.daddr, minspi, x->id.proto, x->props.family);
/* Work in host byte order for the random range arithmetic. */
692 minspi = ntohl(minspi);
693 maxspi = ntohl(maxspi);
694 for (h=0; h<maxspi-minspi+1; h++) {
695 spi = minspi + net_random()%(maxspi-minspi+1);
696 x0 = xfrm_state_lookup(&x->id.daddr, htonl(spi), x->id.proto, x->props.family);
698 x->id.spi = htonl(spi);
/* SPI chosen: hash the state by SPI under the global lock. */
705 spin_lock_bh(&xfrm_state_lock);
706 h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, x->props.family);
707 list_add(&x->byspi, xfrm_state_byspi+h);
709 spin_unlock_bh(&xfrm_state_lock);
713 EXPORT_SYMBOL(xfrm_alloc_spi);
/* Iterate all states matching @proto, invoking @func(x, index, data)
 * for each.  Two passes under xfrm_state_lock: the first counts the
 * matches, the second calls @func with a descending count so the
 * callback can tell which entry is last (--count reaches 0).  Stops on
 * the first non-zero return from @func (error propagation sits in a
 * gap of this chunk). */
715 int xfrm_state_walk(u8 proto, int (*func)(struct xfrm_state *, int, void*),
719 struct xfrm_state *x;
723 spin_lock_bh(&xfrm_state_lock);
/* Pass 1: count matching states. */
724 for (i = 0; i < XFRM_DST_HSIZE; i++) {
725 list_for_each_entry(x, xfrm_state_bydst+i, bydst) {
726 if (proto == IPSEC_PROTO_ANY || x->id.proto == proto)
/* Pass 2: deliver each match to the callback. */
735 for (i = 0; i < XFRM_DST_HSIZE; i++) {
736 list_for_each_entry(x, xfrm_state_bydst+i, bydst) {
737 if (proto != IPSEC_PROTO_ANY && x->id.proto != proto)
739 err = func(x, --count, data);
745 spin_unlock_bh(&xfrm_state_lock);
748 EXPORT_SYMBOL(xfrm_state_walk);
/* Anti-replay check for inbound sequence number @seq against the
 * state's sliding replay window.  seq == 0 is always invalid; a seq
 * ahead of the window's leading edge is accepted; a seq older than the
 * window width, or one whose bitmap bit is already set (duplicate), is
 * rejected and counted in stats.  Return values sit partly in gaps of
 * this chunk. */
750 int xfrm_replay_check(struct xfrm_state *x, u32 seq)
756 if (unlikely(seq == 0))
/* Newer than anything seen: always acceptable. */
759 if (likely(seq > x->replay.seq))
762 diff = x->replay.seq - seq;
/* Too old — fell off the back of the window. */
763 if (diff >= x->props.replay_window) {
764 x->stats.replay_window++;
/* Within the window but already seen: replay. */
768 if (x->replay.bitmap & (1U << diff)) {
774 EXPORT_SYMBOL(xfrm_replay_check);
/* Record an accepted sequence number in the replay window.  For a seq
 * ahead of the leading edge: shift the bitmap by the advance (or reset
 * it when the jump exceeds the window) and mark the new head.  For an
 * in-window seq: set its bit.  The x->replay.seq update for the
 * advancing case sits in a gap of this chunk. */
776 void xfrm_replay_advance(struct xfrm_state *x, u32 seq)
782 if (seq > x->replay.seq) {
783 diff = seq - x->replay.seq;
784 if (diff < x->props.replay_window)
785 x->replay.bitmap = ((x->replay.bitmap) << diff) | 1;
/* Jumped past the whole window: only the new head is "seen". */
787 x->replay.bitmap = 1;
790 diff = x->replay.seq - seq;
791 x->replay.bitmap |= (1U << diff);
794 EXPORT_SYMBOL(xfrm_replay_advance);
/* Registered key managers (PF_KEY, netlink, ...) and the rwlock that
 * protects the list. */
796 static struct list_head xfrm_km_list = LIST_HEAD_INIT(xfrm_km_list);
797 static DEFINE_RWLOCK(xfrm_km_lock);
/* Broadcast a state expiry to every registered key manager.  @hard
 * non-zero marks a hard expiry and moves the state to EXPIRED; the
 * per-KM notify call itself sits in a gap of this chunk. */
799 static void km_state_expired(struct xfrm_state *x, int hard)
804 x->km.state = XFRM_STATE_EXPIRED;
808 read_lock(&xfrm_km_lock);
809 list_for_each_entry(km, &xfrm_km_list, list)
811 read_unlock(&xfrm_km_lock);
/* Ask registered key managers to negotiate an SA for template @t under
 * policy @pol.  Tries each KM's acquire() in registration order;
 * presumably stops at the first success (the break/return is in a gap
 * of this chunk — confirm against the complete file). */
817 static int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol)
822 read_lock(&xfrm_km_lock);
823 list_for_each_entry(km, &xfrm_km_list, list) {
824 err = km->acquire(x, t, pol, XFRM_POLICY_OUT);
828 read_unlock(&xfrm_km_lock);
/* Report a new NAT-T address/port mapping for state @x to registered
 * key managers that implement new_mapping (the capability check sits
 * in a gap of this chunk). */
832 int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, u16 sport)
837 read_lock(&xfrm_km_lock);
838 list_for_each_entry(km, &xfrm_km_list, list) {
840 err = km->new_mapping(x, ipaddr, sport);
844 read_unlock(&xfrm_km_lock);
847 EXPORT_SYMBOL(km_new_mapping);
/* Broadcast a policy expiry (soft or hard, per @hard) to every key
 * manager that registered a notify_policy callback. */
849 void km_policy_expired(struct xfrm_policy *pol, int dir, int hard)
853 read_lock(&xfrm_km_lock);
854 list_for_each_entry(km, &xfrm_km_list, list)
855 if (km->notify_policy)
856 km->notify_policy(pol, dir, hard);
857 read_unlock(&xfrm_km_lock);
/* setsockopt() path for per-socket IPsec policy.  Copies the userspace
 * blob (bounded to one page), lets each registered key manager attempt
 * to compile it into an xfrm_policy, and installs the result on the
 * socket.  Error returns and the kfree of @data sit in gaps of this
 * chunk. */
863 int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
868 struct xfrm_policy *pol = NULL;
/* Reject empty or oversized option buffers before allocating. */
870 if (optlen <= 0 || optlen > PAGE_SIZE)
873 data = kmalloc(optlen, GFP_KERNEL);
878 if (copy_from_user(data, optval, optlen))
/* First KM able to parse this format wins. */
882 read_lock(&xfrm_km_lock);
883 list_for_each_entry(km, &xfrm_km_list, list) {
884 pol = km->compile_policy(sk->sk_family, optname, data,
889 read_unlock(&xfrm_km_lock);
/* err here carries the direction decoded by compile_policy. */
892 xfrm_sk_policy_insert(sk, err, pol);
901 EXPORT_SYMBOL(xfrm_user_policy);
/* Register a key manager: append it to the global KM list under the
 * write lock. */
903 int xfrm_register_km(struct xfrm_mgr *km)
905 write_lock_bh(&xfrm_km_lock);
906 list_add_tail(&km->list, &xfrm_km_list);
907 write_unlock_bh(&xfrm_km_lock);
910 EXPORT_SYMBOL(xfrm_register_km);
/* Unregister a key manager (the list_del sits in a gap of this chunk);
 * serialized by the same write lock as registration. */
912 int xfrm_unregister_km(struct xfrm_mgr *km)
914 write_lock_bh(&xfrm_km_lock);
916 write_unlock_bh(&xfrm_km_lock);
919 EXPORT_SYMBOL(xfrm_unregister_km);
/* Register per-address-family state operations.  Validates the family
 * index, refuses double registration, and hands the family the shared
 * hash tables before publishing it in the afinfo array. */
921 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo)
924 if (unlikely(afinfo == NULL))
926 if (unlikely(afinfo->family >= NPROTO))
927 return -EAFNOSUPPORT;
928 write_lock(&xfrm_state_afinfo_lock);
929 if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL))
/* Expose the shared hash tables to the family's lookup helpers. */
932 afinfo->state_bydst = xfrm_state_bydst;
933 afinfo->state_byspi = xfrm_state_byspi;
934 xfrm_state_afinfo[afinfo->family] = afinfo;
936 write_unlock(&xfrm_state_afinfo_lock);
939 EXPORT_SYMBOL(xfrm_state_register_afinfo);
/* Unregister per-address-family state operations.  Only succeeds when
 * the registered entry for the family is the same object being
 * removed; clears the table pointers it was handed at registration. */
941 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
944 if (unlikely(afinfo == NULL))
946 if (unlikely(afinfo->family >= NPROTO))
947 return -EAFNOSUPPORT;
948 write_lock(&xfrm_state_afinfo_lock);
949 if (likely(xfrm_state_afinfo[afinfo->family] != NULL)) {
/* Refuse to unregister somebody else's afinfo. */
950 if (unlikely(xfrm_state_afinfo[afinfo->family] != afinfo))
953 xfrm_state_afinfo[afinfo->family] = NULL;
954 afinfo->state_byspi = NULL;
955 afinfo->state_bydst = NULL;
958 write_unlock(&xfrm_state_afinfo_lock);
961 EXPORT_SYMBOL(xfrm_state_unregister_afinfo);
/* Look up the afinfo for @family and, if present, take its per-afinfo
 * read lock before releasing the registry lock — this pins the afinfo
 * against unregistration until xfrm_state_put_afinfo() is called.
 * Returns NULL for an out-of-range or unregistered family. */
963 static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned short family)
965 struct xfrm_state_afinfo *afinfo;
966 if (unlikely(family >= NPROTO))
968 read_lock(&xfrm_state_afinfo_lock);
969 afinfo = xfrm_state_afinfo[family];
970 if (likely(afinfo != NULL))
/* Held until the matching put; keeps the module/ops alive. */
971 read_lock(&afinfo->lock);
972 read_unlock(&xfrm_state_afinfo_lock);
/* Release the per-afinfo lock taken by xfrm_state_get_afinfo().
 * NULL-safe so callers can pass through a failed get. */
976 static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo)
978 if (unlikely(afinfo == NULL))
980 read_unlock(&afinfo->lock);
983 /* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
/* Drop state @x's reference on its tunnel state @t.  When only the
 * tunnel's own anchor reference plus ours remain (tunnel_users == 2),
 * the tunnel state itself is deleted.  The x->tunnel reset and the
 * surrounding NULL check sit in gaps of this chunk. */
984 void xfrm_state_delete_tunnel(struct xfrm_state *x)
987 struct xfrm_state *t = x->tunnel;
989 if (atomic_read(&t->tunnel_users) == 2)
990 xfrm_state_delete(t);
991 atomic_dec(&t->tunnel_users);
996 EXPORT_SYMBOL(xfrm_state_delete_tunnel);
/* Compute the usable payload MTU through transform @x given link
 * MTU @mtu: subtract the transform header, then let a VALID state's
 * type refine the value via get_max_size() (e.g. to account for
 * padding/trailers), re-adding the header it includes.  Intermediate
 * variables and the return sit in gaps of this chunk. */
998 int xfrm_state_mtu(struct xfrm_state *x, int mtu)
1002 res -= x->props.header_len;
1010 spin_lock_bh(&x->lock);
1011 if (x->km.state == XFRM_STATE_VALID &&
1012 x->type && x->type->get_max_size)
1013 m = x->type->get_max_size(x, m);
1015 m += x->props.header_len;
1016 spin_unlock_bh(&x->lock);
1026 EXPORT_SYMBOL(xfrm_state_mtu);
1028 void __init xfrm_state_init(void)
1032 for (i=0; i<XFRM_DST_HSIZE; i++) {
1033 INIT_LIST_HEAD(&xfrm_state_bydst[i]);
1034 INIT_LIST_HEAD(&xfrm_state_byspi[i]);
1036 INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task, NULL);