6 * Kazunori MIYAZAWA @USAGI
7 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
9 * YOSHIFUJI Hideaki @USAGI
10 * Split up af-specific functions
11 * Derek Atkins <derek@ihtfp.com>
12 * Add UDP Encapsulation
16 #include <linux/workqueue.h>
18 #include <linux/pfkeyv2.h>
19 #include <linux/ipsec.h>
20 #include <linux/module.h>
21 #include <asm/uaccess.h>
23 /* Each xfrm_state may be linked to two tables:
25 1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl)
26 2. Hash table by daddr to find what SAs exist for given
27 destination/tunnel endpoint. (output)
/* Global tables and synchronization for the xfrm (IPsec SA) state machine.
 * NOTE(review): this file appears to be a fragmentary extract with the
 * original line numbers embedded in each line; lines are missing between
 * the numbered fragments throughout.
 */
/* Protects the bydst/byspi hash tables below. */
30 static DEFINE_SPINLOCK(xfrm_state_lock);
32 /* Hash table to find appropriate SA towards given target (endpoint
33 * of tunnel or destination of transport mode) allowed by selector.
35 * Main use is finding SA after policy selected tunnel or transport mode.
36 * Also, it can be used by ah/esp icmp error handler to find offending SA.
/* Two hash tables over the same states: by destination and by SPI. */
38 static struct list_head xfrm_state_bydst[XFRM_DST_HSIZE];
39 static struct list_head xfrm_state_byspi[XFRM_DST_HSIZE];
/* Wait queue woken when key-manager-relevant events occur. */
41 DECLARE_WAIT_QUEUE_HEAD(km_waitq);
42 EXPORT_SYMBOL(km_waitq);
/* Per-address-family state operations, registered at runtime. */
44 static DEFINE_RWLOCK(xfrm_state_afinfo_lock);
45 static struct xfrm_state_afinfo *xfrm_state_afinfo[NPROTO];
/* Deferred destruction: dead states are queued on xfrm_state_gc_list
 * and freed from the xfrm_state_gc_work workqueue callback. */
47 static struct work_struct xfrm_state_gc_work;
48 static struct list_head xfrm_state_gc_list = LIST_HEAD_INIT(xfrm_state_gc_list);
49 static DEFINE_SPINLOCK(xfrm_state_gc_lock);
/* Flag asking the GC task to also flush cached dst bundles. */
51 static int xfrm_state_gc_flush_bundles;
53 static int __xfrm_state_delete(struct xfrm_state *x);
55 static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned short family);
56 static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);
58 static int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
59 static void km_state_expired(struct xfrm_state *x, int hard);
/* Final teardown of a dead state, run from the GC worker: cancel its
 * timer, invoke the type-specific destructor, release its type module
 * reference and security context.
 * NOTE(review): intermediate lines are missing from this extract (the
 * timer-cancel consequence and the final free are not visible). */
61 static void xfrm_state_gc_destroy(struct xfrm_state *x)
63 if (del_timer(&x->timer))
70 x->type->destructor(x);
71 xfrm_put_type(x->type);
73 security_xfrm_state_free(x);
/* Workqueue callback: splice the pending GC list onto a private list
 * under the GC lock, then destroy each queued state outside the lock.
 * Also services a requested bundle flush (body of that branch is
 * missing from this extract). */
77 static void xfrm_state_gc_task(void *data)
80 struct list_head *entry, *tmp;
81 struct list_head gc_list = LIST_HEAD_INIT(gc_list);
83 if (xfrm_state_gc_flush_bundles) {
84 xfrm_state_gc_flush_bundles = 0;
/* Take ownership of the whole pending list in one step so the lock is
 * held only briefly. */
88 spin_lock_bh(&xfrm_state_gc_lock);
89 list_splice_init(&xfrm_state_gc_list, &gc_list);
90 spin_unlock_bh(&xfrm_state_gc_lock);
/* _safe variant: xfrm_state_gc_destroy frees the entry we stand on.
 * Dead states are chained through their (now unused) bydst links. */
92 list_for_each_safe(entry, tmp, &gc_list) {
93 x = list_entry(entry, struct xfrm_state, bydst);
94 xfrm_state_gc_destroy(x);
/* Convert a timeout in seconds to jiffies, clamped so the
 * multiplication by HZ cannot exceed MAX_SCHEDULE_TIMEOUT-1.
 * NOTE(review): the non-clamped return (presumably secs*HZ) is
 * missing from this extract. */
99 static inline unsigned long make_jiffies(long secs)
101 if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
102 return MAX_SCHEDULE_TIMEOUT-1;
/* Per-state lifetime timer.  Computes, from the four seconds-based
 * lifetime limits, whether the state has hard-expired (delete it),
 * soft-expired (notify key managers, keep running), or merely needs
 * the timer re-armed for the nearest future deadline.
 * NOTE(review): many lines (the lock acquisition, several branch
 * bodies, goto targets) are missing from this extract; the control
 * flow below cannot be fully verified here. */
107 static void xfrm_timer_handler(unsigned long data)
109 struct xfrm_state *x = (struct xfrm_state*)data;
/* Wall-clock seconds; lifetimes are absolute, not jiffies-based. */
110 unsigned long now = (unsigned long)xtime.tv_sec;
111 long next = LONG_MAX;
113 if (x->km.state == XFRM_STATE_DEAD)
117 if (x->km.state == XFRM_STATE_EXPIRED)
/* Hard limits: reaching either forces expiry. */
119 if (x->lft.hard_add_expires_seconds) {
120 long tmo = x->lft.hard_add_expires_seconds +
121 x->curlft.add_time - now;
127 if (x->lft.hard_use_expires_seconds) {
128 long tmo = x->lft.hard_use_expires_seconds +
/* use_time may still be 0 (never used); treat "now" as the start. */
129 (x->curlft.use_time ? : now) - now;
/* Soft limits: reaching either only warns the key managers. */
137 if (x->lft.soft_add_expires_seconds) {
138 long tmo = x->lft.soft_add_expires_seconds +
139 x->curlft.add_time - now;
145 if (x->lft.soft_use_expires_seconds) {
146 long tmo = x->lft.soft_use_expires_seconds +
147 (x->curlft.use_time ? : now) - now;
/* Soft expiry: hard=0 notification. */
156 km_state_expired(x, 0);
/* Re-arm for the nearest pending deadline, if any. */
158 if (next != LONG_MAX &&
159 !mod_timer(&x->timer, jiffies + make_jiffies(next)))
/* An ACQ state that never got an SPI just dies quietly. */
164 if (x->km.state == XFRM_STATE_ACQ && x->id.spi == 0) {
165 x->km.state = XFRM_STATE_EXPIRED;
/* Hard expiry: delete and, for real (SPI-bearing) states, send the
 * hard=1 expire notification. */
170 if (!__xfrm_state_delete(x) && x->id.spi)
171 km_state_expired(x, 1);
174 spin_unlock(&x->lock);
/* Allocate and zero-initialize a new xfrm_state with refcount 1,
 * armed (but not started) lifetime timer, and infinite byte/packet
 * limits.  GFP_ATOMIC because callers may hold spinlocks.
 * NOTE(review): the NULL-check after kmalloc and the return statement
 * are missing from this extract — confirm against the full source. */
178 struct xfrm_state *xfrm_state_alloc(void)
180 struct xfrm_state *x;
182 x = kmalloc(sizeof(struct xfrm_state), GFP_ATOMIC);
185 memset(x, 0, sizeof(struct xfrm_state));
186 atomic_set(&x->refcnt, 1);
187 atomic_set(&x->tunnel_users, 0);
188 INIT_LIST_HEAD(&x->bydst);
189 INIT_LIST_HEAD(&x->byspi);
/* Timer calls back into xfrm_timer_handler with the state pointer. */
190 init_timer(&x->timer);
191 x->timer.function = xfrm_timer_handler;
192 x->timer.data = (unsigned long)x;
/* Creation time in wall-clock seconds; lifetime limits key off this. */
193 x->curlft.add_time = (unsigned long)xtime.tv_sec;
194 x->lft.soft_byte_limit = XFRM_INF;
195 x->lft.soft_packet_limit = XFRM_INF;
196 x->lft.hard_byte_limit = XFRM_INF;
197 x->lft.hard_packet_limit = XFRM_INF;
198 spin_lock_init(&x->lock);
202 EXPORT_SYMBOL(xfrm_state_alloc);
/* Queue a dead state (refcount reached zero) for deferred destruction
 * on the GC workqueue.  The now-unused bydst link doubles as the GC
 * list linkage.  Must only be called on XFRM_STATE_DEAD states. */
204 void __xfrm_state_destroy(struct xfrm_state *x)
206 BUG_TRAP(x->km.state == XFRM_STATE_DEAD);
208 spin_lock_bh(&xfrm_state_gc_lock);
209 list_add(&x->bydst, &xfrm_state_gc_list);
210 spin_unlock_bh(&xfrm_state_gc_lock);
211 schedule_work(&xfrm_state_gc_work);
213 EXPORT_SYMBOL(__xfrm_state_destroy);
/* Mark a state DEAD, unhash it from both tables, cancel its timer, and
 * drop the corresponding references.  Caller holds x->lock.
 * NOTE(review): the list_del calls paired with the two refcount drops
 * under xfrm_state_lock, and the return, are missing from this
 * extract — the pairing below is inferred structure, confirm against
 * the full source. */
215 static int __xfrm_state_delete(struct xfrm_state *x)
217 if (x->km.state != XFRM_STATE_DEAD) {
220 x->km.state = XFRM_STATE_DEAD;
221 spin_lock(&xfrm_state_lock);
/* Drop the bydst and byspi hash-table references. */
223 atomic_dec(&x->refcnt);
226 atomic_dec(&x->refcnt);
228 spin_unlock(&xfrm_state_lock);
/* A pending timer also held a reference. */
229 if (del_timer(&x->timer))
230 atomic_dec(&x->refcnt);
232 /* The number two in this test is the reference
233 * mentioned in the comment below plus the reference
234 * our caller holds. A larger value means that
235 * there are DSTs attached to this xfrm_state.
237 if (atomic_read(&x->refcnt) > 2) {
/* Ask the GC task to flush cached bundles so those DST references
 * get released. */
238 xfrm_state_gc_flush_bundles = 1;
239 schedule_work(&xfrm_state_gc_work);
242 /* All xfrm_state objects are created by xfrm_state_alloc.
243 * The xfrm_state_alloc call gives a reference, and that
244 * is what we are dropping here.
246 atomic_dec(&x->refcnt);
/* Public deletion entry point: take the state's lock and delegate to
 * __xfrm_state_delete.  (The return of err is missing from this
 * extract.) */
253 int xfrm_state_delete(struct xfrm_state *x)
257 spin_lock_bh(&x->lock);
258 err = __xfrm_state_delete(x);
259 spin_unlock_bh(&x->lock);
263 EXPORT_SYMBOL(xfrm_state_delete);
/* Delete every non-kernel-owned state matching @proto (or all states
 * for IPSEC_PROTO_ANY).  The table lock is dropped around each
 * xfrm_state_delete call (which takes x->lock), then the scan restarts;
 * the restart/goto logic is missing from this extract. */
265 void xfrm_state_flush(u8 proto)
268 struct xfrm_state *x;
270 spin_lock_bh(&xfrm_state_lock);
271 for (i = 0; i < XFRM_DST_HSIZE; i++) {
273 list_for_each_entry(x, xfrm_state_bydst+i, bydst) {
/* Kernel-internal (tunnel-owning) states are skipped. */
274 if (!xfrm_state_kern(x) &&
275 (proto == IPSEC_PROTO_ANY || x->id.proto == proto)) {
277 spin_unlock_bh(&xfrm_state_lock);
279 xfrm_state_delete(x);
282 spin_lock_bh(&xfrm_state_lock);
287 spin_unlock_bh(&xfrm_state_lock);
290 EXPORT_SYMBOL(xfrm_state_flush);
/* Initialize a temporary selector on an ACQ state so it matches only
 * the triggering flow, by dispatching to the per-family afinfo hook.
 * (Return type line and error handling for a NULL afinfo are missing
 * from this extract.) */
293 xfrm_init_tempsel(struct xfrm_state *x, struct flowi *fl,
294 struct xfrm_tmpl *tmpl,
295 xfrm_address_t *daddr, xfrm_address_t *saddr,
296 unsigned short family)
298 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
301 afinfo->init_tempsel(x, fl, tmpl, daddr, saddr);
302 xfrm_state_put_afinfo(afinfo);
/* SA resolution for output: walk the bydst hash bucket for a state
 * matching the template (family, reqid, addresses, mode, proto, and
 * SPI if the template pins one).  A VALID state whose selector and
 * security context match the flow becomes a candidate; the "best"
 * candidate is the least-dying, most-recently-added one.  If nothing
 * usable exists and no acquire is in flight, create an ACQ placeholder
 * state, hand it to the key managers via km_query, and arm its
 * acquire-expiry timer.
 * NOTE(review): large portions (error paths, best-candidate selection
 * details, refcounting, the final return) are missing from this
 * extract; comments below describe only the visible fragments. */
307 xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
308 struct flowi *fl, struct xfrm_tmpl *tmpl,
309 struct xfrm_policy *pol, int *err,
310 unsigned short family)
312 unsigned h = xfrm_dst_hash(daddr, family);
313 struct xfrm_state *x, *x0;
314 int acquire_in_progress = 0;
316 struct xfrm_state *best = NULL;
317 struct xfrm_state_afinfo *afinfo;
319 afinfo = xfrm_state_get_afinfo(family);
320 if (afinfo == NULL) {
321 *err = -EAFNOSUPPORT;
325 spin_lock_bh(&xfrm_state_lock);
326 list_for_each_entry(x, xfrm_state_bydst+h, bydst) {
/* Template match: same family/reqid/addresses/mode/proto, and SPI
 * either matches or is unset in the template. */
327 if (x->props.family == family &&
328 x->props.reqid == tmpl->reqid &&
329 xfrm_state_addr_check(x, daddr, saddr, family) &&
330 tmpl->mode == x->props.mode &&
331 tmpl->id.proto == x->id.proto &&
332 (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) {
334 1. There is a valid state with matching selector.
336 2. Valid state with inappropriate selector. Skip.
338 Entering area of "sysdeps".
340 3. If state is not valid, selector is temporary,
341 it selects only session which triggered
342 previous resolution. Key manager will do
343 something to install a state with proper
346 if (x->km.state == XFRM_STATE_VALID) {
347 if (!xfrm_selector_match(&x->sel, fl, family) ||
348 !xfrm_sec_ctx_match(pol->security, x->security))
/* Prefer a non-dying state; among equals, the newest. */
351 best->km.dying > x->km.dying ||
352 (best->km.dying == x->km.dying &&
353 best->curlft.add_time < x->curlft.add_time))
355 } else if (x->km.state == XFRM_STATE_ACQ) {
356 acquire_in_progress = 1;
357 } else if (x->km.state == XFRM_STATE_ERROR ||
358 x->km.state == XFRM_STATE_EXPIRED) {
359 if (xfrm_selector_match(&x->sel, fl, family) &&
360 xfrm_sec_ctx_match(pol->security, x->security))
/* No usable state: consider starting an acquire. */
367 if (!x && !error && !acquire_in_progress) {
/* If the template pins an SPI that already exists, bail out rather
 * than racing the existing entry. */
369 (x0 = afinfo->state_lookup(daddr, tmpl->id.spi,
370 tmpl->id.proto)) != NULL) {
375 x = xfrm_state_alloc();
380 /* Initialize temporary selector matching only
381 * to current session. */
382 xfrm_init_tempsel(x, fl, tmpl, daddr, saddr, family);
384 if (km_query(x, tmpl, pol) == 0) {
/* Key manager accepted: publish the ACQ placeholder in both hash
 * tables and start the acquire-expiry timer. */
385 x->km.state = XFRM_STATE_ACQ;
386 list_add_tail(&x->bydst, xfrm_state_bydst+h);
389 h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, family);
390 list_add(&x->byspi, xfrm_state_byspi+h);
393 x->lft.hard_add_expires_seconds = XFRM_ACQ_EXPIRES;
395 x->timer.expires = jiffies + XFRM_ACQ_EXPIRES*HZ;
396 add_timer(&x->timer);
/* Key manager refused: the fresh state dies immediately. */
398 x->km.state = XFRM_STATE_DEAD;
/* An in-flight acquire maps to -EAGAIN so the caller retries. */
408 *err = acquire_in_progress ? -EAGAIN : error;
409 spin_unlock_bh(&xfrm_state_lock);
410 xfrm_state_put_afinfo(afinfo);
/* Hash a state into the bydst table and, where applicable, the byspi
 * table, then kick its timer one second out.  Caller holds
 * xfrm_state_lock.  (The xfrm_state_hold calls pairing each list_add
 * are missing from this extract.) */
414 static void __xfrm_state_insert(struct xfrm_state *x)
416 unsigned h = xfrm_dst_hash(&x->id.daddr, x->props.family);
418 list_add(&x->bydst, xfrm_state_bydst+h);
421 h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, x->props.family);
423 list_add(&x->byspi, xfrm_state_byspi+h);
/* mod_timer returning 0 means the timer was not pending; the branch
 * body (taking a timer reference) is missing from this extract. */
426 if (!mod_timer(&x->timer, jiffies + HZ))
/* Public insert: hash the state under the table lock, then invalidate
 * cached dst bundles so new traffic can pick up the new SA. */
432 void xfrm_state_insert(struct xfrm_state *x)
434 spin_lock_bh(&xfrm_state_lock);
435 __xfrm_state_insert(x);
436 spin_unlock_bh(&xfrm_state_lock);
438 xfrm_flush_all_bundles();
440 EXPORT_SYMBOL(xfrm_state_insert);
442 static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq);
/* Add a fully-specified SA.  Fails if an SA with the same
 * (daddr, spi, proto) already exists; otherwise locates any matching
 * ACQ placeholder (by acquire sequence number, then by find_acq) so it
 * can be superseded and deleted after the insert.
 * NOTE(review): the error-path labels, x1 refcount handling, and
 * return are missing from this extract. */
444 int xfrm_state_add(struct xfrm_state *x)
446 struct xfrm_state_afinfo *afinfo;
447 struct xfrm_state *x1;
451 family = x->props.family;
452 afinfo = xfrm_state_get_afinfo(family);
453 if (unlikely(afinfo == NULL))
454 return -EAFNOSUPPORT;
456 spin_lock_bh(&xfrm_state_lock);
/* Duplicate check: same identity must not already be installed. */
458 x1 = afinfo->state_lookup(&x->id.daddr, x->id.spi, x->id.proto);
/* Find the ACQ placeholder this SA answers, if any; a placeholder
 * for a different destination is not ours. */
467 x1 = __xfrm_find_acq_byseq(x->km.seq);
468 if (x1 && xfrm_addr_cmp(&x1->id.daddr, &x->id.daddr, family)) {
475 x1 = afinfo->find_acq(
476 x->props.mode, x->props.reqid, x->id.proto,
477 &x->id.daddr, &x->props.saddr, 0);
479 __xfrm_state_insert(x);
483 spin_unlock_bh(&xfrm_state_lock);
484 xfrm_state_put_afinfo(afinfo);
487 xfrm_flush_all_bundles();
/* Retire the superseded ACQ placeholder outside the table lock. */
490 xfrm_state_delete(x1);
496 EXPORT_SYMBOL(xfrm_state_add);
/* Update an existing SA in place.  Looks up the installed twin by
 * (daddr, spi, proto); kernel-owned twins cannot be replaced.  An ACQ
 * twin is superseded by inserting @x and deleting the placeholder; a
 * VALID twin has its encap and lifetime limits copied over under its
 * own lock, with an expiry re-check if it has been used.
 * NOTE(review): several error/em branches and the returns are missing
 * from this extract. */
498 int xfrm_state_update(struct xfrm_state *x)
500 struct xfrm_state_afinfo *afinfo;
501 struct xfrm_state *x1;
504 afinfo = xfrm_state_get_afinfo(x->props.family);
505 if (unlikely(afinfo == NULL))
506 return -EAFNOSUPPORT;
508 spin_lock_bh(&xfrm_state_lock);
509 x1 = afinfo->state_lookup(&x->id.daddr, x->id.spi, x->id.proto);
/* Kernel-internal states are not user-replaceable. */
515 if (xfrm_state_kern(x1)) {
521 if (x1->km.state == XFRM_STATE_ACQ) {
522 __xfrm_state_insert(x);
528 spin_unlock_bh(&xfrm_state_lock);
529 xfrm_state_put_afinfo(afinfo);
/* The ACQ placeholder was superseded above; retire it. */
535 xfrm_state_delete(x1);
/* In-place update path for a live SA. */
541 spin_lock_bh(&x1->lock);
542 if (likely(x1->km.state == XFRM_STATE_VALID)) {
543 if (x->encap && x1->encap)
544 memcpy(x1->encap, x->encap, sizeof(*x1->encap));
545 memcpy(&x1->lft, &x->lft, sizeof(x1->lft));
548 if (!mod_timer(&x1->timer, jiffies + HZ))
/* Re-evaluate limits immediately if the SA has carried traffic. */
550 if (x1->curlft.use_time)
551 xfrm_state_check_expire(x1);
555 spin_unlock_bh(&x1->lock);
561 EXPORT_SYMBOL(xfrm_state_update);
/* Check byte/packet lifetime limits on each use of the SA.  Stamps
 * first-use time.  Hard limit reached: mark EXPIRED and fire the timer
 * now.  Soft limit reached (while not yet dying): soft-expire
 * notification to key managers.  (Return statements are missing from
 * this extract.) */
563 int xfrm_state_check_expire(struct xfrm_state *x)
565 if (!x->curlft.use_time)
566 x->curlft.use_time = (unsigned long)xtime.tv_sec;
568 if (x->km.state != XFRM_STATE_VALID)
571 if (x->curlft.bytes >= x->lft.hard_byte_limit ||
572 x->curlft.packets >= x->lft.hard_packet_limit) {
573 x->km.state = XFRM_STATE_EXPIRED;
/* Expire immediately via the timer path; !mod_timer branch body
 * (timer reference) is missing from this extract. */
574 if (!mod_timer(&x->timer, jiffies))
580 (x->curlft.bytes >= x->lft.soft_byte_limit ||
581 x->curlft.packets >= x->lft.soft_packet_limit)) {
583 km_state_expired(x, 0);
587 EXPORT_SYMBOL(xfrm_state_check_expire);
/* Ensure the skb has enough headroom for this SA's transform header
 * plus the output device's link-layer reserve, expanding the head if
 * not.  (The headroom comparison guarding the expand call is missing
 * from this extract.) */
589 static int xfrm_state_check_space(struct xfrm_state *x, struct sk_buff *skb)
591 int nhead = x->props.header_len + LL_RESERVED_SPACE(skb->dst->dev)
595 return pskb_expand_head(skb, nhead, 0, GFP_ATOMIC);
597 /* Check tail too... */
/* Output-path check: lifetime expiry first, then skb headroom.
 * (The early-return on expiry error and final return are missing from
 * this extract.) */
601 int xfrm_state_check(struct xfrm_state *x, struct sk_buff *skb)
603 int err = xfrm_state_check_expire(x);
606 err = xfrm_state_check_space(x, skb);
610 EXPORT_SYMBOL(xfrm_state_check);
/* Look up an SA by (daddr, spi, proto) via the per-family hook, under
 * the table lock.  (Return-type line, NULL-afinfo check, and return
 * are missing from this extract.) */
613 xfrm_state_lookup(xfrm_address_t *daddr, u32 spi, u8 proto,
614 unsigned short family)
616 struct xfrm_state *x;
617 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
621 spin_lock_bh(&xfrm_state_lock);
622 x = afinfo->state_lookup(daddr, spi, proto);
623 spin_unlock_bh(&xfrm_state_lock);
624 xfrm_state_put_afinfo(afinfo);
627 EXPORT_SYMBOL(xfrm_state_lookup);
/* Find (or, when @create is set, create) an ACQ placeholder state for
 * the given mode/reqid/proto/addresses via the per-family hook, under
 * the table lock.  (Return-type line and returns are missing from
 * this extract.) */
630 xfrm_find_acq(u8 mode, u32 reqid, u8 proto,
631 xfrm_address_t *daddr, xfrm_address_t *saddr,
632 int create, unsigned short family)
634 struct xfrm_state *x;
635 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
639 spin_lock_bh(&xfrm_state_lock);
640 x = afinfo->find_acq(mode, reqid, proto, daddr, saddr, create);
641 spin_unlock_bh(&xfrm_state_lock);
642 xfrm_state_put_afinfo(afinfo);
645 EXPORT_SYMBOL(xfrm_find_acq);
647 /* Silly enough, but I'm lazy to build resolution list */
/* Linear scan of the whole bydst table for an ACQ state with the given
 * acquire sequence number.  Caller holds xfrm_state_lock.  (The hold
 * and return inside the match branch are missing from this extract.) */
649 static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq)
652 struct xfrm_state *x;
654 for (i = 0; i < XFRM_DST_HSIZE; i++) {
655 list_for_each_entry(x, xfrm_state_bydst+i, bydst) {
656 if (x->km.seq == seq && x->km.state == XFRM_STATE_ACQ) {
/* Locked wrapper around __xfrm_find_acq_byseq.  (The return is missing
 * from this extract.) */
665 struct xfrm_state *xfrm_find_acq_byseq(u32 seq)
667 struct xfrm_state *x;
669 spin_lock_bh(&xfrm_state_lock);
670 x = __xfrm_find_acq_byseq(seq);
671 spin_unlock_bh(&xfrm_state_lock);
674 EXPORT_SYMBOL(xfrm_find_acq_byseq);
/* Hand out the next acquire sequence number.  The ?: form skips zero
 * on wrap-around so 0 never appears as a valid sequence.  (The static
 * counter declaration and return are missing from this extract.) */
676 u32 xfrm_get_acqseq(void)
680 static DEFINE_SPINLOCK(acqseq_lock);
682 spin_lock_bh(&acqseq_lock);
683 res = (++acqseq ? : ++acqseq);
684 spin_unlock_bh(&acqseq_lock);
687 EXPORT_SYMBOL(xfrm_get_acqseq);
/* Choose an SPI for @x within [minspi, maxspi].  Equal bounds request
 * that exact SPI (if free); otherwise random probing within the range,
 * each candidate checked against the lookup table.  On success the
 * state is hashed into the byspi table.
 * NOTE(review): the xfrm_state_put on lookup hits, the km_waitq
 * wakeup, and the return are missing from this extract.  SPI values
 * are kept in network byte order in x->id.spi (note the ntohl/htonl
 * round-trips). */
690 xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi)
693 struct xfrm_state *x0;
698 if (minspi == maxspi) {
699 x0 = xfrm_state_lookup(&x->id.daddr, minspi, x->id.proto, x->props.family);
707 minspi = ntohl(minspi);
708 maxspi = ntohl(maxspi);
/* Random probing: up to range-size attempts to find a free SPI. */
709 for (h=0; h<maxspi-minspi+1; h++) {
710 spi = minspi + net_random()%(maxspi-minspi+1);
711 x0 = xfrm_state_lookup(&x->id.daddr, htonl(spi), x->id.proto, x->props.family);
713 x->id.spi = htonl(spi);
720 spin_lock_bh(&xfrm_state_lock);
721 h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, x->props.family);
722 list_add(&x->byspi, xfrm_state_byspi+h);
724 spin_unlock_bh(&xfrm_state_lock);
728 EXPORT_SYMBOL(xfrm_alloc_spi);
/* Iterate all states matching @proto, calling @func on each with a
 * countdown index (reaching 0 on the last entry).  Two passes: first
 * count the matches, then invoke the callback, all under the table
 * lock.  (The count accumulation, empty-set error, callback error
 * break, and return are missing from this extract.) */
730 int xfrm_state_walk(u8 proto, int (*func)(struct xfrm_state *, int, void*),
734 struct xfrm_state *x;
738 spin_lock_bh(&xfrm_state_lock);
/* Pass 1: count matching states. */
739 for (i = 0; i < XFRM_DST_HSIZE; i++) {
740 list_for_each_entry(x, xfrm_state_bydst+i, bydst) {
741 if (proto == IPSEC_PROTO_ANY || x->id.proto == proto)
/* Pass 2: deliver each match to the callback. */
750 for (i = 0; i < XFRM_DST_HSIZE; i++) {
751 list_for_each_entry(x, xfrm_state_bydst+i, bydst) {
752 if (proto != IPSEC_PROTO_ANY && x->id.proto != proto)
754 err = func(x, --count, data);
760 spin_unlock_bh(&xfrm_state_lock);
763 EXPORT_SYMBOL(xfrm_state_walk);
/* Anti-replay check for inbound sequence number @seq: reject 0, accept
 * anything beyond the current window head, reject anything older than
 * the window or whose bitmap bit is already set (a replay).  (The
 * accept/reject return statements and stat bumps for some branches are
 * missing from this extract.) */
765 int xfrm_replay_check(struct xfrm_state *x, u32 seq)
771 if (unlikely(seq == 0))
774 if (likely(seq > x->replay.seq))
777 diff = x->replay.seq - seq;
/* Older than the replay window: count and reject. */
778 if (diff >= x->props.replay_window) {
779 x->stats.replay_window++;
/* Bit already set: this sequence number was already seen. */
783 if (x->replay.bitmap & (1U << diff)) {
789 EXPORT_SYMBOL(xfrm_replay_check);
/* Advance the anti-replay window after a packet passes verification.
 * A newer seq shifts the bitmap forward (or resets it if the jump
 * exceeds the window) and becomes the new head; an in-window older seq
 * just sets its bit.  (The x->replay.seq = seq assignment is missing
 * from this extract.) */
791 void xfrm_replay_advance(struct xfrm_state *x, u32 seq)
797 if (seq > x->replay.seq) {
798 diff = seq - x->replay.seq;
799 if (diff < x->props.replay_window)
800 x->replay.bitmap = ((x->replay.bitmap) << diff) | 1;
/* Jump past the whole window: only the new head is marked seen. */
802 x->replay.bitmap = 1;
805 diff = x->replay.seq - seq;
806 x->replay.bitmap |= (1U << diff);
809 EXPORT_SYMBOL(xfrm_replay_advance);
/* Registered key managers (e.g. PF_KEY, netlink) and the rwlock
 * guarding the list; readers broadcast events, writers register and
 * unregister managers. */
811 static struct list_head xfrm_km_list = LIST_HEAD_INIT(xfrm_km_list);
812 static DEFINE_RWLOCK(xfrm_km_lock);
/* Broadcast a policy event to every registered key manager that
 * implements notify_policy, under the read lock. */
814 void km_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
818 read_lock(&xfrm_km_lock);
819 list_for_each_entry(km, &xfrm_km_list, list)
820 if (km->notify_policy)
821 km->notify_policy(xp, dir, c);
822 read_unlock(&xfrm_km_lock);
/* Broadcast a state event to every registered key manager.  (The
 * per-manager notify call inside the loop is missing from this
 * extract.) */
825 void km_state_notify(struct xfrm_state *x, struct km_event *c)
828 read_lock(&xfrm_km_lock);
829 list_for_each_entry(km, &xfrm_km_list, list)
832 read_unlock(&xfrm_km_lock);
835 EXPORT_SYMBOL(km_policy_notify);
836 EXPORT_SYMBOL(km_state_notify);
/* Notify key managers that a state expired; @hard distinguishes hard
 * from soft expiry.  (Event struct setup besides the event code is
 * missing from this extract.) */
838 static void km_state_expired(struct xfrm_state *x, int hard)
843 c.event = XFRM_MSG_EXPIRE;
844 km_state_notify(x, &c);
851 * We send to all registered managers regardless of failure
852 * We are happy with one success
/* Ask every registered key manager to acquire/negotiate an SA for the
 * template; returns success if any manager accepted.  (The err
 * aggregation from acqret and the return are missing from this
 * extract.) */
854 static int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol)
856 int err = -EINVAL, acqret;
859 read_lock(&xfrm_km_lock);
860 list_for_each_entry(km, &xfrm_km_list, list) {
861 acqret = km->acquire(x, t, pol, XFRM_POLICY_OUT);
865 read_unlock(&xfrm_km_lock);
/* Report a new NAT-T address/port mapping to key managers that
 * implement new_mapping; appears to stop after the first manager that
 * handles it.  (The guard/break around the call and the return are
 * missing from this extract.) */
869 int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, u16 sport)
874 read_lock(&xfrm_km_lock);
875 list_for_each_entry(km, &xfrm_km_list, list) {
877 err = km->new_mapping(x, ipaddr, sport);
881 read_unlock(&xfrm_km_lock);
884 EXPORT_SYMBOL(km_new_mapping);
/* Notify key managers that a policy expired.  (Event struct setup
 * besides the event code, including the hard flag, is missing from
 * this extract.) */
886 void km_policy_expired(struct xfrm_policy *pol, int dir, int hard)
891 c.event = XFRM_MSG_POLEXPIRE;
892 km_policy_notify(pol, dir, &c);
/* setsockopt path for per-socket IPsec policy: copy the user buffer
 * in, let each registered key manager try to compile it into an
 * xfrm_policy, and install the result on the socket.
 * NOTE(review): error-path labels, the err/dir plumbing into
 * xfrm_sk_policy_insert, kfree of data, and the return are missing
 * from this extract. */
898 int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
903 struct xfrm_policy *pol = NULL;
/* Bound the copy: reject non-positive or over-a-page requests. */
905 if (optlen <= 0 || optlen > PAGE_SIZE)
908 data = kmalloc(optlen, GFP_KERNEL);
913 if (copy_from_user(data, optval, optlen))
/* First manager able to compile the blob wins. */
917 read_lock(&xfrm_km_lock);
918 list_for_each_entry(km, &xfrm_km_list, list) {
919 pol = km->compile_policy(sk->sk_family, optname, data,
924 read_unlock(&xfrm_km_lock);
927 xfrm_sk_policy_insert(sk, err, pol);
936 EXPORT_SYMBOL(xfrm_user_policy);
/* Register a key manager: append to the global list under the write
 * lock.  (The return is missing from this extract.) */
938 int xfrm_register_km(struct xfrm_mgr *km)
940 write_lock_bh(&xfrm_km_lock);
941 list_add_tail(&km->list, &xfrm_km_list);
942 write_unlock_bh(&xfrm_km_lock);
945 EXPORT_SYMBOL(xfrm_register_km);
/* Unregister a key manager under the write lock.  (The list_del and
 * return are missing from this extract.) */
947 int xfrm_unregister_km(struct xfrm_mgr *km)
949 write_lock_bh(&xfrm_km_lock);
951 write_unlock_bh(&xfrm_km_lock);
954 EXPORT_SYMBOL(xfrm_unregister_km);
/* Register per-address-family state operations.  Validates the family
 * index, refuses double registration, hands the afinfo pointers to the
 * shared hash tables, and publishes it in the family table.  (The
 * -EEXIST path, err variable, and return are missing from this
 * extract.) */
956 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo)
959 if (unlikely(afinfo == NULL))
961 if (unlikely(afinfo->family >= NPROTO))
962 return -EAFNOSUPPORT;
963 write_lock(&xfrm_state_afinfo_lock);
964 if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL))
/* Share the global hash tables with the per-family code. */
967 afinfo->state_bydst = xfrm_state_bydst;
968 afinfo->state_byspi = xfrm_state_byspi;
969 xfrm_state_afinfo[afinfo->family] = afinfo;
971 write_unlock(&xfrm_state_afinfo_lock);
974 EXPORT_SYMBOL(xfrm_state_register_afinfo);
/* Unregister per-address-family state operations, verifying that the
 * caller owns the slot before clearing it and the table pointers.
 * (The mismatch error path and return are missing from this
 * extract.) */
976 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
979 if (unlikely(afinfo == NULL))
981 if (unlikely(afinfo->family >= NPROTO))
982 return -EAFNOSUPPORT;
983 write_lock(&xfrm_state_afinfo_lock);
984 if (likely(xfrm_state_afinfo[afinfo->family] != NULL)) {
/* Only the registered owner may unregister. */
985 if (unlikely(xfrm_state_afinfo[afinfo->family] != afinfo))
988 xfrm_state_afinfo[afinfo->family] = NULL;
989 afinfo->state_byspi = NULL;
990 afinfo->state_bydst = NULL;
993 write_unlock(&xfrm_state_afinfo_lock);
996 EXPORT_SYMBOL(xfrm_state_unregister_afinfo);
/* Look up the afinfo for @family and, if found, take its per-afinfo
 * read lock before releasing the table lock — the caller must balance
 * with xfrm_state_put_afinfo.  (The return is missing from this
 * extract.) */
998 static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned short family)
1000 struct xfrm_state_afinfo *afinfo;
1001 if (unlikely(family >= NPROTO))
1003 read_lock(&xfrm_state_afinfo_lock);
1004 afinfo = xfrm_state_afinfo[family];
/* Holding afinfo->lock pins the afinfo (and its module) while the
 * caller uses it. */
1005 if (likely(afinfo != NULL))
1006 read_lock(&afinfo->lock);
1007 read_unlock(&xfrm_state_afinfo_lock);
/* Release the per-afinfo read lock taken by xfrm_state_get_afinfo;
 * tolerates a NULL afinfo from a failed get. */
1011 static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo)
1013 if (unlikely(afinfo == NULL))
1015 read_unlock(&afinfo->lock);
1018 /* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
/* Drop @x's reference on its tunnel state: if only the tunnel's own
 * reference and ours remain (tunnel_users == 2), delete the tunnel
 * state first, then release our user count.  (The NULL check on
 * x->tunnel and clearing of the pointer are missing from this
 * extract.) */
1019 void xfrm_state_delete_tunnel(struct xfrm_state *x)
1022 struct xfrm_state *t = x->tunnel;
1024 if (atomic_read(&t->tunnel_users) == 2)
1025 xfrm_state_delete(t);
1026 atomic_dec(&t->tunnel_users);
1031 EXPORT_SYMBOL(xfrm_state_delete_tunnel);
1034 * This function is NOT optimal. For example, with ESP it will give an
1035 * MTU that's usually two bytes short of being optimal. However, it will
1036 * usually give an answer that's a multiple of 4 provided the input is
1037 * also a multiple of 4.
/* Compute the usable payload MTU through this SA: subtract the
 * transform header, then (for a VALID state whose type provides
 * get_max_size) round through the type's size calculation under the
 * state lock.  (Variable declarations, the res/m relationship, and the
 * return are missing from this extract.) */
1039 int xfrm_state_mtu(struct xfrm_state *x, int mtu)
1043 res -= x->props.header_len;
1051 spin_lock_bh(&x->lock);
1052 if (x->km.state == XFRM_STATE_VALID &&
1053 x->type && x->type->get_max_size)
1054 m = x->type->get_max_size(x, m);
1056 m += x->props.header_len;
1057 spin_unlock_bh(&x->lock);
1067 EXPORT_SYMBOL(xfrm_state_mtu);
/* Finish constructing an SA: apply per-family init_flags, bind and
 * initialize the protocol type (AH/ESP/etc.), and mark the state
 * VALID.  (The err checks between steps, error labels, and return are
 * missing from this extract.) */
1069 int xfrm_init_state(struct xfrm_state *x)
1071 struct xfrm_state_afinfo *afinfo;
1072 int family = x->props.family;
1075 err = -EAFNOSUPPORT;
1076 afinfo = xfrm_state_get_afinfo(family);
/* Optional per-family flag validation/initialization. */
1081 if (afinfo->init_flags)
1082 err = afinfo->init_flags(x);
1084 xfrm_state_put_afinfo(afinfo);
/* Bind the transform type; xfrm_get_type also takes a module ref
 * released later via xfrm_put_type. */
1089 err = -EPROTONOSUPPORT;
1090 x->type = xfrm_get_type(x->id.proto, family);
1091 if (x->type == NULL)
1094 err = x->type->init_state(x);
1098 x->km.state = XFRM_STATE_VALID;
1104 EXPORT_SYMBOL(xfrm_init_state);
1106 void __init xfrm_state_init(void)
1110 for (i=0; i<XFRM_DST_HSIZE; i++) {
1111 INIT_LIST_HEAD(&xfrm_state_bydst[i]);
1112 INIT_LIST_HEAD(&xfrm_state_byspi[i]);
1114 INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task, NULL);