/*
 * xfrm_state.c
 *
 * Changes:
 *	Mitsuru KANDA @USAGI
 *	Kazunori MIYAZAWA @USAGI
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *		IPv6 support
 *	YOSHIFUJI Hideaki @USAGI
 *		Split up af-specific functions
 *	Derek Atkins <derek@ihtfp.com>
 *		Add UDP Encapsulation
 *
 */

#include <linux/workqueue.h>
#include <net/xfrm.h>
#include <linux/pfkeyv2.h>
#include <linux/ipsec.h>
#include <linux/module.h>
#include <asm/uaccess.h>

struct sock *xfrm_nl;
EXPORT_SYMBOL(xfrm_nl);

u32 sysctl_xfrm_aevent_etime = XFRM_AE_ETIME;
EXPORT_SYMBOL(sysctl_xfrm_aevent_etime);

u32 sysctl_xfrm_aevent_rseqth = XFRM_AE_SEQT_SIZE;
EXPORT_SYMBOL(sysctl_xfrm_aevent_rseqth);

/* Each xfrm_state may be linked to two tables:

   1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl)
   2. Hash table by daddr to find what SAs exist for given
      destination/tunnel endpoint. (output)
 */

static DEFINE_SPINLOCK(xfrm_state_lock);

/* Hash table to find appropriate SA towards given target (endpoint
 * of tunnel or destination of transport mode) allowed by selector.
 *
 * Main use is finding SA after policy selected tunnel or transport mode.
 * Also, it can be used by ah/esp icmp error handler to find offending SA.
 */
static struct list_head xfrm_state_bydst[XFRM_DST_HSIZE];
static struct list_head xfrm_state_byspi[XFRM_DST_HSIZE];

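/*
 * Lookup sketch (illustrative, not part of the original file; it assumes
 * the hash helpers declared in net/xfrm.h): a fully inserted state is
 * reachable through both tables:
 *
 *	h = xfrm_dst_hash(daddr, family);
 *	list_for_each_entry(x, xfrm_state_bydst+h, bydst)
 *		// match on daddr/reqid/mode/proto, as xfrm_state_find() does
 *
 *	h = xfrm_spi_hash(daddr, spi, proto, family);
 *	list_for_each_entry(x, xfrm_state_byspi+h, byspi)
 *		// match on (daddr, spi, proto), as afinfo->state_lookup() does
 */
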
DECLARE_WAIT_QUEUE_HEAD(km_waitq);
EXPORT_SYMBOL(km_waitq);

static DEFINE_RWLOCK(xfrm_state_afinfo_lock);
static struct xfrm_state_afinfo *xfrm_state_afinfo[NPROTO];

static struct work_struct xfrm_state_gc_work;
static struct list_head xfrm_state_gc_list = LIST_HEAD_INIT(xfrm_state_gc_list);
static DEFINE_SPINLOCK(xfrm_state_gc_lock);

static int xfrm_state_gc_flush_bundles;

int __xfrm_state_delete(struct xfrm_state *x);

static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned short family);
static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);

int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
void km_state_expired(struct xfrm_state *x, int hard, u32 pid);

static void xfrm_state_gc_destroy(struct xfrm_state *x)
{
        if (del_timer(&x->timer))
                BUG();
        if (del_timer(&x->rtimer))
                BUG();
        kfree(x->aalg);
        kfree(x->ealg);
        kfree(x->calg);
        kfree(x->encap);
        if (x->type) {
                x->type->destructor(x);
                xfrm_put_type(x->type);
        }
        security_xfrm_state_free(x);
        kfree(x);
}

static void xfrm_state_gc_task(void *data)
{
        struct xfrm_state *x;
        struct list_head *entry, *tmp;
        struct list_head gc_list = LIST_HEAD_INIT(gc_list);

        if (xfrm_state_gc_flush_bundles) {
                xfrm_state_gc_flush_bundles = 0;
                xfrm_flush_bundles();
        }

        spin_lock_bh(&xfrm_state_gc_lock);
        list_splice_init(&xfrm_state_gc_list, &gc_list);
        spin_unlock_bh(&xfrm_state_gc_lock);

        list_for_each_safe(entry, tmp, &gc_list) {
                x = list_entry(entry, struct xfrm_state, bydst);
                xfrm_state_gc_destroy(x);
        }
        wake_up(&km_waitq);
}

static inline unsigned long make_jiffies(long secs)
{
        if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
                return MAX_SCHEDULE_TIMEOUT-1;
        else
                return secs*HZ;
}

static void xfrm_timer_handler(unsigned long data)
{
        struct xfrm_state *x = (struct xfrm_state*)data;
        unsigned long now = (unsigned long)xtime.tv_sec;
        long next = LONG_MAX;
        int warn = 0;

        spin_lock(&x->lock);
        if (x->km.state == XFRM_STATE_DEAD)
                goto out;
        if (x->km.state == XFRM_STATE_EXPIRED)
                goto expired;
        if (x->lft.hard_add_expires_seconds) {
                long tmo = x->lft.hard_add_expires_seconds +
                        x->curlft.add_time - now;
                if (tmo <= 0)
                        goto expired;
                if (tmo < next)
                        next = tmo;
        }
        if (x->lft.hard_use_expires_seconds) {
                long tmo = x->lft.hard_use_expires_seconds +
                        (x->curlft.use_time ? : now) - now;
                if (tmo <= 0)
                        goto expired;
                if (tmo < next)
                        next = tmo;
        }
        if (x->km.dying)
                goto resched;
        if (x->lft.soft_add_expires_seconds) {
                long tmo = x->lft.soft_add_expires_seconds +
                        x->curlft.add_time - now;
                if (tmo <= 0)
                        warn = 1;
                else if (tmo < next)
                        next = tmo;
        }
        if (x->lft.soft_use_expires_seconds) {
                long tmo = x->lft.soft_use_expires_seconds +
                        (x->curlft.use_time ? : now) - now;
                if (tmo <= 0)
                        warn = 1;
                else if (tmo < next)
                        next = tmo;
        }

        x->km.dying = warn;
        if (warn)
                km_state_expired(x, 0, 0);
resched:
        if (next != LONG_MAX &&
            !mod_timer(&x->timer, jiffies + make_jiffies(next)))
                xfrm_state_hold(x);
        goto out;

expired:
        if (x->km.state == XFRM_STATE_ACQ && x->id.spi == 0) {
                x->km.state = XFRM_STATE_EXPIRED;
                wake_up(&km_waitq);
                next = 2;
                goto resched;
        }
        if (!__xfrm_state_delete(x) && x->id.spi)
                km_state_expired(x, 1, 0);

out:
        spin_unlock(&x->lock);
        xfrm_state_put(x);
}

static void xfrm_replay_timer_handler(unsigned long data);

struct xfrm_state *xfrm_state_alloc(void)
{
        struct xfrm_state *x;

        x = kmalloc(sizeof(struct xfrm_state), GFP_ATOMIC);

        if (x) {
                memset(x, 0, sizeof(struct xfrm_state));
                atomic_set(&x->refcnt, 1);
                atomic_set(&x->tunnel_users, 0);
                INIT_LIST_HEAD(&x->bydst);
                INIT_LIST_HEAD(&x->byspi);
                init_timer(&x->timer);
                x->timer.function = xfrm_timer_handler;
                x->timer.data     = (unsigned long)x;
                init_timer(&x->rtimer);
                x->rtimer.function = xfrm_replay_timer_handler;
                x->rtimer.data     = (unsigned long)x;
                x->curlft.add_time = (unsigned long)xtime.tv_sec;
                x->lft.soft_byte_limit = XFRM_INF;
                x->lft.soft_packet_limit = XFRM_INF;
                x->lft.hard_byte_limit = XFRM_INF;
                x->lft.hard_packet_limit = XFRM_INF;
                x->replay_maxage = 0;
                x->replay_maxdiff = 0;
                spin_lock_init(&x->lock);
        }
        return x;
}
EXPORT_SYMBOL(xfrm_state_alloc);

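/*
 * Usage sketch (illustrative only; the error handling mirrors what the
 * pfkey/netlink callers of this file do): a key manager allocates a
 * state, fills in identity and properties, and hands it over:
 *
 *	struct xfrm_state *x = xfrm_state_alloc();
 *
 *	if (x == NULL)
 *		return -ENOMEM;
 *	x->props.family = AF_INET;
 *	x->id.proto = IPPROTO_ESP;
 *	x->id.spi = htonl(0x1234);	// example SPI, network byte order
 *	// ... fill daddr/saddr, algorithms, lifetimes ...
 *	err = xfrm_state_add(x);
 *	if (err) {
 *		x->km.state = XFRM_STATE_DEAD;
 *		xfrm_state_put(x);
 *	}
 */
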
void __xfrm_state_destroy(struct xfrm_state *x)
{
        BUG_TRAP(x->km.state == XFRM_STATE_DEAD);

        spin_lock_bh(&xfrm_state_gc_lock);
        list_add(&x->bydst, &xfrm_state_gc_list);
        spin_unlock_bh(&xfrm_state_gc_lock);
        schedule_work(&xfrm_state_gc_work);
}
EXPORT_SYMBOL(__xfrm_state_destroy);

int __xfrm_state_delete(struct xfrm_state *x)
{
        int err = -ESRCH;

        if (x->km.state != XFRM_STATE_DEAD) {
                x->km.state = XFRM_STATE_DEAD;
                spin_lock(&xfrm_state_lock);
                list_del(&x->bydst);
                __xfrm_state_put(x);
                if (x->id.spi) {
                        list_del(&x->byspi);
                        __xfrm_state_put(x);
                }
                spin_unlock(&xfrm_state_lock);
                if (del_timer(&x->timer))
                        __xfrm_state_put(x);
                if (del_timer(&x->rtimer))
                        __xfrm_state_put(x);

                /* The number two in this test is the reference
                 * mentioned in the comment below plus the reference
                 * our caller holds.  A larger value means that
                 * there are DSTs attached to this xfrm_state.
                 */
                if (atomic_read(&x->refcnt) > 2) {
                        xfrm_state_gc_flush_bundles = 1;
                        schedule_work(&xfrm_state_gc_work);
                }

                /* All xfrm_state objects are created by xfrm_state_alloc.
                 * The xfrm_state_alloc call gives a reference, and that
                 * is what we are dropping here.
                 */
                __xfrm_state_put(x);
                err = 0;
        }

        return err;
}
EXPORT_SYMBOL(__xfrm_state_delete);

int xfrm_state_delete(struct xfrm_state *x)
{
        int err;

        spin_lock_bh(&x->lock);
        err = __xfrm_state_delete(x);
        spin_unlock_bh(&x->lock);

        return err;
}
EXPORT_SYMBOL(xfrm_state_delete);

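/*
 * Deletion sketch (illustrative): a caller that found x on one of the
 * hash chains must take its own reference before dropping the table
 * lock, exactly as xfrm_state_flush() below does:
 *
 *	xfrm_state_hold(x);
 *	spin_unlock_bh(&xfrm_state_lock);
 *	xfrm_state_delete(x);
 *	xfrm_state_put(x);
 */
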
void xfrm_state_flush(u8 proto)
{
        int i;
        struct xfrm_state *x;

        spin_lock_bh(&xfrm_state_lock);
        for (i = 0; i < XFRM_DST_HSIZE; i++) {
restart:
                list_for_each_entry(x, xfrm_state_bydst+i, bydst) {
                        if (!xfrm_state_kern(x) &&
                            (proto == IPSEC_PROTO_ANY || x->id.proto == proto)) {
                                xfrm_state_hold(x);
                                spin_unlock_bh(&xfrm_state_lock);

                                xfrm_state_delete(x);
                                xfrm_state_put(x);

                                spin_lock_bh(&xfrm_state_lock);
                                goto restart;
                        }
                }
        }
        spin_unlock_bh(&xfrm_state_lock);
        wake_up(&km_waitq);
}
EXPORT_SYMBOL(xfrm_state_flush);

static int
xfrm_init_tempsel(struct xfrm_state *x, struct flowi *fl,
                  struct xfrm_tmpl *tmpl,
                  xfrm_address_t *daddr, xfrm_address_t *saddr,
                  unsigned short family)
{
        struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
        if (!afinfo)
                return -1;
        afinfo->init_tempsel(x, fl, tmpl, daddr, saddr);
        xfrm_state_put_afinfo(afinfo);
        return 0;
}

struct xfrm_state *
xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
                struct flowi *fl, struct xfrm_tmpl *tmpl,
                struct xfrm_policy *pol, int *err,
                unsigned short family)
{
        unsigned h = xfrm_dst_hash(daddr, family);
        struct xfrm_state *x, *x0;
        int acquire_in_progress = 0;
        int error = 0;
        struct xfrm_state *best = NULL;
        struct xfrm_state_afinfo *afinfo;

        afinfo = xfrm_state_get_afinfo(family);
        if (afinfo == NULL) {
                *err = -EAFNOSUPPORT;
                return NULL;
        }

        spin_lock_bh(&xfrm_state_lock);
        list_for_each_entry(x, xfrm_state_bydst+h, bydst) {
                if (x->props.family == family &&
                    x->props.reqid == tmpl->reqid &&
                    xfrm_state_addr_check(x, daddr, saddr, family) &&
                    tmpl->mode == x->props.mode &&
                    tmpl->id.proto == x->id.proto &&
                    (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) {
                        /* Resolution logic:
                           1. There is a valid state with matching selector.
                              Done.
                           2. Valid state with inappropriate selector. Skip.

                           Entering area of "sysdeps".

                           3. If state is not valid, selector is temporary,
                              it selects only session which triggered
                              previous resolution. Key manager will do
                              something to install a state with proper
                              selector.
                         */
                        if (x->km.state == XFRM_STATE_VALID) {
                                if (!xfrm_selector_match(&x->sel, fl, family) ||
                                    !xfrm_sec_ctx_match(pol->security, x->security))
                                        continue;
                                if (!best ||
                                    best->km.dying > x->km.dying ||
                                    (best->km.dying == x->km.dying &&
                                     best->curlft.add_time < x->curlft.add_time))
                                        best = x;
                        } else if (x->km.state == XFRM_STATE_ACQ) {
                                acquire_in_progress = 1;
                        } else if (x->km.state == XFRM_STATE_ERROR ||
                                   x->km.state == XFRM_STATE_EXPIRED) {
                                if (xfrm_selector_match(&x->sel, fl, family) &&
                                    xfrm_sec_ctx_match(pol->security, x->security))
                                        error = -ESRCH;
                        }
                }
        }

        x = best;
        if (!x && !error && !acquire_in_progress) {
                if (tmpl->id.spi &&
                    (x0 = afinfo->state_lookup(daddr, tmpl->id.spi,
                                               tmpl->id.proto)) != NULL) {
                        xfrm_state_put(x0);
                        error = -EEXIST;
                        goto out;
                }
                x = xfrm_state_alloc();
                if (x == NULL) {
                        error = -ENOMEM;
                        goto out;
                }
                /* Initialize temporary selector matching only
                 * to current session. */
                xfrm_init_tempsel(x, fl, tmpl, daddr, saddr, family);

                if (km_query(x, tmpl, pol) == 0) {
                        x->km.state = XFRM_STATE_ACQ;
                        list_add_tail(&x->bydst, xfrm_state_bydst+h);
                        xfrm_state_hold(x);
                        if (x->id.spi) {
                                h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, family);
                                list_add(&x->byspi, xfrm_state_byspi+h);
                                xfrm_state_hold(x);
                        }
                        x->lft.hard_add_expires_seconds = XFRM_ACQ_EXPIRES;
                        xfrm_state_hold(x);
                        x->timer.expires = jiffies + XFRM_ACQ_EXPIRES*HZ;
                        add_timer(&x->timer);
                } else {
                        x->km.state = XFRM_STATE_DEAD;
                        xfrm_state_put(x);
                        x = NULL;
                        error = -ESRCH;
                }
        }
out:
        if (x)
                xfrm_state_hold(x);
        else
                *err = acquire_in_progress ? -EAGAIN : error;
        spin_unlock_bh(&xfrm_state_lock);
        xfrm_state_put_afinfo(afinfo);
        return x;
}

static void __xfrm_state_insert(struct xfrm_state *x)
{
        unsigned h = xfrm_dst_hash(&x->id.daddr, x->props.family);

        list_add(&x->bydst, xfrm_state_bydst+h);
        xfrm_state_hold(x);

        h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, x->props.family);

        list_add(&x->byspi, xfrm_state_byspi+h);
        xfrm_state_hold(x);

        if (!mod_timer(&x->timer, jiffies + HZ))
                xfrm_state_hold(x);

        if (x->replay_maxage &&
            !mod_timer(&x->rtimer, jiffies + x->replay_maxage))
                xfrm_state_hold(x);

        wake_up(&km_waitq);
}

void xfrm_state_insert(struct xfrm_state *x)
{
        spin_lock_bh(&xfrm_state_lock);
        __xfrm_state_insert(x);
        spin_unlock_bh(&xfrm_state_lock);

        xfrm_flush_all_bundles();
}
EXPORT_SYMBOL(xfrm_state_insert);

static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq);

int xfrm_state_add(struct xfrm_state *x)
{
        struct xfrm_state_afinfo *afinfo;
        struct xfrm_state *x1;
        int family;
        int err;

        family = x->props.family;
        afinfo = xfrm_state_get_afinfo(family);
        if (unlikely(afinfo == NULL))
                return -EAFNOSUPPORT;

        spin_lock_bh(&xfrm_state_lock);

        x1 = afinfo->state_lookup(&x->id.daddr, x->id.spi, x->id.proto);
        if (x1) {
                xfrm_state_put(x1);
                x1 = NULL;
                err = -EEXIST;
                goto out;
        }

        if (x->km.seq) {
                x1 = __xfrm_find_acq_byseq(x->km.seq);
                if (x1 && xfrm_addr_cmp(&x1->id.daddr, &x->id.daddr, family)) {
                        xfrm_state_put(x1);
                        x1 = NULL;
                }
        }

        if (!x1)
                x1 = afinfo->find_acq(
                        x->props.mode, x->props.reqid, x->id.proto,
                        &x->id.daddr, &x->props.saddr, 0);

        __xfrm_state_insert(x);
        err = 0;

out:
        spin_unlock_bh(&xfrm_state_lock);
        xfrm_state_put_afinfo(afinfo);

        if (!err)
                xfrm_flush_all_bundles();

        if (x1) {
                xfrm_state_delete(x1);
                xfrm_state_put(x1);
        }

        return err;
}
EXPORT_SYMBOL(xfrm_state_add);

int xfrm_state_update(struct xfrm_state *x)
{
        struct xfrm_state_afinfo *afinfo;
        struct xfrm_state *x1;
        int err;

        afinfo = xfrm_state_get_afinfo(x->props.family);
        if (unlikely(afinfo == NULL))
                return -EAFNOSUPPORT;

        spin_lock_bh(&xfrm_state_lock);
        x1 = afinfo->state_lookup(&x->id.daddr, x->id.spi, x->id.proto);

        err = -ESRCH;
        if (!x1)
                goto out;

        if (xfrm_state_kern(x1)) {
                xfrm_state_put(x1);
                err = -EEXIST;
                goto out;
        }

        if (x1->km.state == XFRM_STATE_ACQ) {
                __xfrm_state_insert(x);
                x = NULL;
        }
        err = 0;

out:
        spin_unlock_bh(&xfrm_state_lock);
        xfrm_state_put_afinfo(afinfo);

        if (err)
                return err;

        if (!x) {
                xfrm_state_delete(x1);
                xfrm_state_put(x1);
                return 0;
        }

        err = -EINVAL;
        spin_lock_bh(&x1->lock);
        if (likely(x1->km.state == XFRM_STATE_VALID)) {
                if (x->encap && x1->encap)
                        memcpy(x1->encap, x->encap, sizeof(*x1->encap));
                memcpy(&x1->lft, &x->lft, sizeof(x1->lft));
                x1->km.dying = 0;

                if (!mod_timer(&x1->timer, jiffies + HZ))
                        xfrm_state_hold(x1);
                if (x1->curlft.use_time)
                        xfrm_state_check_expire(x1);

                err = 0;
        }
        spin_unlock_bh(&x1->lock);

        xfrm_state_put(x1);

        return err;
}
EXPORT_SYMBOL(xfrm_state_update);

int xfrm_state_check_expire(struct xfrm_state *x)
{
        if (!x->curlft.use_time)
                x->curlft.use_time = (unsigned long)xtime.tv_sec;

        if (x->km.state != XFRM_STATE_VALID)
                return -EINVAL;

        if (x->curlft.bytes >= x->lft.hard_byte_limit ||
            x->curlft.packets >= x->lft.hard_packet_limit) {
                x->km.state = XFRM_STATE_EXPIRED;
                if (!mod_timer(&x->timer, jiffies))
                        xfrm_state_hold(x);
                return -EINVAL;
        }

        if (!x->km.dying &&
            (x->curlft.bytes >= x->lft.soft_byte_limit ||
             x->curlft.packets >= x->lft.soft_packet_limit)) {
                x->km.dying = 1;
                km_state_expired(x, 0, 0);
        }
        return 0;
}
EXPORT_SYMBOL(xfrm_state_check_expire);

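/*
 * Lifetime sketch (illustrative values): the soft limit only marks the
 * state dying and asks the key manager to rekey (km_state_expired with
 * hard == 0); the hard limit expires the state. A manager wanting a
 * rekey warning at roughly 80% of a 1 MB hard byte limit could set:
 *
 *	x->lft.hard_byte_limit = 1024 * 1024;
 *	x->lft.soft_byte_limit = 819 * 1024;
 */
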
static int xfrm_state_check_space(struct xfrm_state *x, struct sk_buff *skb)
{
        int nhead = x->props.header_len + LL_RESERVED_SPACE(skb->dst->dev)
                - skb_headroom(skb);

        if (nhead > 0)
                return pskb_expand_head(skb, nhead, 0, GFP_ATOMIC);

        /* Check tail too... */
        return 0;
}

int xfrm_state_check(struct xfrm_state *x, struct sk_buff *skb)
{
        int err = xfrm_state_check_expire(x);
        if (err < 0)
                goto err;
        err = xfrm_state_check_space(x, skb);
err:
        return err;
}
EXPORT_SYMBOL(xfrm_state_check);

struct xfrm_state *
xfrm_state_lookup(xfrm_address_t *daddr, u32 spi, u8 proto,
                  unsigned short family)
{
        struct xfrm_state *x;
        struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
        if (!afinfo)
                return NULL;

        spin_lock_bh(&xfrm_state_lock);
        x = afinfo->state_lookup(daddr, spi, proto);
        spin_unlock_bh(&xfrm_state_lock);
        xfrm_state_put_afinfo(afinfo);
        return x;
}
EXPORT_SYMBOL(xfrm_state_lookup);

struct xfrm_state *
xfrm_find_acq(u8 mode, u32 reqid, u8 proto,
              xfrm_address_t *daddr, xfrm_address_t *saddr,
              int create, unsigned short family)
{
        struct xfrm_state *x;
        struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
        if (!afinfo)
                return NULL;

        spin_lock_bh(&xfrm_state_lock);
        x = afinfo->find_acq(mode, reqid, proto, daddr, saddr, create);
        spin_unlock_bh(&xfrm_state_lock);
        xfrm_state_put_afinfo(afinfo);
        return x;
}
EXPORT_SYMBOL(xfrm_find_acq);

/* Silly enough, but too lazy to build a resolution list */

static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq)
{
        int i;
        struct xfrm_state *x;

        for (i = 0; i < XFRM_DST_HSIZE; i++) {
                list_for_each_entry(x, xfrm_state_bydst+i, bydst) {
                        if (x->km.seq == seq && x->km.state == XFRM_STATE_ACQ) {
                                xfrm_state_hold(x);
                                return x;
                        }
                }
        }
        return NULL;
}

struct xfrm_state *xfrm_find_acq_byseq(u32 seq)
{
        struct xfrm_state *x;

        spin_lock_bh(&xfrm_state_lock);
        x = __xfrm_find_acq_byseq(seq);
        spin_unlock_bh(&xfrm_state_lock);
        return x;
}
EXPORT_SYMBOL(xfrm_find_acq_byseq);

u32 xfrm_get_acqseq(void)
{
        u32 res;
        static u32 acqseq;
        static DEFINE_SPINLOCK(acqseq_lock);

        spin_lock_bh(&acqseq_lock);
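        /* increment; if acqseq wrapped around to zero, increment once
         * more, so zero is never handed out as an acquire sequence */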
        res = (++acqseq ? : ++acqseq);
        spin_unlock_bh(&acqseq_lock);
        return res;
}
EXPORT_SYMBOL(xfrm_get_acqseq);

void
xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi)
{
        u32 h;
        struct xfrm_state *x0;

        if (x->id.spi)
                return;

        if (minspi == maxspi) {
                x0 = xfrm_state_lookup(&x->id.daddr, minspi, x->id.proto, x->props.family);
                if (x0) {
                        xfrm_state_put(x0);
                        return;
                }
                x->id.spi = minspi;
        } else {
                u32 spi = 0;
                minspi = ntohl(minspi);
                maxspi = ntohl(maxspi);
                for (h=0; h<maxspi-minspi+1; h++) {
                        spi = minspi + net_random()%(maxspi-minspi+1);
                        x0 = xfrm_state_lookup(&x->id.daddr, htonl(spi), x->id.proto, x->props.family);
                        if (x0 == NULL) {
                                x->id.spi = htonl(spi);
                                break;
                        }
                        xfrm_state_put(x0);
                }
        }
        if (x->id.spi) {
                spin_lock_bh(&xfrm_state_lock);
                h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, x->props.family);
                list_add(&x->byspi, xfrm_state_byspi+h);
                xfrm_state_hold(x);
                spin_unlock_bh(&xfrm_state_lock);
                wake_up(&km_waitq);
        }
}
EXPORT_SYMBOL(xfrm_alloc_spi);

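/*
 * Usage sketch (illustrative range and caller policy): SPIs are passed
 * in network byte order; on return x->id.spi is either a free SPI from
 * the range (and the state is now hashed by SPI) or 0 if every probed
 * SPI in the range was already taken:
 *
 *	xfrm_alloc_spi(x, htonl(0x100), htonl(0xfff));
 *	if (!x->id.spi)
 *		return -ENOSPC;		// hypothetical caller policy
 */
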
int xfrm_state_walk(u8 proto, int (*func)(struct xfrm_state *, int, void*),
                    void *data)
{
        int i;
        struct xfrm_state *x;
        int count = 0;
        int err = 0;

        spin_lock_bh(&xfrm_state_lock);
        for (i = 0; i < XFRM_DST_HSIZE; i++) {
                list_for_each_entry(x, xfrm_state_bydst+i, bydst) {
                        if (proto == IPSEC_PROTO_ANY || x->id.proto == proto)
                                count++;
                }
        }
        if (count == 0) {
                err = -ENOENT;
                goto out;
        }

        for (i = 0; i < XFRM_DST_HSIZE; i++) {
                list_for_each_entry(x, xfrm_state_bydst+i, bydst) {
                        if (proto != IPSEC_PROTO_ANY && x->id.proto != proto)
                                continue;
                        err = func(x, --count, data);
                        if (err)
                                goto out;
                }
        }
out:
        spin_unlock_bh(&xfrm_state_lock);
        return err;
}
EXPORT_SYMBOL(xfrm_state_walk);

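/*
 * Callback sketch (illustrative; dump_one is hypothetical): the walk
 * passes a countdown as the second argument, so the callback sees 0 for
 * the last matching state:
 *
 *	static int dump_one(struct xfrm_state *x, int count, void *ptr)
 *	{
 *		printk(KERN_DEBUG "spi %08x proto %u, %d left after this\n",
 *		       ntohl(x->id.spi), x->id.proto, count);
 *		return 0;	// non-zero aborts the walk
 *	}
 *
 *	err = xfrm_state_walk(IPSEC_PROTO_ANY, dump_one, NULL);
 */
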
void xfrm_replay_notify(struct xfrm_state *x, int event)
{
        struct km_event c;
        /* we send notify messages in case
         *  1. we updated one of the sequence numbers, and the seqno
         *     difference is at least x->replay_maxdiff; in this case we
         *     also update the timeout of our timer function
         *  2. x->replay_maxage has elapsed since the last update,
         *     and there were changes
         *
         *  The state structure must be locked!
         */

        switch (event) {
        case XFRM_REPLAY_UPDATE:
                if (x->replay_maxdiff &&
                    (x->replay.seq - x->preplay.seq < x->replay_maxdiff) &&
                    (x->replay.oseq - x->preplay.oseq < x->replay_maxdiff))
                        return;

                break;

        case XFRM_REPLAY_TIMEOUT:
                if ((x->replay.seq == x->preplay.seq) &&
                    (x->replay.bitmap == x->preplay.bitmap) &&
                    (x->replay.oseq == x->preplay.oseq))
                        return;

                break;
        }

        memcpy(&x->preplay, &x->replay, sizeof(struct xfrm_replay_state));
        c.event = XFRM_MSG_NEWAE;
        c.data.aevent = event;
        km_state_notify(x, &c);

        if (x->replay_maxage &&
            !mod_timer(&x->rtimer, jiffies + x->replay_maxage))
                xfrm_state_hold(x);
}
EXPORT_SYMBOL(xfrm_replay_notify);

static void xfrm_replay_timer_handler(unsigned long data)
{
        struct xfrm_state *x = (struct xfrm_state*)data;

        spin_lock(&x->lock);

        if (xfrm_aevent_is_on() && x->km.state == XFRM_STATE_VALID)
                xfrm_replay_notify(x, XFRM_REPLAY_TIMEOUT);

        spin_unlock(&x->lock);
}

int xfrm_replay_check(struct xfrm_state *x, u32 seq)
{
        u32 diff;

        seq = ntohl(seq);

        if (unlikely(seq == 0))
                return -EINVAL;

        if (likely(seq > x->replay.seq))
                return 0;

        diff = x->replay.seq - seq;
        if (diff >= x->props.replay_window) {
                x->stats.replay_window++;
                return -EINVAL;
        }

        if (x->replay.bitmap & (1U << diff)) {
                x->stats.replay++;
                return -EINVAL;
        }
        return 0;
}
EXPORT_SYMBOL(xfrm_replay_check);

void xfrm_replay_advance(struct xfrm_state *x, u32 seq)
{
        u32 diff;

        seq = ntohl(seq);

        if (seq > x->replay.seq) {
                diff = seq - x->replay.seq;
                if (diff < x->props.replay_window)
                        x->replay.bitmap = ((x->replay.bitmap) << diff) | 1;
                else
                        x->replay.bitmap = 1;
                x->replay.seq = seq;
        } else {
                diff = x->replay.seq - seq;
                x->replay.bitmap |= (1U << diff);
        }

        if (xfrm_aevent_is_on())
                xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
}
EXPORT_SYMBOL(xfrm_replay_advance);

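/*
 * Worked example (illustrative) with replay_window = 32 and
 * x->replay.seq = 100; bit n of the bitmap tracks seq (replay.seq - n):
 *
 *	seq 103 arrives: diff = 3, bitmap = (bitmap << 3) | 1, seq = 103
 *	seq 101 arrives: 101 <= 103, diff = 2, bitmap |= 1U << 2
 *	seq 101 again:   xfrm_replay_check() finds bit 2 set -> replay
 *	seq  70 arrives: diff = 33 >= 32 -> outside the window, rejected
 */
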
static struct list_head xfrm_km_list = LIST_HEAD_INIT(xfrm_km_list);
static DEFINE_RWLOCK(xfrm_km_lock);

void km_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
{
        struct xfrm_mgr *km;

        read_lock(&xfrm_km_lock);
        list_for_each_entry(km, &xfrm_km_list, list)
                if (km->notify_policy)
                        km->notify_policy(xp, dir, c);
        read_unlock(&xfrm_km_lock);
}

void km_state_notify(struct xfrm_state *x, struct km_event *c)
{
        struct xfrm_mgr *km;
        read_lock(&xfrm_km_lock);
        list_for_each_entry(km, &xfrm_km_list, list)
                if (km->notify)
                        km->notify(x, c);
        read_unlock(&xfrm_km_lock);
}

EXPORT_SYMBOL(km_policy_notify);
EXPORT_SYMBOL(km_state_notify);

void km_state_expired(struct xfrm_state *x, int hard, u32 pid)
{
        struct km_event c;

        c.data.hard = hard;
        c.pid = pid;
        c.event = XFRM_MSG_EXPIRE;
        km_state_notify(x, &c);

        if (hard)
                wake_up(&km_waitq);
}
EXPORT_SYMBOL(km_state_expired);

/*
 * We send to all registered managers regardless of failure;
 * we are happy with one success.
 */
int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol)
{
        int err = -EINVAL, acqret;
        struct xfrm_mgr *km;

        read_lock(&xfrm_km_lock);
        list_for_each_entry(km, &xfrm_km_list, list) {
                acqret = km->acquire(x, t, pol, XFRM_POLICY_OUT);
                if (!acqret)
                        err = acqret;
        }
        read_unlock(&xfrm_km_lock);
        return err;
}
EXPORT_SYMBOL(km_query);

int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, u16 sport)
{
        int err = -EINVAL;
        struct xfrm_mgr *km;

        read_lock(&xfrm_km_lock);
        list_for_each_entry(km, &xfrm_km_list, list) {
                if (km->new_mapping)
                        err = km->new_mapping(x, ipaddr, sport);
                if (!err)
                        break;
        }
        read_unlock(&xfrm_km_lock);
        return err;
}
EXPORT_SYMBOL(km_new_mapping);

void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 pid)
{
        struct km_event c;

        c.data.hard = hard;
        c.pid = pid;
        c.event = XFRM_MSG_POLEXPIRE;
        km_policy_notify(pol, dir, &c);

        if (hard)
                wake_up(&km_waitq);
}
EXPORT_SYMBOL(km_policy_expired);

int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
{
        int err;
        u8 *data;
        struct xfrm_mgr *km;
        struct xfrm_policy *pol = NULL;

        if (optlen <= 0 || optlen > PAGE_SIZE)
                return -EMSGSIZE;

        data = kmalloc(optlen, GFP_KERNEL);
        if (!data)
                return -ENOMEM;

        err = -EFAULT;
        if (copy_from_user(data, optval, optlen))
                goto out;

        err = -EINVAL;
        read_lock(&xfrm_km_lock);
        list_for_each_entry(km, &xfrm_km_list, list) {
                pol = km->compile_policy(sk->sk_family, optname, data,
                                         optlen, &err);
                if (err >= 0)
                        break;
        }
        read_unlock(&xfrm_km_lock);

        if (err >= 0) {
                xfrm_sk_policy_insert(sk, err, pol);
                xfrm_pol_put(pol);
                err = 0;
        }

out:
        kfree(data);
        return err;
}
EXPORT_SYMBOL(xfrm_user_policy);

int xfrm_register_km(struct xfrm_mgr *km)
{
        write_lock_bh(&xfrm_km_lock);
        list_add_tail(&km->list, &xfrm_km_list);
        write_unlock_bh(&xfrm_km_lock);
        return 0;
}
EXPORT_SYMBOL(xfrm_register_km);

int xfrm_unregister_km(struct xfrm_mgr *km)
{
        write_lock_bh(&xfrm_km_lock);
        list_del(&km->list);
        write_unlock_bh(&xfrm_km_lock);
        return 0;
}
EXPORT_SYMBOL(xfrm_unregister_km);

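/*
 * Registration sketch (illustrative; the callback set is assumed from
 * struct xfrm_mgr in net/xfrm.h, and every my_* function here is
 * hypothetical): a key manager registers itself once at init time:
 *
 *	static struct xfrm_mgr my_mgr = {
 *		.id		= "my_km",
 *		.notify		= my_notify,
 *		.acquire	= my_acquire,
 *		.compile_policy	= my_compile_policy,
 *		.new_mapping	= my_new_mapping,
 *		.notify_policy	= my_notify_policy,
 *	};
 *
 *	xfrm_register_km(&my_mgr);
 */
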
int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo)
{
        int err = 0;
        if (unlikely(afinfo == NULL))
                return -EINVAL;
        if (unlikely(afinfo->family >= NPROTO))
                return -EAFNOSUPPORT;
        write_lock(&xfrm_state_afinfo_lock);
        if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL))
                err = -ENOBUFS;
        else {
                afinfo->state_bydst = xfrm_state_bydst;
                afinfo->state_byspi = xfrm_state_byspi;
                xfrm_state_afinfo[afinfo->family] = afinfo;
        }
        write_unlock(&xfrm_state_afinfo_lock);
        return err;
}
EXPORT_SYMBOL(xfrm_state_register_afinfo);

int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
{
        int err = 0;
        if (unlikely(afinfo == NULL))
                return -EINVAL;
        if (unlikely(afinfo->family >= NPROTO))
                return -EAFNOSUPPORT;
        write_lock(&xfrm_state_afinfo_lock);
        if (likely(xfrm_state_afinfo[afinfo->family] != NULL)) {
                if (unlikely(xfrm_state_afinfo[afinfo->family] != afinfo))
                        err = -EINVAL;
                else {
                        xfrm_state_afinfo[afinfo->family] = NULL;
                        afinfo->state_byspi = NULL;
                        afinfo->state_bydst = NULL;
                }
        }
        write_unlock(&xfrm_state_afinfo_lock);
        return err;
}
EXPORT_SYMBOL(xfrm_state_unregister_afinfo);

static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned short family)
{
        struct xfrm_state_afinfo *afinfo;
        if (unlikely(family >= NPROTO))
                return NULL;
        read_lock(&xfrm_state_afinfo_lock);
        afinfo = xfrm_state_afinfo[family];
        if (likely(afinfo != NULL))
                read_lock(&afinfo->lock);
        read_unlock(&xfrm_state_afinfo_lock);
        return afinfo;
}

static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo)
{
        if (unlikely(afinfo == NULL))
                return;
        read_unlock(&afinfo->lock);
}

/* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
void xfrm_state_delete_tunnel(struct xfrm_state *x)
{
        if (x->tunnel) {
                struct xfrm_state *t = x->tunnel;

                if (atomic_read(&t->tunnel_users) == 2)
                        xfrm_state_delete(t);
                atomic_dec(&t->tunnel_users);
                xfrm_state_put(t);
                x->tunnel = NULL;
        }
}
EXPORT_SYMBOL(xfrm_state_delete_tunnel);

/*
 * This function is NOT optimal.  For example, with ESP it will give an
 * MTU that's usually two bytes short of being optimal.  However, it will
 * usually give an answer that's a multiple of 4 provided the input is
 * also a multiple of 4.
 */
int xfrm_state_mtu(struct xfrm_state *x, int mtu)
{
        int res = mtu;

        res -= x->props.header_len;

        for (;;) {
                int m = res;

                if (m < 68)
                        return 68;

                spin_lock_bh(&x->lock);
                if (x->km.state == XFRM_STATE_VALID &&
                    x->type && x->type->get_max_size)
                        m = x->type->get_max_size(x, m);
                else
                        m += x->props.header_len;
                spin_unlock_bh(&x->lock);

                if (m <= mtu)
                        break;
                res -= (m - mtu);
        }

        return res;
}
EXPORT_SYMBOL(xfrm_state_mtu);

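/*
 * Worked example (illustrative numbers): with mtu = 1500 and
 * header_len = 20, res starts at 1480.  Each pass asks the transform how
 * big a res-byte payload becomes on the wire (padding, trailer, ICV);
 * whenever that exceeds the original mtu, res shrinks by the overshoot
 * and the loop retries until the transformed size fits.
 */
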
int xfrm_init_state(struct xfrm_state *x)
{
        struct xfrm_state_afinfo *afinfo;
        int family = x->props.family;
        int err;

        err = -EAFNOSUPPORT;
        afinfo = xfrm_state_get_afinfo(family);
        if (!afinfo)
                goto error;

        err = 0;
        if (afinfo->init_flags)
                err = afinfo->init_flags(x);

        xfrm_state_put_afinfo(afinfo);

        if (err)
                goto error;

        err = -EPROTONOSUPPORT;
        x->type = xfrm_get_type(x->id.proto, family);
        if (x->type == NULL)
                goto error;

        err = x->type->init_state(x);
        if (err)
                goto error;

        x->km.state = XFRM_STATE_VALID;

error:
        return err;
}
EXPORT_SYMBOL(xfrm_init_state);

void __init xfrm_state_init(void)
{
        int i;

        for (i=0; i<XFRM_DST_HSIZE; i++) {
                INIT_LIST_HEAD(&xfrm_state_bydst[i]);
                INIT_LIST_HEAD(&xfrm_state_byspi[i]);
        }
        INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task, NULL);
}