/*
 * Copyright (C)2003,2004 USAGI/WIDE Project
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 * Authors	Mitsuru KANDA <mk@linux-ipv6.org>
 *		YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
 *
 * Based on net/ipv4/xfrm4_tunnel.c
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/xfrm.h>
#include <linux/list.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/ipv6.h>
#include <net/protocol.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/mutex.h>

#ifdef CONFIG_IPV6_XFRM6_TUNNEL_DEBUG
# define X6TDEBUG	3
#else
# define X6TDEBUG	1
#endif

/* X6TPRINTK1 is always compiled in (error messages); X6TPRINTK3 only
 * prints when the debug config option is set. */
#define X6TPRINTK(fmt, args...)		printk(fmt, ## args)
#define X6TNOPRINTK(fmt, args...)	do { ; } while (0)

#if X6TDEBUG >= 1
# define X6TPRINTK1	X6TPRINTK
#else
# define X6TPRINTK1	X6TNOPRINTK
#endif
#if X6TDEBUG >= 3
# define X6TPRINTK3	X6TPRINTK
#else
# define X6TPRINTK3	X6TNOPRINTK
#endif
/*
 * xfrm_tunnel_spi things are for allocating unique id ("spi")
 * per xfrm_address_t.
 */
#ifdef CONFIG_IPV6_XFRM6_TUNNEL_DEBUG
# define XFRM6_TUNNEL_SPI_MAGIC 0xdeadbeef
#endif

struct xfrm6_tunnel_spi {
	struct hlist_node list_byaddr;
	struct hlist_node list_byspi;
	xfrm_address_t addr;
	u32 spi;
	atomic_t refcnt;
#ifdef XFRM6_TUNNEL_SPI_MAGIC
	u32 magic;
#endif
};

static DEFINE_RWLOCK(xfrm6_tunnel_spi_lock);

/* Most recently allocated SPI; the allocator resumes scanning from here. */
static u32 xfrm6_tunnel_spi;

#define XFRM6_TUNNEL_SPI_MIN	1
#define XFRM6_TUNNEL_SPI_MAX	0xffffffff

static kmem_cache_t *xfrm6_tunnel_spi_kmem __read_mostly;

#define XFRM6_TUNNEL_SPI_BYADDR_HSIZE 256
#define XFRM6_TUNNEL_SPI_BYSPI_HSIZE 256

static struct hlist_head xfrm6_tunnel_spi_byaddr[XFRM6_TUNNEL_SPI_BYADDR_HSIZE];
static struct hlist_head xfrm6_tunnel_spi_byspi[XFRM6_TUNNEL_SPI_BYSPI_HSIZE];
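/*
 * Each allocated SPI lives on two hash chains at once: byaddr, keyed on
 * the peer address for lookup and release, and byspi, keyed on the SPI
 * value so the allocator can quickly test whether a candidate SPI is
 * already in use.  Both tables are protected by xfrm6_tunnel_spi_lock,
 * and each entry is reference counted so repeated allocations for the
 * same address share a single SPI.
 */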
#ifdef XFRM6_TUNNEL_SPI_MAGIC
static int x6spi_check_magic(const struct xfrm6_tunnel_spi *x6spi,
			     const char *name)
{
	if (unlikely(x6spi->magic != XFRM6_TUNNEL_SPI_MAGIC)) {
		X6TPRINTK3(KERN_DEBUG "%s(): x6spi object "
				      "at %p has corrupted magic %08x "
				      "(should be %08x)\n",
			   name, x6spi, x6spi->magic, XFRM6_TUNNEL_SPI_MAGIC);
		return -1;
	}
	return 0;
}
#else
static inline int x6spi_check_magic(const struct xfrm6_tunnel_spi *x6spi,
				    const char *name)
{
	return 0;
}
#endif

#define X6SPI_CHECK_MAGIC(x6spi) x6spi_check_magic((x6spi), __FUNCTION__)
static inline unsigned xfrm6_tunnel_spi_hash_byaddr(xfrm_address_t *addr)
{
	unsigned h;

	X6TPRINTK3(KERN_DEBUG "%s(addr=%p)\n", __FUNCTION__, addr);

	/* Fold the four 32-bit words of the IPv6 address into one bucket index. */
	h = addr->a6[0] ^ addr->a6[1] ^ addr->a6[2] ^ addr->a6[3];
	h ^= h >> 16;
	h ^= h >> 8;
	h &= XFRM6_TUNNEL_SPI_BYADDR_HSIZE - 1;

	X6TPRINTK3(KERN_DEBUG "%s() = %u\n", __FUNCTION__, h);

	return h;
}

static inline unsigned xfrm6_tunnel_spi_hash_byspi(u32 spi)
{
	return spi % XFRM6_TUNNEL_SPI_BYSPI_HSIZE;
}
static int xfrm6_tunnel_spi_init(void)
{
	int i;

	X6TPRINTK3(KERN_DEBUG "%s()\n", __FUNCTION__);

	xfrm6_tunnel_spi = 0;
	xfrm6_tunnel_spi_kmem = kmem_cache_create("xfrm6_tunnel_spi",
						  sizeof(struct xfrm6_tunnel_spi),
						  0, SLAB_HWCACHE_ALIGN,
						  NULL, NULL);
	if (!xfrm6_tunnel_spi_kmem) {
		X6TPRINTK1(KERN_ERR
			   "%s(): failed to allocate xfrm6_tunnel_spi_kmem\n",
			   __FUNCTION__);
		return -ENOMEM;
	}

	for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++)
		INIT_HLIST_HEAD(&xfrm6_tunnel_spi_byaddr[i]);
	for (i = 0; i < XFRM6_TUNNEL_SPI_BYSPI_HSIZE; i++)
		INIT_HLIST_HEAD(&xfrm6_tunnel_spi_byspi[i]);
	return 0;
}
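/*
 * The teardown routine below only releases the kmem cache once both hash
 * tables are empty; leftover entries indicate a reference-count leak and
 * are reported rather than freed blindly.
 */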
static void xfrm6_tunnel_spi_fini(void)
{
	int i;

	X6TPRINTK3(KERN_DEBUG "%s()\n", __FUNCTION__);

	for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++) {
		if (!hlist_empty(&xfrm6_tunnel_spi_byaddr[i]))
			goto err;
	}
	for (i = 0; i < XFRM6_TUNNEL_SPI_BYSPI_HSIZE; i++) {
		if (!hlist_empty(&xfrm6_tunnel_spi_byspi[i]))
			goto err;
	}
	kmem_cache_destroy(xfrm6_tunnel_spi_kmem);
	xfrm6_tunnel_spi_kmem = NULL;
	return;
err:
	X6TPRINTK1(KERN_ERR "%s(): table is not empty\n", __FUNCTION__);
}
static struct xfrm6_tunnel_spi *__xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr)
{
	struct xfrm6_tunnel_spi *x6spi;
	struct hlist_node *pos;

	X6TPRINTK3(KERN_DEBUG "%s(saddr=%p)\n", __FUNCTION__, saddr);

	hlist_for_each_entry(x6spi, pos,
			     &xfrm6_tunnel_spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)],
			     list_byaddr) {
		if (memcmp(&x6spi->addr, saddr, sizeof(x6spi->addr)) == 0) {
			X6SPI_CHECK_MAGIC(x6spi);
			X6TPRINTK3(KERN_DEBUG "%s() = %p(%u)\n",
				   __FUNCTION__, x6spi, x6spi->spi);
			return x6spi;
		}
	}

	X6TPRINTK3(KERN_DEBUG "%s() = NULL(0)\n", __FUNCTION__);
	return NULL;
}
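/*
 * __xfrm6_tunnel_spi_lookup() walks the byaddr chain without taking the
 * lock itself; callers must already hold xfrm6_tunnel_spi_lock (read or
 * write).  The exported wrapper below takes the read lock and returns
 * only the SPI value, or 0 if no entry exists for the address.
 */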
u32 xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr)
{
	struct xfrm6_tunnel_spi *x6spi;
	u32 spi;

	X6TPRINTK3(KERN_DEBUG "%s(saddr=%p)\n", __FUNCTION__, saddr);

	read_lock_bh(&xfrm6_tunnel_spi_lock);
	x6spi = __xfrm6_tunnel_spi_lookup(saddr);
	spi = x6spi ? x6spi->spi : 0;
	read_unlock_bh(&xfrm6_tunnel_spi_lock);
	return spi;
}

EXPORT_SYMBOL(xfrm6_tunnel_spi_lookup);
static u32 __xfrm6_tunnel_alloc_spi(xfrm_address_t *saddr)
{
	u32 spi;
	struct xfrm6_tunnel_spi *x6spi;
	struct hlist_node *pos;
	unsigned index;

	X6TPRINTK3(KERN_DEBUG "%s(saddr=%p)\n", __FUNCTION__, saddr);

	if (xfrm6_tunnel_spi < XFRM6_TUNNEL_SPI_MIN ||
	    xfrm6_tunnel_spi >= XFRM6_TUNNEL_SPI_MAX)
		xfrm6_tunnel_spi = XFRM6_TUNNEL_SPI_MIN;
	else
		xfrm6_tunnel_spi++;

	/* Scan upward from the last allocated SPI for a free value. */
	for (spi = xfrm6_tunnel_spi; spi <= XFRM6_TUNNEL_SPI_MAX; spi++) {
		index = xfrm6_tunnel_spi_hash_byspi(spi);
		hlist_for_each_entry(x6spi, pos,
				     &xfrm6_tunnel_spi_byspi[index],
				     list_byspi) {
			if (x6spi->spi == spi)
				goto try_next_1;
		}
		xfrm6_tunnel_spi = spi;
		goto alloc_spi;
try_next_1:;
	}
	/* Wrap around and retry the range below the starting point. */
	for (spi = XFRM6_TUNNEL_SPI_MIN; spi < xfrm6_tunnel_spi; spi++) {
		index = xfrm6_tunnel_spi_hash_byspi(spi);
		hlist_for_each_entry(x6spi, pos,
				     &xfrm6_tunnel_spi_byspi[index],
				     list_byspi) {
			if (x6spi->spi == spi)
				goto try_next_2;
		}
		xfrm6_tunnel_spi = spi;
		goto alloc_spi;
try_next_2:;
	}
	spi = 0;
	goto out;
alloc_spi:
	X6TPRINTK3(KERN_DEBUG "%s(): allocate new spi for " NIP6_FMT "\n",
		   __FUNCTION__,
		   NIP6(*(struct in6_addr *)saddr));
	x6spi = kmem_cache_alloc(xfrm6_tunnel_spi_kmem, SLAB_ATOMIC);
	if (!x6spi) {
		X6TPRINTK1(KERN_ERR "%s(): kmem_cache_alloc() failed\n",
			   __FUNCTION__);
		/* Report failure: no entry was installed for this SPI. */
		spi = 0;
		goto out;
	}
#ifdef XFRM6_TUNNEL_SPI_MAGIC
	x6spi->magic = XFRM6_TUNNEL_SPI_MAGIC;
#endif
	memcpy(&x6spi->addr, saddr, sizeof(x6spi->addr));
	x6spi->spi = spi;
	atomic_set(&x6spi->refcnt, 1);

	hlist_add_head(&x6spi->list_byspi, &xfrm6_tunnel_spi_byspi[index]);

	index = xfrm6_tunnel_spi_hash_byaddr(saddr);
	hlist_add_head(&x6spi->list_byaddr, &xfrm6_tunnel_spi_byaddr[index]);
	X6SPI_CHECK_MAGIC(x6spi);
out:
	X6TPRINTK3(KERN_DEBUG "%s() = %u\n", __FUNCTION__, spi);
	return spi;
}
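/*
 * xfrm6_tunnel_alloc_spi() either returns the SPI already assigned to
 * saddr (taking another reference) or allocates a fresh one under the
 * write lock.  A caller is expected to pair it with
 * xfrm6_tunnel_free_spi(); roughly (illustrative sketch only):
 *
 *	u32 spi = xfrm6_tunnel_alloc_spi(saddr);	-- takes a reference
 *	if (spi) {
 *		... use spi for the IPv6-in-IPv6 state ...
 *		xfrm6_tunnel_free_spi(saddr);		-- drops it again
 *	}
 */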
u32 xfrm6_tunnel_alloc_spi(xfrm_address_t *saddr)
{
	struct xfrm6_tunnel_spi *x6spi;
	u32 spi;

	X6TPRINTK3(KERN_DEBUG "%s(saddr=%p)\n", __FUNCTION__, saddr);

	write_lock_bh(&xfrm6_tunnel_spi_lock);
	x6spi = __xfrm6_tunnel_spi_lookup(saddr);
	if (x6spi) {
		atomic_inc(&x6spi->refcnt);
		spi = x6spi->spi;
	} else
		spi = __xfrm6_tunnel_alloc_spi(saddr);
	write_unlock_bh(&xfrm6_tunnel_spi_lock);

	X6TPRINTK3(KERN_DEBUG "%s() = %u\n", __FUNCTION__, spi);

	return spi;
}

EXPORT_SYMBOL(xfrm6_tunnel_alloc_spi);
void xfrm6_tunnel_free_spi(xfrm_address_t *saddr)
{
	struct xfrm6_tunnel_spi *x6spi;
	struct hlist_node *pos, *n;

	X6TPRINTK3(KERN_DEBUG "%s(saddr=%p)\n", __FUNCTION__, saddr);

	write_lock_bh(&xfrm6_tunnel_spi_lock);

	hlist_for_each_entry_safe(x6spi, pos, n,
				  &xfrm6_tunnel_spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)],
				  list_byaddr) {
		if (memcmp(&x6spi->addr, saddr, sizeof(x6spi->addr)) == 0) {
			X6TPRINTK3(KERN_DEBUG "%s(): x6spi object for " NIP6_FMT
				   " found at %p\n",
				   __FUNCTION__,
				   NIP6(*(struct in6_addr *)saddr),
				   x6spi);
			X6SPI_CHECK_MAGIC(x6spi);
			/* Drop one reference; free the entry when the last user is gone. */
			if (atomic_dec_and_test(&x6spi->refcnt)) {
				hlist_del(&x6spi->list_byaddr);
				hlist_del(&x6spi->list_byspi);
				kmem_cache_free(xfrm6_tunnel_spi_kmem, x6spi);
			}
			break;
		}
	}

	write_unlock_bh(&xfrm6_tunnel_spi_lock);
}

EXPORT_SYMBOL(xfrm6_tunnel_free_spi);
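/*
 * For this xfrm type the outer IPv6 header is expected to have been
 * built further up the transform stack, so ->output only has to fill in
 * the outer payload_len; ->input is a no-op because the decapsulated
 * packet is handed back through the generic xfrm input path.
 */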
static int xfrm6_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ipv6hdr *top_iph;

	top_iph = (struct ipv6hdr *)skb->data;
	top_iph->payload_len = htons(skb->len - sizeof(struct ipv6hdr));

	return 0;
}

static int xfrm6_tunnel_input(struct xfrm_state *x, struct xfrm_decap_state *decap, struct sk_buff *skb)
{
	return 0;
}
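/*
 * At most one device-level IPv6-over-IPv6 tunnel driver (ip6_tunnel, for
 * example) may register a struct xfrm6_tunnel here.  The receive path
 * below offers it each packet first and falls back to native xfrm
 * processing only if it declines; ICMPv6 errors likewise go to the
 * registered handler when one exists.
 */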
static struct xfrm6_tunnel *xfrm6_tunnel_handler;
static DEFINE_MUTEX(xfrm6_tunnel_mutex);

int xfrm6_tunnel_register(struct xfrm6_tunnel *handler)
{
	int ret;

	mutex_lock(&xfrm6_tunnel_mutex);
	ret = 0;
	if (xfrm6_tunnel_handler != NULL)
		ret = -EINVAL;
	if (!ret)
		xfrm6_tunnel_handler = handler;
	mutex_unlock(&xfrm6_tunnel_mutex);

	return ret;
}

EXPORT_SYMBOL(xfrm6_tunnel_register);

int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler)
{
	int ret;

	mutex_lock(&xfrm6_tunnel_mutex);
	ret = 0;
	if (xfrm6_tunnel_handler != handler)
		ret = -EINVAL;
	if (!ret)
		xfrm6_tunnel_handler = NULL;
	mutex_unlock(&xfrm6_tunnel_mutex);

	/* Wait for in-flight receive-path users of the old handler to finish. */
	synchronize_net();

	return ret;
}

EXPORT_SYMBOL(xfrm6_tunnel_deregister);
static int xfrm6_tunnel_rcv(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct xfrm6_tunnel *handler = xfrm6_tunnel_handler;
	struct ipv6hdr *iph = skb->nh.ipv6h;
	u32 spi;

	/* Let a registered device-level ip6ip6 handler take the packet first. */
	if (handler && handler->handler(pskb) == 0)
		return 0;

	spi = xfrm6_tunnel_spi_lookup((xfrm_address_t *)&iph->saddr);
	return xfrm6_rcv_spi(pskb, spi);
}
static void xfrm6_tunnel_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
			     int type, int code, int offset, __u32 info)
{
	struct xfrm6_tunnel *handler = xfrm6_tunnel_handler;

	/* call here first for device-like ip6ip6 err handling */
	if (handler) {
		handler->err_handler(skb, opt, type, code, offset, info);
		return;
	}

	/* xfrm6_tunnel native err handling */
	switch (type) {
	case ICMPV6_DEST_UNREACH:
		switch (code) {
		case ICMPV6_NOROUTE:
		case ICMPV6_ADM_PROHIBITED:
		case ICMPV6_NOT_NEIGHBOUR:
		case ICMPV6_ADDR_UNREACH:
		case ICMPV6_PORT_UNREACH:
		default:
			X6TPRINTK3(KERN_DEBUG
				   "xfrm6_tunnel: Destination Unreach.\n");
			break;
		}
		break;
	case ICMPV6_PKT_TOOBIG:
		X6TPRINTK3(KERN_DEBUG
			   "xfrm6_tunnel: Packet Too Big.\n");
		break;
	case ICMPV6_TIME_EXCEED:
		switch (code) {
		case ICMPV6_EXC_HOPLIMIT:
			X6TPRINTK3(KERN_DEBUG
				   "xfrm6_tunnel: Too small Hoplimit.\n");
			break;
		case ICMPV6_EXC_FRAGTIME:
		default:
			break;
		}
		break;
	case ICMPV6_PARAMPROB:
		switch (code) {
		case ICMPV6_HDR_FIELD: break;
		case ICMPV6_UNK_NEXTHDR: break;
		case ICMPV6_UNK_OPTION: break;
		}
		break;
	default:
		break;
	}
}
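/*
 * xfrm_state hooks: init_state validates the state and records the size
 * of the outer IPv6 header, while the destructor releases any per-address
 * SPI held for the state's source address.
 */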
static int xfrm6_tunnel_init_state(struct xfrm_state *x)
{
	/* Only tunnel mode without NAT-T encapsulation is supported. */
	if (!x->props.mode)
		return -EINVAL;
	if (x->encap)
		return -EINVAL;

	x->props.header_len = sizeof(struct ipv6hdr);

	return 0;
}

static void xfrm6_tunnel_destroy(struct xfrm_state *x)
{
	xfrm6_tunnel_free_spi((xfrm_address_t *)&x->props.saddr);
}

static struct xfrm_type xfrm6_tunnel_type = {
	.description	= "IP6IP6",
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_IPV6,
	.init_state	= xfrm6_tunnel_init_state,
	.destructor	= xfrm6_tunnel_destroy,
	.input		= xfrm6_tunnel_input,
	.output		= xfrm6_tunnel_output,
};

static struct inet6_protocol xfrm6_tunnel_protocol = {
	.handler	= xfrm6_tunnel_rcv,
	.err_handler	= xfrm6_tunnel_err,
	.flags		= INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};
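/*
 * Module bring-up registers the xfrm type first, then claims the
 * IPPROTO_IPV6 protocol slot, then sets up the SPI tables; each failure
 * path unwinds whatever the earlier steps registered.
 */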
static int __init xfrm6_tunnel_init(void)
{
	X6TPRINTK3(KERN_DEBUG "%s()\n", __FUNCTION__);

	if (xfrm_register_type(&xfrm6_tunnel_type, AF_INET6) < 0) {
		X6TPRINTK1(KERN_ERR
			   "xfrm6_tunnel init: can't add xfrm type\n");
		return -EAGAIN;
	}
	if (inet6_add_protocol(&xfrm6_tunnel_protocol, IPPROTO_IPV6) < 0) {
		X6TPRINTK1(KERN_ERR
			   "xfrm6_tunnel init(): can't add protocol\n");
		xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6);
		return -EAGAIN;
	}
	if (xfrm6_tunnel_spi_init() < 0) {
		X6TPRINTK1(KERN_ERR
			   "xfrm6_tunnel init: failed to initialize spi\n");
		inet6_del_protocol(&xfrm6_tunnel_protocol, IPPROTO_IPV6);
		xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6);
		return -EAGAIN;
	}
	return 0;
}
static void __exit xfrm6_tunnel_fini(void)
{
	X6TPRINTK3(KERN_DEBUG "%s()\n", __FUNCTION__);

	xfrm6_tunnel_spi_fini();
	if (inet6_del_protocol(&xfrm6_tunnel_protocol, IPPROTO_IPV6) < 0)
		X6TPRINTK1(KERN_ERR
			   "xfrm6_tunnel close: can't remove protocol\n");
	if (xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6) < 0)
		X6TPRINTK1(KERN_ERR
			   "xfrm6_tunnel close: can't remove xfrm type\n");
}

module_init(xfrm6_tunnel_init);
module_exit(xfrm6_tunnel_fini);
MODULE_LICENSE("GPL");