/*
 *	Linux NET3:	IP/IP protocol decoder.
 *
 *	Version: $Id: ipip.c,v 1.50 2001/10/02 02:22:36 davem Exp $
 *
 *	Authors:
 *		Sam Lantinga	(slouken@cs.ucdavis.edu)  02/01/95
 *
 *	Fixes:
 *		Alan Cox	:	Merged and made usable non modular (it's so tiny it's silly
 *					as a module taking up 2 pages).
 *		Alan Cox	:	Fixed bug with 1.3.18 and IPIP not working (now needs to set
 *					skb->h.iph to keep ip_forward happy).
 *		Alan Cox	:	More fixes for 1.3.21, and firewall fix. Maybe this will work soon 8).
 *		Kai Schulte	:	Fixed #defines for IP_FIREWALL->FIREWALL
 *		David Woodhouse	:	Perform some basic ICMP handling.
 *					IPIP Routing without decapsulation.
 *		Carlos Picoto	:	GRE over IP support
 *		Alexey Kuznetsov:	Reworked. Really, it is now a truncated version of ipv4/ip_gre.c.
 *					I do not want to merge them together.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 */
/* tunnel.c: an IP tunnel driver

	The purpose of this driver is to provide an IP tunnel through
	which you can tunnel network traffic transparently across subnets.

	This was written by looking at Nick Holloway's dummy driver.
	Thanks for the great code!

		-Sam Lantinga	(slouken@cs.ucdavis.edu)  02/01/95

	Minor tweaks:
		Cleaned up the code a little and added some pre-1.3.0 tweaks.
		dev->hard_header/hard_header_len changed to use no headers.
		Comments/bracketing tweaked.
		Made the tunnels use dev->name not tunnel: when error reporting.

		-Alan Cox	(Alan.Cox@linux.org)	21 March 95

	Reworked:
		Changed to tunnel to destination gateway in addition to the
		tunnel's pointopoint address.
		Almost completely rewritten.
		Note: There is currently no firewall or ICMP handling done.

		-Sam Lantinga	(slouken@cs.ucdavis.edu)  02/13/96

*/
/* Things I wish I had known when writing the tunnel driver:

	When the tunnel_xmit() function is called, the skb contains the
	packet to be sent (plus a great deal of extra info), and dev
	contains the tunnel device that _we_ are.

	When we are passed a packet, we are expected to fill in the
	source address with our source IP address.

	What is the proper way to allocate, copy and free a buffer?
	After you allocate it, it is a "0 length" chunk of memory
	starting at zero.  If you want to add headers to the buffer
	later, you'll have to call "skb_reserve(skb, amount)" with
	the amount of memory you want reserved.  Then, you call
	"skb_put(skb, amount)" with the amount of space you want in
	the buffer.  skb_put() returns a pointer to the top (#0) of
	that buffer.  skb->len is set to the amount of space you have
	"allocated" with skb_put().  You can then write up to skb->len
	bytes to that buffer.  If you need more, you can call skb_put()
	again with the additional amount of space you need.  You can
	find out how much more space you can allocate by calling
	"skb_tailroom(skb)".

	Now, to add header space, call "skb_push(skb, header_len)".
	This creates space at the beginning of the buffer and returns
	a pointer to this new space.  If later you need to strip a
	header from a buffer, call "skb_pull(skb, header_len)".
	skb_headroom() will return how much space is left at the top
	of the buffer (before the main data).  Remember, this headroom
	space must be reserved before the skb_put() function is called.

	(An illustrative sketch of this sequence follows the #include
	block below.)
*/
/*
   This version of net/ipv4/ipip.c is a clone of net/ipv4/ip_gre.c.

   For comments look at net/ipv4/ip_gre.c --ANK
 */
#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/netfilter_ipv4.h>
#include <linux/if_ether.h>

#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/ipip.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
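/*
 * Illustrative only -- not used by this driver.  A minimal sketch of the
 * skb_reserve()/skb_put()/skb_push() sequence described in the comment
 * block above.  The name ipip_example_build_skb() and the zero-filled
 * payload are hypothetical; a real transmit path (see ipip_tunnel_xmit()
 * below) sizes the headroom from the underlying device instead of a bare
 * sizeof(struct iphdr).
 */
static struct sk_buff *ipip_example_build_skb(unsigned int payload_len)
{
	struct sk_buff *skb;
	struct iphdr *iph;
	unsigned char *data;

	/* Allocate room for one outer IP header plus the payload. */
	skb = alloc_skb(sizeof(struct iphdr) + payload_len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	/* Headroom must be reserved before any skb_put(). */
	skb_reserve(skb, sizeof(struct iphdr));

	/* skb_put() extends the data area at the tail and returns its start. */
	data = skb_put(skb, payload_len);
	memset(data, 0, payload_len);

	/* skb_push() opens space in front of the data for the header. */
	iph = (struct iphdr *)skb_push(skb, sizeof(struct iphdr));
	iph->version = 4;
	iph->ihl = sizeof(struct iphdr) >> 2;

	return skb;
}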
#define HASH_SIZE  16
#define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&0xF)
static int ipip_fb_tunnel_init(struct net_device *dev);
static int ipip_tunnel_init(struct net_device *dev);
static void ipip_tunnel_setup(struct net_device *dev);

static struct net_device *ipip_fb_tunnel_dev;
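/*
 * Tunnels live in four small hash tables, chosen by which endpoint
 * addresses are configured: keyed by both remote and local address, by
 * remote only, by local only, or a single wildcard slot for the fallback
 * device.  HASH() folds an address into one of HASH_SIZE (16) buckets.
 */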
static struct ip_tunnel *tunnels_r_l[HASH_SIZE];
static struct ip_tunnel *tunnels_r[HASH_SIZE];
static struct ip_tunnel *tunnels_l[HASH_SIZE];
static struct ip_tunnel *tunnels_wc[1];
static struct ip_tunnel **tunnels[4] = { tunnels_wc, tunnels_l, tunnels_r, tunnels_r_l };

static DEFINE_RWLOCK(ipip_lock);
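/*
 * Look a tunnel up by the addresses of the outer IP header, preferring
 * the most specific match: remote+local first, then remote only, then
 * local only, and finally the wildcard (fallback) device.
 */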
static struct ip_tunnel * ipip_tunnel_lookup(__be32 remote, __be32 local)
{
	unsigned h0 = HASH(remote);
	unsigned h1 = HASH(local);
	struct ip_tunnel *t;

	for (t = tunnels_r_l[h0^h1]; t; t = t->next) {
		if (local == t->parms.iph.saddr &&
		    remote == t->parms.iph.daddr && (t->dev->flags&IFF_UP))
			return t;
	}
	for (t = tunnels_r[h0]; t; t = t->next) {
		if (remote == t->parms.iph.daddr && (t->dev->flags&IFF_UP))
			return t;
	}
	for (t = tunnels_l[h1]; t; t = t->next) {
		if (local == t->parms.iph.saddr && (t->dev->flags&IFF_UP))
			return t;
	}
	if ((t = tunnels_wc[0]) != NULL && (t->dev->flags&IFF_UP))
		return t;
	return NULL;
}
static struct ip_tunnel **ipip_bucket(struct ip_tunnel *t)
{
	__be32 remote = t->parms.iph.daddr;
	__be32 local = t->parms.iph.saddr;
	unsigned h = 0;
	int prio = 0;

	if (remote) { prio |= 2; h ^= HASH(remote); }
	if (local)  { prio |= 1; h ^= HASH(local); }
	return &tunnels[prio][h];
}
static void ipip_tunnel_unlink(struct ip_tunnel *t)
{
	struct ip_tunnel **tp;

	for (tp = ipip_bucket(t); *tp; tp = &(*tp)->next) {
		if (t == *tp) {
			write_lock_bh(&ipip_lock);
			*tp = t->next;
			write_unlock_bh(&ipip_lock);
			break;
		}
	}
}
static void ipip_tunnel_link(struct ip_tunnel *t)
{
	struct ip_tunnel **tp = ipip_bucket(t);

	t->next = *tp;
	write_lock_bh(&ipip_lock);
	*tp = t;
	write_unlock_bh(&ipip_lock);
}
static struct ip_tunnel * ipip_tunnel_locate(struct ip_tunnel_parm *parms, int create)
{
	__be32 remote = parms->iph.daddr;
	__be32 local = parms->iph.saddr;
	struct ip_tunnel *t, **tp, *nt;
	struct net_device *dev;
	unsigned h = 0;
	int prio = 0;
	char name[IFNAMSIZ];

	if (remote) { prio |= 2; h ^= HASH(remote); }
	if (local)  { prio |= 1; h ^= HASH(local); }

	for (tp = &tunnels[prio][h]; (t = *tp) != NULL; tp = &t->next) {
		if (local == t->parms.iph.saddr && remote == t->parms.iph.daddr)
			return t;
	}
	if (!create)
		return NULL;

	if (parms->name[0])
		strlcpy(name, parms->name, IFNAMSIZ);
	else {
		int i;
		for (i = 1; i < 100; i++) {
			sprintf(name, "tunl%d", i);
			if (__dev_get_by_name(name) == NULL)
				break;
		}
		if (i == 100)
			return NULL;
	}

	dev = alloc_netdev(sizeof(*t), name, ipip_tunnel_setup);
	if (dev == NULL)
		return NULL;

	nt = netdev_priv(dev);
	SET_MODULE_OWNER(dev);
	dev->init = ipip_tunnel_init;
	nt->parms = *parms;

	if (register_netdevice(dev) < 0) {
		free_netdev(dev);
		return NULL;
	}

	dev_hold(dev);
	ipip_tunnel_link(nt);
	return nt;
}
static void ipip_tunnel_uninit(struct net_device *dev)
{
	if (dev == ipip_fb_tunnel_dev) {
		write_lock_bh(&ipip_lock);
		tunnels_wc[0] = NULL;
		write_unlock_bh(&ipip_lock);
	} else
		ipip_tunnel_unlink(netdev_priv(dev));
	dev_put(dev);
}
static int ipip_err(struct sk_buff *skb, u32 info)
{
#ifndef I_WISH_WORLD_WERE_PERFECT

/* It is not :-( All the routers (except for Linux) return only
   8 bytes of packet payload.  It means that precise relaying of
   ICMP in the real Internet is absolutely infeasible.
 */
	struct iphdr *iph = (struct iphdr*)skb->data;
	int type = skb->h.icmph->type;
	int code = skb->h.icmph->code;
	struct ip_tunnel *t;
	int err;

	switch (type) {
	default:
	case ICMP_PARAMETERPROB:
		return 0;

	case ICMP_DEST_UNREACH:
		switch (code) {
		case ICMP_SR_FAILED:
		case ICMP_PORT_UNREACH:
			/* Impossible event. */
			return 0;
		case ICMP_FRAG_NEEDED:
			/* Soft state for pmtu is maintained by IP core. */
			return 0;
		default:
			/* All others are translated to HOST_UNREACH.
			   rfc2003 contains "deep thoughts" about NET_UNREACH;
			   I believe they are just ether pollution. --ANK
			 */
			break;
		}
		break;
	case ICMP_TIME_EXCEEDED:
		if (code != ICMP_EXC_TTL)
			return 0;
		break;
	}

	err = -ENOENT;

	read_lock(&ipip_lock);
	t = ipip_tunnel_lookup(iph->daddr, iph->saddr);
	if (t == NULL || t->parms.iph.daddr == 0)
		goto out;

	err = 0;
	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
		goto out;

	if (jiffies - t->err_time < IPTUNNEL_ERR_TIMEO)
		t->err_count++;
	else
		t->err_count = 1;
	t->err_time = jiffies;
out:
	read_unlock(&ipip_lock);
	return err;
#else
	unsigned char *dp = skb->data;
	int len = skb->len;
	struct iphdr *iph = (struct iphdr*)dp;
	int hlen = iph->ihl<<2;
	struct iphdr *eiph;
	int type = skb->h.icmph->type;
	int code = skb->h.icmph->code;
	int rel_type = 0;
	int rel_code = 0;
	__be32 rel_info = 0;
	__u32 n = 0;
	struct sk_buff *skb2;
	struct flowi fl;
	struct rtable *rt;

	if (len < hlen + sizeof(struct iphdr))
		return 0;
	eiph = (struct iphdr*)(dp + hlen);

	switch (type) {
	default:
		return 0;
	case ICMP_PARAMETERPROB:
		n = ntohl(skb->h.icmph->un.gateway) >> 24;
		if (n < hlen)
			return 0;

		/* So... This guy found something strange INSIDE the
		   encapsulated packet.  Well, he is a fool, but what
		   can we do?
		 */
		rel_type = ICMP_PARAMETERPROB;
		rel_info = htonl((n - hlen) << 24);
		break;

	case ICMP_DEST_UNREACH:
		switch (code) {
		case ICMP_SR_FAILED:
		case ICMP_PORT_UNREACH:
			/* Impossible event. */
			return 0;
		case ICMP_FRAG_NEEDED:
			/* And it is the only really necessary thing :-) */
			n = ntohs(skb->h.icmph->un.frag.mtu);
			if (n < hlen + 68)
				return 0;
			n -= hlen;
			/* BSD 4.2 MORE DOES NOT EXIST IN NATURE. */
			if (n > ntohs(eiph->tot_len))
				return 0;
			rel_info = htonl(n);
			break;
		default:
			/* All others are translated to HOST_UNREACH.
			   rfc2003 contains "deep thoughts" about NET_UNREACH;
			   I believe it is just ether pollution. --ANK
			 */
			rel_type = ICMP_DEST_UNREACH;
			rel_code = ICMP_HOST_UNREACH;
			break;
		}
		break;
	case ICMP_TIME_EXCEEDED:
		if (code != ICMP_EXC_TTL)
			return 0;
		rel_type = ICMP_TIME_EXCEEDED;
		rel_code = ICMP_EXC_TTL;
		break;
	}

	/* Prepare fake skb to feed it to icmp_send */
	skb2 = skb_clone(skb, GFP_ATOMIC);
	if (skb2 == NULL)
		return 0;
	dst_release(skb2->dst);
	skb2->dst = NULL;
	skb_pull(skb2, skb->data - (u8*)eiph);
	skb2->nh.raw = skb2->data;

	/* Try to guess incoming interface */
	memset(&fl, 0, sizeof(fl));
	fl.fl4_dst = eiph->saddr;
	fl.fl4_tos = RT_TOS(eiph->tos);
	fl.proto = IPPROTO_IPIP;
	if (ip_route_output_key(&rt, &fl)) {
		kfree_skb(skb2);
		return 0;
	}
	skb2->dev = rt->u.dst.dev;

	/* route "incoming" packet */
	if (rt->rt_flags&RTCF_LOCAL) {
		ip_rt_put(rt);
		rt = NULL;
		fl.fl4_dst = eiph->daddr;
		fl.fl4_src = eiph->saddr;
		fl.fl4_tos = eiph->tos;
		if (ip_route_output_key(&rt, &fl) ||
		    rt->u.dst.dev->type != ARPHRD_TUNNEL) {
			ip_rt_put(rt);
			kfree_skb(skb2);
			return 0;
		}
	} else {
		ip_rt_put(rt);
		if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos, skb2->dev) ||
		    skb2->dst->dev->type != ARPHRD_TUNNEL) {
			kfree_skb(skb2);
			return 0;
		}
	}

	/* change mtu on this route */
	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
		if (n > dst_mtu(skb2->dst)) {
			kfree_skb(skb2);
			return 0;
		}
		skb2->dst->ops->update_pmtu(skb2->dst, n);
	} else if (type == ICMP_TIME_EXCEEDED) {
		struct ip_tunnel *t = netdev_priv(skb2->dev);
		if (t->parms.iph.ttl) {
			rel_type = ICMP_DEST_UNREACH;
			rel_code = ICMP_HOST_UNREACH;
		}
	}

	icmp_send(skb2, rel_type, rel_code, rel_info);
	kfree_skb(skb2);
	return 0;
#endif
}
static inline void ipip_ecn_decapsulate(struct iphdr *outer_iph, struct sk_buff *skb)
{
	struct iphdr *inner_iph = skb->nh.iph;

	if (INET_ECN_is_ce(outer_iph->tos))
		IP_ECN_set_ce(inner_iph);
}
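/*
 * Decapsulation entry point, called via the xfrm4 tunnel handler for
 * IP protocol 4.  On a match the outer header is stripped, the skb is
 * rewritten to look as if it arrived on the tunnel device, and it is
 * fed back into the stack with netif_rx().
 */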
static int ipip_rcv(struct sk_buff *skb)
{
	struct iphdr *iph = skb->nh.iph;
	struct ip_tunnel *tunnel;

	read_lock(&ipip_lock);
	if ((tunnel = ipip_tunnel_lookup(iph->saddr, iph->daddr)) != NULL) {
		if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
			read_unlock(&ipip_lock);
			kfree_skb(skb);
			return 0;
		}

		secpath_reset(skb);

		skb->mac.raw = skb->nh.raw;
		skb->nh.raw = skb->data;
		skb->protocol = htons(ETH_P_IP);
		skb->pkt_type = PACKET_HOST;

		tunnel->stat.rx_packets++;
		tunnel->stat.rx_bytes += skb->len;
		skb->dev = tunnel->dev;
		dst_release(skb->dst);
		skb->dst = NULL;
		nf_reset(skb);
		ipip_ecn_decapsulate(iph, skb);
		netif_rx(skb);
		read_unlock(&ipip_lock);
		return 0;
	}
	read_unlock(&ipip_lock);

	return -1;
}
/*
 *	This function assumes it is being called from dev_queue_xmit()
 *	and that skb is filled properly by that function.
 */
static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct net_device_stats *stats = &tunnel->stat;
	struct iphdr *tiph = &tunnel->parms.iph;
	u8     tos = tunnel->parms.iph.tos;
	__be16 df = tiph->frag_off;
	struct rtable *rt;			/* Route to the other host */
	struct net_device *tdev;		/* Device to other host */
	struct iphdr *old_iph = skb->nh.iph;
	struct iphdr *iph;			/* Our new IP header */
	int    max_headroom;			/* The extra header space needed */
	__be32 dst = tiph->daddr;
	int    mtu;

	if (tunnel->recursion++) {
		tunnel->stat.collisions++;
		goto tx_error;
	}

	if (skb->protocol != htons(ETH_P_IP))
		goto tx_error;

	if (tos&1)
		tos = old_iph->tos;

	if (!dst) {
		/* NBMA tunnel */
		if ((rt = (struct rtable*)skb->dst) == NULL) {
			tunnel->stat.tx_fifo_errors++;
			goto tx_error;
		}
		if ((dst = rt->rt_gateway) == 0)
			goto tx_error_icmp;
	}

	{
		struct flowi fl = { .oif = tunnel->parms.link,
				    .nl_u = { .ip4_u =
					      { .daddr = dst,
						.saddr = tiph->saddr,
						.tos = RT_TOS(tos) } },
				    .proto = IPPROTO_IPIP };
		if (ip_route_output_key(&rt, &fl)) {
			tunnel->stat.tx_carrier_errors++;
			goto tx_error_icmp;
		}
	}
	tdev = rt->u.dst.dev;

	if (tdev == dev) {
		ip_rt_put(rt);
		tunnel->stat.collisions++;
		goto tx_error;
	}

	if (tiph->frag_off)
		mtu = dst_mtu(&rt->u.dst) - sizeof(struct iphdr);
	else
		mtu = skb->dst ? dst_mtu(skb->dst) : dev->mtu;

	if (mtu < 68) {
		tunnel->stat.collisions++;
		ip_rt_put(rt);
		goto tx_error;
	}
	if (skb->dst)
		skb->dst->ops->update_pmtu(skb->dst, mtu);

	df |= (old_iph->frag_off&htons(IP_DF));

	if ((old_iph->frag_off&htons(IP_DF)) && mtu < ntohs(old_iph->tot_len)) {
		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
		ip_rt_put(rt);
		goto tx_error;
	}

	if (tunnel->err_count > 0) {
		if (jiffies - tunnel->err_time < IPTUNNEL_ERR_TIMEO) {
			tunnel->err_count--;
			dst_link_failure(skb);
		} else
			tunnel->err_count = 0;
	}

	/*
	 * Okay, now see if we can stuff it in the buffer as-is.
	 */
	max_headroom = (LL_RESERVED_SPACE(tdev)+sizeof(struct iphdr));

	if (skb_headroom(skb) < max_headroom || skb_cloned(skb) || skb_shared(skb)) {
		struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
		if (!new_skb) {
			ip_rt_put(rt);
			stats->tx_dropped++;
			dev_kfree_skb(skb);
			tunnel->recursion--;
			return 0;
		}
		if (skb->sk)
			skb_set_owner_w(new_skb, skb->sk);
		dev_kfree_skb(skb);
		skb = new_skb;
		old_iph = skb->nh.iph;
	}

	skb->h.raw = skb->nh.raw;
	skb->nh.raw = skb_push(skb, sizeof(struct iphdr));
	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
			      IPSKB_REROUTED);
	dst_release(skb->dst);
	skb->dst = &rt->u.dst;

	/*
	 *	Push down and install the IPIP header.
	 */

	iph			= skb->nh.iph;
	iph->version		= 4;
	iph->ihl		= sizeof(struct iphdr)>>2;
	iph->frag_off		= df;
	iph->protocol		= IPPROTO_IPIP;
	iph->tos		= INET_ECN_encapsulate(tos, old_iph->tos);
	iph->daddr		= rt->rt_dst;
	iph->saddr		= rt->rt_src;

	if ((iph->ttl = tiph->ttl) == 0)
		iph->ttl	= old_iph->ttl;

	nf_reset(skb);

	IPTUNNEL_XMIT();
	tunnel->recursion--;
	return 0;

tx_error_icmp:
	dst_link_failure(skb);
tx_error:
	stats->tx_errors++;
	dev_kfree_skb(skb);
	tunnel->recursion--;
	return 0;
}
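/*
 * ioctl interface used by the "ip tunnel" command: SIOCGETTUNNEL reads
 * back tunnel parameters, SIOCADDTUNNEL/SIOCCHGTUNNEL create or update
 * a tunnel, and SIOCDELTUNNEL removes one.
 */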
static int
ipip_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
{
	int err = 0;
	struct ip_tunnel_parm p;
	struct ip_tunnel *t;

	switch (cmd) {
	case SIOCGETTUNNEL:
		t = NULL;
		if (dev == ipip_fb_tunnel_dev) {
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
				err = -EFAULT;
				break;
			}
			t = ipip_tunnel_locate(&p, 0);
		}
		if (t == NULL)
			t = netdev_priv(dev);
		memcpy(&p, &t->parms, sizeof(p));
		if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
			err = -EFAULT;
		break;

	case SIOCADDTUNNEL:
	case SIOCCHGTUNNEL:
		err = -EPERM;
		if (!capable(CAP_NET_ADMIN))
			goto done;

		err = -EFAULT;
		if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
			goto done;

		err = -EINVAL;
		if (p.iph.version != 4 || p.iph.protocol != IPPROTO_IPIP ||
		    p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF)))
			goto done;
		if (p.iph.ttl)
			p.iph.frag_off |= htons(IP_DF);

		t = ipip_tunnel_locate(&p, cmd == SIOCADDTUNNEL);

		if (dev != ipip_fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
			if (t != NULL) {
				if (t->dev != dev) {
					err = -EEXIST;
					break;
				}
			} else {
				if (((dev->flags&IFF_POINTOPOINT) && !p.iph.daddr) ||
				    (!(dev->flags&IFF_POINTOPOINT) && p.iph.daddr)) {
					err = -EINVAL;
					break;
				}
				t = netdev_priv(dev);
				ipip_tunnel_unlink(t);
				t->parms.iph.saddr = p.iph.saddr;
				t->parms.iph.daddr = p.iph.daddr;
				memcpy(dev->dev_addr, &p.iph.saddr, 4);
				memcpy(dev->broadcast, &p.iph.daddr, 4);
				ipip_tunnel_link(t);
				netdev_state_change(dev);
			}
		}

		if (t) {
			err = 0;
			if (cmd == SIOCCHGTUNNEL) {
				t->parms.iph.ttl = p.iph.ttl;
				t->parms.iph.tos = p.iph.tos;
				t->parms.iph.frag_off = p.iph.frag_off;
			}
			if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms, sizeof(p)))
				err = -EFAULT;
		} else
			err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
		break;

	case SIOCDELTUNNEL:
		err = -EPERM;
		if (!capable(CAP_NET_ADMIN))
			goto done;

		if (dev == ipip_fb_tunnel_dev) {
			err = -EFAULT;
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
				goto done;
			err = -ENOENT;
			if ((t = ipip_tunnel_locate(&p, 0)) == NULL)
				goto done;
			err = -EPERM;
			if (t->dev == ipip_fb_tunnel_dev)
				goto done;
			dev = t->dev;
		}
		unregister_netdevice(dev);
		err = 0;
		break;

	default:
		err = -EINVAL;
	}

done:
	return err;
}
static struct net_device_stats *ipip_tunnel_get_stats(struct net_device *dev)
{
	return &(((struct ip_tunnel*)netdev_priv(dev))->stat);
}
static int ipip_tunnel_change_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu < 68 || new_mtu > 0xFFF8 - sizeof(struct iphdr))
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}
static void ipip_tunnel_setup(struct net_device *dev)
{
	SET_MODULE_OWNER(dev);
	dev->uninit		= ipip_tunnel_uninit;
	dev->hard_start_xmit	= ipip_tunnel_xmit;
	dev->get_stats		= ipip_tunnel_get_stats;
	dev->do_ioctl		= ipip_tunnel_ioctl;
	dev->change_mtu		= ipip_tunnel_change_mtu;
	dev->destructor		= free_netdev;

	dev->type		= ARPHRD_TUNNEL;
	dev->hard_header_len	= LL_MAX_HEADER + sizeof(struct iphdr);
	dev->mtu		= ETH_DATA_LEN - sizeof(struct iphdr);
	dev->flags		= IFF_NOARP;
	dev->iflink		= 0;
	dev->addr_len		= 4;
}
static int ipip_tunnel_init(struct net_device *dev)
{
	struct net_device *tdev = NULL;
	struct ip_tunnel *tunnel;
	struct iphdr *iph;

	tunnel = netdev_priv(dev);
	iph = &tunnel->parms.iph;

	tunnel->dev = dev;
	strcpy(tunnel->parms.name, dev->name);

	memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
	memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);

	if (iph->daddr) {
		struct flowi fl = { .oif = tunnel->parms.link,
				    .nl_u = { .ip4_u =
					      { .daddr = iph->daddr,
						.saddr = iph->saddr,
						.tos = RT_TOS(iph->tos) } },
				    .proto = IPPROTO_IPIP };
		struct rtable *rt;
		if (!ip_route_output_key(&rt, &fl)) {
			tdev = rt->u.dst.dev;
			ip_rt_put(rt);
		}
		dev->flags |= IFF_POINTOPOINT;
	}

	if (!tdev && tunnel->parms.link)
		tdev = __dev_get_by_index(tunnel->parms.link);

	if (tdev) {
		dev->hard_header_len = tdev->hard_header_len + sizeof(struct iphdr);
		dev->mtu = tdev->mtu - sizeof(struct iphdr);
	}
	dev->iflink = tunnel->parms.link;

	return 0;
}
static int __init ipip_fb_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct iphdr *iph = &tunnel->parms.iph;

	tunnel->dev = dev;
	strcpy(tunnel->parms.name, dev->name);

	iph->version		= 4;
	iph->protocol		= IPPROTO_IPIP;
	iph->ihl		= 5;

	dev_hold(dev);
	tunnels_wc[0]		= tunnel;
	return 0;
}
static struct xfrm_tunnel ipip_handler = {
	.handler	=	ipip_rcv,
	.err_handler	=	ipip_err,
	.priority	=	1,
};
static char banner[] __initdata =
	KERN_INFO "IPv4 over IPv4 tunneling driver\n";
static int __init ipip_init(void)
{
	int err;

	printk(banner);

	if (xfrm4_tunnel_register(&ipip_handler, AF_INET)) {
		printk(KERN_INFO "ipip init: can't register tunnel\n");
		return -EAGAIN;
	}

	ipip_fb_tunnel_dev = alloc_netdev(sizeof(struct ip_tunnel),
					  "tunl0",
					  ipip_tunnel_setup);
	if (!ipip_fb_tunnel_dev) {
		err = -ENOMEM;
		goto err1;
	}

	ipip_fb_tunnel_dev->init = ipip_fb_tunnel_init;

	if ((err = register_netdev(ipip_fb_tunnel_dev)))
		goto err2;
out:
	return err;
err2:
	free_netdev(ipip_fb_tunnel_dev);
err1:
	xfrm4_tunnel_deregister(&ipip_handler, AF_INET);
	goto out;
}
static void __exit ipip_destroy_tunnels(void)
{
	int prio;

	for (prio = 1; prio < 4; prio++) {
		int h;
		for (h = 0; h < HASH_SIZE; h++) {
			struct ip_tunnel *t;
			while ((t = tunnels[prio][h]) != NULL)
				unregister_netdevice(t->dev);
		}
	}
}
static void __exit ipip_fini(void)
{
	if (xfrm4_tunnel_deregister(&ipip_handler, AF_INET))
		printk(KERN_INFO "ipip close: can't deregister tunnel\n");

	rtnl_lock();
	ipip_destroy_tunnels();
	unregister_netdevice(ipip_fb_tunnel_dev);
	rtnl_unlock();
}
module_init(ipip_init);
module_exit(ipip_fini);
MODULE_LICENSE("GPL");