2 * Linux NET3: GRE over IP protocol decoder.
4 * Authors: Alexey Kuznetsov (kuznet@ms2.inr.ac.ru)
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
13 #include <linux/capability.h>
14 #include <linux/module.h>
15 #include <linux/types.h>
16 #include <linux/kernel.h>
17 #include <asm/uaccess.h>
18 #include <linux/skbuff.h>
19 #include <linux/netdevice.h>
21 #include <linux/tcp.h>
22 #include <linux/udp.h>
23 #include <linux/if_arp.h>
24 #include <linux/mroute.h>
25 #include <linux/init.h>
26 #include <linux/in6.h>
27 #include <linux/inetdevice.h>
28 #include <linux/igmp.h>
29 #include <linux/netfilter_ipv4.h>
30 #include <linux/if_ether.h>
35 #include <net/protocol.h>
38 #include <net/checksum.h>
39 #include <net/dsfield.h>
40 #include <net/inet_ecn.h>
45 #include <net/ip6_fib.h>
46 #include <net/ip6_route.h>
53 1. The most important issue is detecting local dead loops.
54 They would cause complete host lockup in transmit, which
55 would be "resolved" by stack overflow or, if queueing is enabled,
56 with infinite looping in net_bh.
58 We cannot track such dead loops during route installation,
59 it is infeasible task. The most general solutions would be
60 to keep skb->encapsulation counter (sort of local ttl),
61 and silently drop packet when it expires. It is the best
62 solution, but it supposes maintaining a new variable in ALL
63 skb, even if no tunneling is used.
65 Current solution: t->recursion lock breaks dead loops. It looks
66 like dev->tbusy flag, but I preferred new variable, because
67 the semantics is different. One day, when hard_start_xmit
68 will be multithreaded we will have to use skb->encapsulation.
72 2. Networking dead loops would not kill routers, but would really
73 kill network. IP hop limit plays role of "t->recursion" in this case,
74 if we copy it from packet being encapsulated to upper header.
75 It is very good solution, but it introduces two problems:
77 - Routing protocols, using packets with ttl=1 (OSPF, RIP2),
78 do not work over tunnels.
79 - traceroute does not work. I planned to relay ICMP from tunnel,
80 so that this problem would be solved and traceroute output
81 would be even more informative. This idea appeared to be wrong:
82 only Linux complies to rfc1812 now (yes, guys, Linux is the only
83 true router now :-)), all routers (at least, in neighbourhood of mine)
84 return only 8 bytes of payload. It is the end.
86 Hence, if we want that OSPF worked or traceroute said something reasonable,
87 we should search for another solution.
89 One of them is to parse packet trying to detect inner encapsulation
90 made by our node. It is difficult or even impossible, especially,
91 taking into account fragmentation. To be short, it is not a solution at all.
93 Current solution: The solution was UNEXPECTEDLY SIMPLE.
94 We force DF flag on tunnels with preconfigured hop limit,
95 that is ALL. :-) Well, it does not remove the problem completely,
96 but exponential growth of network traffic is changed to linear
97 (branches, that exceed pmtu are pruned) and tunnel mtu
98 quickly degrades to a value <68, where looping stops.
99 Yes, it is not good if there exists a router in the loop,
100 which does not force DF, even when encapsulating packets have DF set.
101 But it is not our problem! Nobody could accuse us, we made
102 all that we could make. Even if it is your gated who injected
103 fatal route to network, even if it were you who configured
104 fatal static route: you are innocent. :-)
108 3. Really, ipv4/ipip.c, ipv4/ip_gre.c and ipv6/sit.c contain
109 practically identical code. It would be good to glue them
110 together, but it is not very evident, how to make them modular.
111 sit is integral part of IPv6, ipip and gre are naturally modular.
112 We could extract common parts (hash table, ioctl etc)
113 to a separate module (ip_tunnel.c).
118 static int ipgre_tunnel_init(struct net_device *dev);
119 static void ipgre_tunnel_setup(struct net_device *dev);
121 /* Fallback tunnel: no source, no destination, no key, no options */
123 static int ipgre_fb_tunnel_init(struct net_device *dev);
125 static struct net_device *ipgre_fb_tunnel_dev;
127 /* Tunnel hash table */
137 We require exact key match i.e. if a key is present in packet
138 it will match only tunnel with the same key; if it is not present,
139 it will match only keyless tunnel.
141 All keysless packets, if not matched configured keyless tunnels
142 will match fallback tunnel.
146 #define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&0xF)
148 static struct ip_tunnel *tunnels[4][HASH_SIZE];
150 #define tunnels_r_l (tunnels[3])
151 #define tunnels_r (tunnels[2])
152 #define tunnels_l (tunnels[1])
153 #define tunnels_wc (tunnels[0])
155 static DEFINE_RWLOCK(ipgre_lock);
157 /* Given src, dst and key, find appropriate for input tunnel. */
159 static struct ip_tunnel * ipgre_tunnel_lookup(__be32 remote, __be32 local, __be32 key)
161 unsigned h0 = HASH(remote);
162 unsigned h1 = HASH(key);
165 for (t = tunnels_r_l[h0^h1]; t; t = t->next) {
166 if (local == t->parms.iph.saddr && remote == t->parms.iph.daddr) {
167 if (t->parms.i_key == key && (t->dev->flags&IFF_UP))
171 for (t = tunnels_r[h0^h1]; t; t = t->next) {
172 if (remote == t->parms.iph.daddr) {
173 if (t->parms.i_key == key && (t->dev->flags&IFF_UP))
177 for (t = tunnels_l[h1]; t; t = t->next) {
178 if (local == t->parms.iph.saddr ||
179 (local == t->parms.iph.daddr &&
180 ipv4_is_multicast(local))) {
181 if (t->parms.i_key == key && (t->dev->flags&IFF_UP))
185 for (t = tunnels_wc[h1]; t; t = t->next) {
186 if (t->parms.i_key == key && (t->dev->flags&IFF_UP))
190 if (ipgre_fb_tunnel_dev->flags&IFF_UP)
191 return netdev_priv(ipgre_fb_tunnel_dev);
195 static struct ip_tunnel **__ipgre_bucket(struct ip_tunnel_parm *parms)
197 __be32 remote = parms->iph.daddr;
198 __be32 local = parms->iph.saddr;
199 __be32 key = parms->i_key;
200 unsigned h = HASH(key);
205 if (remote && !ipv4_is_multicast(remote)) {
210 return &tunnels[prio][h];
213 static inline struct ip_tunnel **ipgre_bucket(struct ip_tunnel *t)
215 return __ipgre_bucket(&t->parms);
218 static void ipgre_tunnel_link(struct ip_tunnel *t)
220 struct ip_tunnel **tp = ipgre_bucket(t);
223 write_lock_bh(&ipgre_lock);
225 write_unlock_bh(&ipgre_lock);
228 static void ipgre_tunnel_unlink(struct ip_tunnel *t)
230 struct ip_tunnel **tp;
232 for (tp = ipgre_bucket(t); *tp; tp = &(*tp)->next) {
234 write_lock_bh(&ipgre_lock);
236 write_unlock_bh(&ipgre_lock);
242 static struct ip_tunnel * ipgre_tunnel_locate(struct ip_tunnel_parm *parms, int create)
244 __be32 remote = parms->iph.daddr;
245 __be32 local = parms->iph.saddr;
246 __be32 key = parms->i_key;
247 struct ip_tunnel *t, **tp, *nt;
248 struct net_device *dev;
251 for (tp = __ipgre_bucket(parms); (t = *tp) != NULL; tp = &t->next) {
252 if (local == t->parms.iph.saddr && remote == t->parms.iph.daddr) {
253 if (key == t->parms.i_key)
261 strlcpy(name, parms->name, IFNAMSIZ);
263 sprintf(name, "gre%%d");
265 dev = alloc_netdev(sizeof(*t), name, ipgre_tunnel_setup);
269 if (strchr(name, '%')) {
270 if (dev_alloc_name(dev, name) < 0)
274 dev->init = ipgre_tunnel_init;
275 nt = netdev_priv(dev);
278 if (register_netdevice(dev) < 0)
282 ipgre_tunnel_link(nt);
290 static void ipgre_tunnel_uninit(struct net_device *dev)
292 ipgre_tunnel_unlink(netdev_priv(dev));
297 static void ipgre_err(struct sk_buff *skb, u32 info)
299 #ifndef I_WISH_WORLD_WERE_PERFECT
301 /* It is not :-( All the routers (except for Linux) return only
302 8 bytes of packet payload. It means, that precise relaying of
303 ICMP in the real Internet is absolutely infeasible.
305 Moreover, Cisco "wise men" put GRE key to the third word
306 in GRE header. It makes impossible maintaining even soft state for keyed
307 GRE tunnels with enabled checksum. Tell them "thank you".
309 Well, I wonder, rfc1812 was written by Cisco employee,
310 what the hell these idiots break standrads established
314 struct iphdr *iph = (struct iphdr*)skb->data;
315 __be16 *p = (__be16*)(skb->data+(iph->ihl<<2));
316 int grehlen = (iph->ihl<<2) + 4;
317 const int type = icmp_hdr(skb)->type;
318 const int code = icmp_hdr(skb)->code;
323 if (flags&(GRE_CSUM|GRE_KEY|GRE_SEQ|GRE_ROUTING|GRE_VERSION)) {
324 if (flags&(GRE_VERSION|GRE_ROUTING))
333 /* If only 8 bytes returned, keyed message will be dropped here */
334 if (skb_headlen(skb) < grehlen)
339 case ICMP_PARAMETERPROB:
342 case ICMP_DEST_UNREACH:
345 case ICMP_PORT_UNREACH:
346 /* Impossible event. */
348 case ICMP_FRAG_NEEDED:
349 /* Soft state for pmtu is maintained by IP core. */
352 /* All others are translated to HOST_UNREACH.
353 rfc2003 contains "deep thoughts" about NET_UNREACH,
354 I believe they are just ether pollution. --ANK
359 case ICMP_TIME_EXCEEDED:
360 if (code != ICMP_EXC_TTL)
365 read_lock(&ipgre_lock);
366 t = ipgre_tunnel_lookup(iph->daddr, iph->saddr, (flags&GRE_KEY) ? *(((__be32*)p) + (grehlen>>2) - 1) : 0);
367 if (t == NULL || t->parms.iph.daddr == 0 ||
368 ipv4_is_multicast(t->parms.iph.daddr))
371 if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
374 if (jiffies - t->err_time < IPTUNNEL_ERR_TIMEO)
378 t->err_time = jiffies;
380 read_unlock(&ipgre_lock);
383 struct iphdr *iph = (struct iphdr*)dp;
385 __be16 *p = (__be16*)(dp+(iph->ihl<<2));
386 const int type = icmp_hdr(skb)->type;
387 const int code = icmp_hdr(skb)->code;
393 int grehlen = (iph->ihl<<2) + 4;
394 struct sk_buff *skb2;
398 if (p[1] != htons(ETH_P_IP))
402 if (flags&(GRE_CSUM|GRE_KEY|GRE_SEQ|GRE_ROUTING|GRE_VERSION)) {
403 if (flags&(GRE_VERSION|GRE_ROUTING))
412 if (len < grehlen + sizeof(struct iphdr))
414 eiph = (struct iphdr*)(dp + grehlen);
419 case ICMP_PARAMETERPROB:
420 n = ntohl(icmp_hdr(skb)->un.gateway) >> 24;
421 if (n < (iph->ihl<<2))
424 /* So... This guy found something strange INSIDE encapsulated
425 packet. Well, he is fool, but what can we do ?
427 rel_type = ICMP_PARAMETERPROB;
429 rel_info = htonl(n << 24);
432 case ICMP_DEST_UNREACH:
435 case ICMP_PORT_UNREACH:
436 /* Impossible event. */
438 case ICMP_FRAG_NEEDED:
439 /* And it is the only really necessary thing :-) */
440 n = ntohs(icmp_hdr(skb)->un.frag.mtu);
444 /* BSD 4.2 MORE DOES NOT EXIST IN NATURE. */
445 if (n > ntohs(eiph->tot_len))
450 /* All others are translated to HOST_UNREACH.
451 rfc2003 contains "deep thoughts" about NET_UNREACH,
452 I believe, it is just ether pollution. --ANK
454 rel_type = ICMP_DEST_UNREACH;
455 rel_code = ICMP_HOST_UNREACH;
459 case ICMP_TIME_EXCEEDED:
460 if (code != ICMP_EXC_TTL)
465 /* Prepare fake skb to feed it to icmp_send */
466 skb2 = skb_clone(skb, GFP_ATOMIC);
469 dst_release(skb2->dst);
471 skb_pull(skb2, skb->data - (u8*)eiph);
472 skb_reset_network_header(skb2);
474 /* Try to guess incoming interface */
475 memset(&fl, 0, sizeof(fl));
476 fl.fl4_dst = eiph->saddr;
477 fl.fl4_tos = RT_TOS(eiph->tos);
478 fl.proto = IPPROTO_GRE;
479 if (ip_route_output_key(&init_net, &rt, &fl)) {
483 skb2->dev = rt->u.dst.dev;
485 /* route "incoming" packet */
486 if (rt->rt_flags&RTCF_LOCAL) {
489 fl.fl4_dst = eiph->daddr;
490 fl.fl4_src = eiph->saddr;
491 fl.fl4_tos = eiph->tos;
492 if (ip_route_output_key(&init_net, &rt, &fl) ||
493 rt->u.dst.dev->type != ARPHRD_IPGRE) {
500 if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos, skb2->dev) ||
501 skb2->dst->dev->type != ARPHRD_IPGRE) {
507 /* change mtu on this route */
508 if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
509 if (n > dst_mtu(skb2->dst)) {
513 skb2->dst->ops->update_pmtu(skb2->dst, n);
514 } else if (type == ICMP_TIME_EXCEEDED) {
515 struct ip_tunnel *t = netdev_priv(skb2->dev);
516 if (t->parms.iph.ttl) {
517 rel_type = ICMP_DEST_UNREACH;
518 rel_code = ICMP_HOST_UNREACH;
522 icmp_send(skb2, rel_type, rel_code, rel_info);
527 static inline void ipgre_ecn_decapsulate(struct iphdr *iph, struct sk_buff *skb)
529 if (INET_ECN_is_ce(iph->tos)) {
530 if (skb->protocol == htons(ETH_P_IP)) {
531 IP_ECN_set_ce(ip_hdr(skb));
532 } else if (skb->protocol == htons(ETH_P_IPV6)) {
533 IP6_ECN_set_ce(ipv6_hdr(skb));
539 ipgre_ecn_encapsulate(u8 tos, struct iphdr *old_iph, struct sk_buff *skb)
542 if (skb->protocol == htons(ETH_P_IP))
543 inner = old_iph->tos;
544 else if (skb->protocol == htons(ETH_P_IPV6))
545 inner = ipv6_get_dsfield((struct ipv6hdr *)old_iph);
546 return INET_ECN_encapsulate(tos, inner);
549 static int ipgre_rcv(struct sk_buff *skb)
557 struct ip_tunnel *tunnel;
560 if (!pskb_may_pull(skb, 16))
567 if (flags&(GRE_CSUM|GRE_KEY|GRE_ROUTING|GRE_SEQ|GRE_VERSION)) {
568 /* - Version must be 0.
569 - We do not support routing headers.
571 if (flags&(GRE_VERSION|GRE_ROUTING))
574 if (flags&GRE_CSUM) {
575 switch (skb->ip_summed) {
576 case CHECKSUM_COMPLETE:
577 csum = csum_fold(skb->csum);
583 csum = __skb_checksum_complete(skb);
584 skb->ip_summed = CHECKSUM_COMPLETE;
589 key = *(__be32*)(h + offset);
593 seqno = ntohl(*(__be32*)(h + offset));
598 read_lock(&ipgre_lock);
599 if ((tunnel = ipgre_tunnel_lookup(iph->saddr, iph->daddr, key)) != NULL) {
602 skb->protocol = *(__be16*)(h + 2);
603 /* WCCP version 1 and 2 protocol decoding.
604 * - Change protocol to IP
605 * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header
608 skb->protocol == htons(ETH_P_WCCP)) {
609 skb->protocol = htons(ETH_P_IP);
610 if ((*(h + offset) & 0xF0) != 0x40)
614 skb->mac_header = skb->network_header;
615 __pskb_pull(skb, offset);
616 skb_reset_network_header(skb);
617 skb_postpull_rcsum(skb, skb_transport_header(skb), offset);
618 skb->pkt_type = PACKET_HOST;
619 #ifdef CONFIG_NET_IPGRE_BROADCAST
620 if (ipv4_is_multicast(iph->daddr)) {
621 /* Looped back packet, drop it! */
622 if (((struct rtable*)skb->dst)->fl.iif == 0)
624 tunnel->stat.multicast++;
625 skb->pkt_type = PACKET_BROADCAST;
629 if (((flags&GRE_CSUM) && csum) ||
630 (!(flags&GRE_CSUM) && tunnel->parms.i_flags&GRE_CSUM)) {
631 tunnel->stat.rx_crc_errors++;
632 tunnel->stat.rx_errors++;
635 if (tunnel->parms.i_flags&GRE_SEQ) {
636 if (!(flags&GRE_SEQ) ||
637 (tunnel->i_seqno && (s32)(seqno - tunnel->i_seqno) < 0)) {
638 tunnel->stat.rx_fifo_errors++;
639 tunnel->stat.rx_errors++;
642 tunnel->i_seqno = seqno + 1;
644 tunnel->stat.rx_packets++;
645 tunnel->stat.rx_bytes += skb->len;
646 skb->dev = tunnel->dev;
647 dst_release(skb->dst);
650 ipgre_ecn_decapsulate(iph, skb);
652 read_unlock(&ipgre_lock);
655 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
658 read_unlock(&ipgre_lock);
664 static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
666 struct ip_tunnel *tunnel = netdev_priv(dev);
667 struct net_device_stats *stats = &tunnel->stat;
668 struct iphdr *old_iph = ip_hdr(skb);
672 struct rtable *rt; /* Route to the other host */
673 struct net_device *tdev; /* Device to other host */
674 struct iphdr *iph; /* Our new IP header */
675 unsigned int max_headroom; /* The extra header space needed */
680 if (tunnel->recursion++) {
681 tunnel->stat.collisions++;
685 if (dev->header_ops) {
687 tiph = (struct iphdr*)skb->data;
689 gre_hlen = tunnel->hlen;
690 tiph = &tunnel->parms.iph;
693 if ((dst = tiph->daddr) == 0) {
696 if (skb->dst == NULL) {
697 tunnel->stat.tx_fifo_errors++;
701 if (skb->protocol == htons(ETH_P_IP)) {
702 rt = (struct rtable*)skb->dst;
703 if ((dst = rt->rt_gateway) == 0)
707 else if (skb->protocol == htons(ETH_P_IPV6)) {
708 struct in6_addr *addr6;
710 struct neighbour *neigh = skb->dst->neighbour;
715 addr6 = (struct in6_addr*)&neigh->primary_key;
716 addr_type = ipv6_addr_type(addr6);
718 if (addr_type == IPV6_ADDR_ANY) {
719 addr6 = &ipv6_hdr(skb)->daddr;
720 addr_type = ipv6_addr_type(addr6);
723 if ((addr_type & IPV6_ADDR_COMPATv4) == 0)
726 dst = addr6->s6_addr32[3];
735 if (skb->protocol == htons(ETH_P_IP))
741 struct flowi fl = { .oif = tunnel->parms.link,
744 .saddr = tiph->saddr,
745 .tos = RT_TOS(tos) } },
746 .proto = IPPROTO_GRE };
747 if (ip_route_output_key(&init_net, &rt, &fl)) {
748 tunnel->stat.tx_carrier_errors++;
752 tdev = rt->u.dst.dev;
756 tunnel->stat.collisions++;
762 mtu = dst_mtu(&rt->u.dst) - tunnel->hlen;
764 mtu = skb->dst ? dst_mtu(skb->dst) : dev->mtu;
767 skb->dst->ops->update_pmtu(skb->dst, mtu);
769 if (skb->protocol == htons(ETH_P_IP)) {
770 df |= (old_iph->frag_off&htons(IP_DF));
772 if ((old_iph->frag_off&htons(IP_DF)) &&
773 mtu < ntohs(old_iph->tot_len)) {
774 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
780 else if (skb->protocol == htons(ETH_P_IPV6)) {
781 struct rt6_info *rt6 = (struct rt6_info*)skb->dst;
783 if (rt6 && mtu < dst_mtu(skb->dst) && mtu >= IPV6_MIN_MTU) {
784 if ((tunnel->parms.iph.daddr &&
785 !ipv4_is_multicast(tunnel->parms.iph.daddr)) ||
786 rt6->rt6i_dst.plen == 128) {
787 rt6->rt6i_flags |= RTF_MODIFIED;
788 skb->dst->metrics[RTAX_MTU-1] = mtu;
792 if (mtu >= IPV6_MIN_MTU && mtu < skb->len - tunnel->hlen + gre_hlen) {
793 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev);
800 if (tunnel->err_count > 0) {
801 if (jiffies - tunnel->err_time < IPTUNNEL_ERR_TIMEO) {
804 dst_link_failure(skb);
806 tunnel->err_count = 0;
809 max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen;
811 if (skb_headroom(skb) < max_headroom || skb_shared(skb)||
812 (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
813 struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
822 skb_set_owner_w(new_skb, skb->sk);
825 old_iph = ip_hdr(skb);
828 skb->transport_header = skb->network_header;
829 skb_push(skb, gre_hlen);
830 skb_reset_network_header(skb);
831 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
832 IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
834 dst_release(skb->dst);
835 skb->dst = &rt->u.dst;
838 * Push down and install the IPIP header.
843 iph->ihl = sizeof(struct iphdr) >> 2;
845 iph->protocol = IPPROTO_GRE;
846 iph->tos = ipgre_ecn_encapsulate(tos, old_iph, skb);
847 iph->daddr = rt->rt_dst;
848 iph->saddr = rt->rt_src;
850 if ((iph->ttl = tiph->ttl) == 0) {
851 if (skb->protocol == htons(ETH_P_IP))
852 iph->ttl = old_iph->ttl;
854 else if (skb->protocol == htons(ETH_P_IPV6))
855 iph->ttl = ((struct ipv6hdr*)old_iph)->hop_limit;
858 iph->ttl = dst_metric(&rt->u.dst, RTAX_HOPLIMIT);
861 ((__be16*)(iph+1))[0] = tunnel->parms.o_flags;
862 ((__be16*)(iph+1))[1] = skb->protocol;
864 if (tunnel->parms.o_flags&(GRE_KEY|GRE_CSUM|GRE_SEQ)) {
865 __be32 *ptr = (__be32*)(((u8*)iph) + tunnel->hlen - 4);
867 if (tunnel->parms.o_flags&GRE_SEQ) {
869 *ptr = htonl(tunnel->o_seqno);
872 if (tunnel->parms.o_flags&GRE_KEY) {
873 *ptr = tunnel->parms.o_key;
876 if (tunnel->parms.o_flags&GRE_CSUM) {
878 *(__sum16*)ptr = ip_compute_csum((void*)(iph+1), skb->len - sizeof(struct iphdr));
889 dst_link_failure(skb);
898 static void ipgre_tunnel_bind_dev(struct net_device *dev)
900 struct net_device *tdev = NULL;
901 struct ip_tunnel *tunnel;
903 int hlen = LL_MAX_HEADER;
904 int mtu = ETH_DATA_LEN;
905 int addend = sizeof(struct iphdr) + 4;
907 tunnel = netdev_priv(dev);
908 iph = &tunnel->parms.iph;
910 /* Guess output device to choose reasonable mtu and hard_header_len */
913 struct flowi fl = { .oif = tunnel->parms.link,
915 { .daddr = iph->daddr,
917 .tos = RT_TOS(iph->tos) } },
918 .proto = IPPROTO_GRE };
920 if (!ip_route_output_key(&init_net, &rt, &fl)) {
921 tdev = rt->u.dst.dev;
924 dev->flags |= IFF_POINTOPOINT;
927 if (!tdev && tunnel->parms.link)
928 tdev = __dev_get_by_index(&init_net, tunnel->parms.link);
931 hlen = tdev->hard_header_len;
934 dev->iflink = tunnel->parms.link;
936 /* Precalculate GRE options length */
937 if (tunnel->parms.o_flags&(GRE_CSUM|GRE_KEY|GRE_SEQ)) {
938 if (tunnel->parms.o_flags&GRE_CSUM)
940 if (tunnel->parms.o_flags&GRE_KEY)
942 if (tunnel->parms.o_flags&GRE_SEQ)
945 dev->hard_header_len = hlen + addend;
946 dev->mtu = mtu - addend;
947 tunnel->hlen = addend;
952 ipgre_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
955 struct ip_tunnel_parm p;
961 if (dev == ipgre_fb_tunnel_dev) {
962 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
966 t = ipgre_tunnel_locate(&p, 0);
969 t = netdev_priv(dev);
970 memcpy(&p, &t->parms, sizeof(p));
971 if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
978 if (!capable(CAP_NET_ADMIN))
982 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
986 if (p.iph.version != 4 || p.iph.protocol != IPPROTO_GRE ||
987 p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF)) ||
988 ((p.i_flags|p.o_flags)&(GRE_VERSION|GRE_ROUTING)))
991 p.iph.frag_off |= htons(IP_DF);
993 if (!(p.i_flags&GRE_KEY))
995 if (!(p.o_flags&GRE_KEY))
998 t = ipgre_tunnel_locate(&p, cmd == SIOCADDTUNNEL);
1000 if (dev != ipgre_fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
1002 if (t->dev != dev) {
1009 t = netdev_priv(dev);
1011 if (ipv4_is_multicast(p.iph.daddr))
1012 nflags = IFF_BROADCAST;
1013 else if (p.iph.daddr)
1014 nflags = IFF_POINTOPOINT;
1016 if ((dev->flags^nflags)&(IFF_POINTOPOINT|IFF_BROADCAST)) {
1020 ipgre_tunnel_unlink(t);
1021 t->parms.iph.saddr = p.iph.saddr;
1022 t->parms.iph.daddr = p.iph.daddr;
1023 t->parms.i_key = p.i_key;
1024 t->parms.o_key = p.o_key;
1025 memcpy(dev->dev_addr, &p.iph.saddr, 4);
1026 memcpy(dev->broadcast, &p.iph.daddr, 4);
1027 ipgre_tunnel_link(t);
1028 netdev_state_change(dev);
1034 if (cmd == SIOCCHGTUNNEL) {
1035 t->parms.iph.ttl = p.iph.ttl;
1036 t->parms.iph.tos = p.iph.tos;
1037 t->parms.iph.frag_off = p.iph.frag_off;
1038 if (t->parms.link != p.link) {
1039 t->parms.link = p.link;
1040 ipgre_tunnel_bind_dev(dev);
1041 netdev_state_change(dev);
1044 if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms, sizeof(p)))
1047 err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
1052 if (!capable(CAP_NET_ADMIN))
1055 if (dev == ipgre_fb_tunnel_dev) {
1057 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
1060 if ((t = ipgre_tunnel_locate(&p, 0)) == NULL)
1063 if (t == netdev_priv(ipgre_fb_tunnel_dev))
1067 unregister_netdevice(dev);
1079 static struct net_device_stats *ipgre_tunnel_get_stats(struct net_device *dev)
1081 return &(((struct ip_tunnel*)netdev_priv(dev))->stat);
1084 static int ipgre_tunnel_change_mtu(struct net_device *dev, int new_mtu)
1086 struct ip_tunnel *tunnel = netdev_priv(dev);
1087 if (new_mtu < 68 || new_mtu > 0xFFF8 - tunnel->hlen)
1093 /* Nice toy. Unfortunately, useless in real life :-)
1094 It allows to construct virtual multiprotocol broadcast "LAN"
1095 over the Internet, provided multicast routing is tuned.
1098 I have no idea whether this bicycle was invented before me,
1099 so that I had to set ARPHRD_IPGRE to a random value.
1100 I have an impression, that Cisco could make something similar,
1101 but this feature is apparently missing in IOS<=11.2(8).
1103 I set up 10.66.66/24 and fec0:6666:6666::0/96 as virtual networks
1104 with broadcast 224.66.66.66. If you have access to mbone, play with me :-)
1106 ping -t 255 224.66.66.66
1108 If nobody answers, mbone does not work.
1110 ip tunnel add Universe mode gre remote 224.66.66.66 local <Your_real_addr> ttl 255
1111 ip addr add 10.66.66.<somewhat>/24 dev Universe
1112 ifconfig Universe up
1113 ifconfig Universe add fe80::<Your_real_addr>/10
1114 ifconfig Universe add fec0:6666:6666::<Your_real_addr>/96
1117 ftp fec0:6666:6666::193.233.7.65
1122 static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
1123 unsigned short type,
1124 const void *daddr, const void *saddr, unsigned len)
1126 struct ip_tunnel *t = netdev_priv(dev);
1127 struct iphdr *iph = (struct iphdr *)skb_push(skb, t->hlen);
1128 __be16 *p = (__be16*)(iph+1);
1130 memcpy(iph, &t->parms.iph, sizeof(struct iphdr));
1131 p[0] = t->parms.o_flags;
1135 * Set the source hardware address.
1139 memcpy(&iph->saddr, saddr, 4);
1142 memcpy(&iph->daddr, daddr, 4);
1145 if (iph->daddr && !ipv4_is_multicast(iph->daddr))
1151 static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr)
1153 struct iphdr *iph = (struct iphdr*) skb_mac_header(skb);
1154 memcpy(haddr, &iph->saddr, 4);
1158 static const struct header_ops ipgre_header_ops = {
1159 .create = ipgre_header,
1160 .parse = ipgre_header_parse,
1163 #ifdef CONFIG_NET_IPGRE_BROADCAST
1164 static int ipgre_open(struct net_device *dev)
1166 struct ip_tunnel *t = netdev_priv(dev);
1168 if (ipv4_is_multicast(t->parms.iph.daddr)) {
1169 struct flowi fl = { .oif = t->parms.link,
1171 { .daddr = t->parms.iph.daddr,
1172 .saddr = t->parms.iph.saddr,
1173 .tos = RT_TOS(t->parms.iph.tos) } },
1174 .proto = IPPROTO_GRE };
1176 if (ip_route_output_key(&init_net, &rt, &fl))
1177 return -EADDRNOTAVAIL;
1178 dev = rt->u.dst.dev;
1180 if (__in_dev_get_rtnl(dev) == NULL)
1181 return -EADDRNOTAVAIL;
1182 t->mlink = dev->ifindex;
1183 ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr);
1188 static int ipgre_close(struct net_device *dev)
1190 struct ip_tunnel *t = netdev_priv(dev);
1191 if (ipv4_is_multicast(t->parms.iph.daddr) && t->mlink) {
1192 struct in_device *in_dev;
1193 in_dev = inetdev_by_index(dev->nd_net, t->mlink);
1195 ip_mc_dec_group(in_dev, t->parms.iph.daddr);
1204 static void ipgre_tunnel_setup(struct net_device *dev)
1206 dev->uninit = ipgre_tunnel_uninit;
1207 dev->destructor = free_netdev;
1208 dev->hard_start_xmit = ipgre_tunnel_xmit;
1209 dev->get_stats = ipgre_tunnel_get_stats;
1210 dev->do_ioctl = ipgre_tunnel_ioctl;
1211 dev->change_mtu = ipgre_tunnel_change_mtu;
1213 dev->type = ARPHRD_IPGRE;
1214 dev->hard_header_len = LL_MAX_HEADER + sizeof(struct iphdr) + 4;
1215 dev->mtu = ETH_DATA_LEN - sizeof(struct iphdr) - 4;
1216 dev->flags = IFF_NOARP;
1221 static int ipgre_tunnel_init(struct net_device *dev)
1223 struct ip_tunnel *tunnel;
1226 tunnel = netdev_priv(dev);
1227 iph = &tunnel->parms.iph;
1230 strcpy(tunnel->parms.name, dev->name);
1232 memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
1233 memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);
1235 ipgre_tunnel_bind_dev(dev);
1238 #ifdef CONFIG_NET_IPGRE_BROADCAST
1239 if (ipv4_is_multicast(iph->daddr)) {
1242 dev->flags = IFF_BROADCAST;
1243 dev->header_ops = &ipgre_header_ops;
1244 dev->open = ipgre_open;
1245 dev->stop = ipgre_close;
1249 dev->header_ops = &ipgre_header_ops;
1254 static int __init ipgre_fb_tunnel_init(struct net_device *dev)
1256 struct ip_tunnel *tunnel = netdev_priv(dev);
1257 struct iphdr *iph = &tunnel->parms.iph;
1260 strcpy(tunnel->parms.name, dev->name);
1263 iph->protocol = IPPROTO_GRE;
1265 tunnel->hlen = sizeof(struct iphdr) + 4;
1268 tunnels_wc[0] = tunnel;
1273 static struct net_protocol ipgre_protocol = {
1274 .handler = ipgre_rcv,
1275 .err_handler = ipgre_err,
1280 * And now the modules code and kernel interface.
1283 static int __init ipgre_init(void)
1287 printk(KERN_INFO "GRE over IPv4 tunneling driver\n");
1289 if (inet_add_protocol(&ipgre_protocol, IPPROTO_GRE) < 0) {
1290 printk(KERN_INFO "ipgre init: can't add protocol\n");
1294 ipgre_fb_tunnel_dev = alloc_netdev(sizeof(struct ip_tunnel), "gre0",
1295 ipgre_tunnel_setup);
1296 if (!ipgre_fb_tunnel_dev) {
1301 ipgre_fb_tunnel_dev->init = ipgre_fb_tunnel_init;
1303 if ((err = register_netdev(ipgre_fb_tunnel_dev)))
1308 free_netdev(ipgre_fb_tunnel_dev);
1310 inet_del_protocol(&ipgre_protocol, IPPROTO_GRE);
1314 static void __exit ipgre_destroy_tunnels(void)
1318 for (prio = 0; prio < 4; prio++) {
1320 for (h = 0; h < HASH_SIZE; h++) {
1321 struct ip_tunnel *t;
1322 while ((t = tunnels[prio][h]) != NULL)
1323 unregister_netdevice(t->dev);
1328 static void __exit ipgre_fini(void)
1330 if (inet_del_protocol(&ipgre_protocol, IPPROTO_GRE) < 0)
1331 printk(KERN_INFO "ipgre close: can't remove protocol\n");
1334 ipgre_destroy_tunnels();
1338 module_init(ipgre_init);
1339 module_exit(ipgre_fini);
1340 MODULE_LICENSE("GPL");