/*
 *	Linux NET3:	GRE over IP protocol decoder.
 *
 *	Authors: Alexey Kuznetsov (kuznet@ms2.inr.ac.ru)
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/if_ether.h>

#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/ipip.h>
#include <net/arp.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

#ifdef CONFIG_IPV6
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#endif

/*
   Problems & solutions
   --------------------

   1. The most important issue is detecting local dead loops.
   They would cause complete host lockup in transmit, which
   would be "resolved" by stack overflow or, if queueing is enabled,
   by infinite looping in net_bh.

   We cannot track such dead loops during route installation;
   it is an infeasible task. The most general solution would be
   to keep an skb->encapsulation counter (a sort of local ttl)
   and silently drop the packet when it expires. It is the best
   solution, but it requires maintaining a new field in EVERY
   skb, even if no tunneling is used.

   Current solution: the t->recursion lock breaks dead loops. It looks
   like the dev->tbusy flag, but I preferred a new variable, because
   the semantics are different. One day, when hard_start_xmit
   becomes multithreaded, we will have to switch to skb->encapsulation.
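
   For illustration, a minimal sketch of that guard as it is used in
   ipgre_tunnel_xmit() below (error handling elided):

	if (tunnel->recursion++) {
		tunnel->stat.collisions++;
		goto tx_error;
	}
	... build and transmit the encapsulated packet ...
	tunnel->recursion--;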

   2. Networking dead loops would not kill routers, but they would really
   kill the network. The IP hop limit plays the role of "t->recursion" in
   this case, if we copy it from the packet being encapsulated to the
   outer header. It is a very good solution, but it introduces two problems:

   - Routing protocols that use packets with ttl=1 (OSPF, RIP2)
     do not work over tunnels.
   - traceroute does not work. I planned to relay ICMP from the tunnel,
     so that this problem would be solved and traceroute output would
     be even more informative. This idea turned out to be wrong:
     only Linux complies with rfc1812 now (yes, guys, Linux is the only
     true router now :-)); all other routers (at least those in my
     neighbourhood) return only 8 bytes of payload. That settles it.

   Hence, if we want OSPF to work or traceroute to say something reasonable,
   we should search for another solution.

   One of them is to parse the packet, trying to detect an inner
   encapsulation made by our node. This is difficult or even impossible,
   especially taking fragmentation into account. To be short, it is not
   a solution at all.

   Current solution: The solution was UNEXPECTEDLY SIMPLE.
   We force the DF flag on tunnels with a preconfigured hop limit,
   that is ALL. :-) Well, it does not remove the problem completely,
   but exponential growth of network traffic is changed to linear
   (branches that exceed the pmtu are pruned) and the tunnel mtu
   quickly degrades to a value <68, where looping stops.
   Yes, it is not good if there exists a router in the loop
   which does not force DF, even when the encapsulated packets have DF set.
   But it is not our problem! Nobody could accuse us; we did
   all that we could. Even if it was your gated that injected the
   fatal route into the network, even if it was you who configured the
   fatal static route: you are innocent. :-)
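
   Concretely (a short sketch of the rule as enforced in
   ipgre_tunnel_ioctl() below): a preconfigured hop limit forces DF
   on the tunnel header template,

	if (p.iph.ttl)
		p.iph.frag_off |= htons(IP_DF);

   and ipgre_tunnel_xmit() then carries that DF into every outer
   header it emits.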

   3. Really, ipv4/ipip.c, ipv4/ip_gre.c and ipv6/sit.c contain
   practically identical code. It would be good to glue them
   together, but it is not very evident how to make them modular.
   sit is an integral part of IPv6, while ipip and gre are naturally
   modular. We could extract the common parts (hash table, ioctl etc.)
   to a separate module (ip_tunnel.c).

   Alexey Kuznetsov.
*/

static int ipgre_tunnel_init(struct net_device *dev);
static void ipgre_tunnel_setup(struct net_device *dev);

/* Fallback tunnel: no source, no destination, no key, no options */

static int ipgre_fb_tunnel_init(struct net_device *dev);

#define HASH_SIZE	16

static int ipgre_net_id;
struct ipgre_net {
	struct ip_tunnel *tunnels[4][HASH_SIZE];

	struct net_device *fb_tunnel_dev;
};

/* Tunnel hash table */

/*
   4 hash tables:

   3: (remote,local)
   2: (remote,*)
   1: (*,local)
   0: (*,*)

   We require an exact key match, i.e. if a key is present in the packet
   it will match only a tunnel with the same key; if it is not present,
   it will match only a keyless tunnel.

   All keyless packets, if not matched against configured keyless tunnels,
   will match the fallback tunnel.
 */

#define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&0xF)

#define tunnels_r_l	tunnels[3]
#define tunnels_r	tunnels[2]
#define tunnels_l	tunnels[1]
#define tunnels_wc	tunnels[0]
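
/* Example (illustrative, not from the original source): a fully
 * specified tunnel, with both remote and local addresses set and
 * input key K, is stored in bucket
 *
 *	tunnels_r_l[HASH(remote) ^ HASH(K)]
 *
 * which is the first of the four tables scanned by
 * ipgre_tunnel_lookup() below; the wildcard table tunnels_wc is
 * indexed by HASH(K) alone.
 */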

static DEFINE_RWLOCK(ipgre_lock);

/* Given src, dst and key, find the appropriate tunnel for input. */

static struct ip_tunnel * ipgre_tunnel_lookup(struct net *net,
		__be32 remote, __be32 local, __be32 key)
{
	unsigned h0 = HASH(remote);
	unsigned h1 = HASH(key);
	struct ip_tunnel *t;
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);

	for (t = ign->tunnels_r_l[h0^h1]; t; t = t->next) {
		if (local == t->parms.iph.saddr && remote == t->parms.iph.daddr) {
			if (t->parms.i_key == key && (t->dev->flags&IFF_UP))
				return t;
		}
	}
	for (t = ign->tunnels_r[h0^h1]; t; t = t->next) {
		if (remote == t->parms.iph.daddr) {
			if (t->parms.i_key == key && (t->dev->flags&IFF_UP))
				return t;
		}
	}
	for (t = ign->tunnels_l[h1]; t; t = t->next) {
		if (local == t->parms.iph.saddr ||
		    (local == t->parms.iph.daddr &&
		     ipv4_is_multicast(local))) {
			if (t->parms.i_key == key && (t->dev->flags&IFF_UP))
				return t;
		}
	}
	for (t = ign->tunnels_wc[h1]; t; t = t->next) {
		if (t->parms.i_key == key && (t->dev->flags&IFF_UP))
			return t;
	}

	if (ign->fb_tunnel_dev->flags&IFF_UP)
		return netdev_priv(ign->fb_tunnel_dev);
	return NULL;
}

static struct ip_tunnel **__ipgre_bucket(struct ipgre_net *ign,
		struct ip_tunnel_parm *parms)
{
	__be32 remote = parms->iph.daddr;
	__be32 local = parms->iph.saddr;
	__be32 key = parms->i_key;
	unsigned h = HASH(key);
	int prio = 0;

	if (local)
		prio |= 1;
	if (remote && !ipv4_is_multicast(remote)) {
		prio |= 2;
		h ^= HASH(remote);
	}

	return &ign->tunnels[prio][h];
}

static inline struct ip_tunnel **ipgre_bucket(struct ipgre_net *ign,
		struct ip_tunnel *t)
{
	return __ipgre_bucket(ign, &t->parms);
}

static void ipgre_tunnel_link(struct ipgre_net *ign, struct ip_tunnel *t)
{
	struct ip_tunnel **tp = ipgre_bucket(ign, t);

	t->next = *tp;
	write_lock_bh(&ipgre_lock);
	*tp = t;
	write_unlock_bh(&ipgre_lock);
}

static void ipgre_tunnel_unlink(struct ipgre_net *ign, struct ip_tunnel *t)
{
	struct ip_tunnel **tp;

	for (tp = ipgre_bucket(ign, t); *tp; tp = &(*tp)->next) {
		if (t == *tp) {
			write_lock_bh(&ipgre_lock);
			*tp = t->next;
			write_unlock_bh(&ipgre_lock);
			break;
		}
	}
}

static struct ip_tunnel * ipgre_tunnel_locate(struct net *net,
		struct ip_tunnel_parm *parms, int create)
{
	__be32 remote = parms->iph.daddr;
	__be32 local = parms->iph.saddr;
	__be32 key = parms->i_key;
	struct ip_tunnel *t, **tp, *nt;
	struct net_device *dev;
	char name[IFNAMSIZ];
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);

	for (tp = __ipgre_bucket(ign, parms); (t = *tp) != NULL; tp = &t->next) {
		if (local == t->parms.iph.saddr && remote == t->parms.iph.daddr) {
			if (key == t->parms.i_key)
				return t;
		}
	}
	if (!create)
		return NULL;

	if (parms->name[0])
		strlcpy(name, parms->name, IFNAMSIZ);
	else
		sprintf(name, "gre%%d");

	dev = alloc_netdev(sizeof(*t), name, ipgre_tunnel_setup);
	if (!dev)
		return NULL;

	dev_net_set(dev, net);

	if (strchr(name, '%')) {
		if (dev_alloc_name(dev, name) < 0)
			goto failed_free;
	}

	dev->init = ipgre_tunnel_init;
	nt = netdev_priv(dev);
	nt->parms = *parms;

	if (register_netdevice(dev) < 0)
		goto failed_free;

	dev_hold(dev);
	ipgre_tunnel_link(ign, nt);
	return nt;

failed_free:
	free_netdev(dev);
	return NULL;
}

static void ipgre_tunnel_uninit(struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);

	ipgre_tunnel_unlink(ign, netdev_priv(dev));
	dev_put(dev);
}

static void ipgre_err(struct sk_buff *skb, u32 info)
{
/* All the routers (except for Linux) return only
   8 bytes of packet payload. This means that precise relaying of
   ICMP in the real Internet is absolutely infeasible.

   Moreover, Cisco "wise men" put the GRE key in the third word
   of the GRE header. That makes it impossible to maintain even soft
   state for keyed GRE tunnels with enabled checksum. Tell them
   "thank you".

   Well, I wonder: rfc1812 was written by a Cisco employee, so
   what the hell makes these idiots break the standards established
   by themselves???
 */

	struct iphdr *iph = (struct iphdr*)skb->data;
	__be16 *p = (__be16*)(skb->data+(iph->ihl<<2));
	int grehlen = (iph->ihl<<2) + 4;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct ip_tunnel *t;
	__be16 flags;

	flags = p[0];
	if (flags&(GRE_CSUM|GRE_KEY|GRE_SEQ|GRE_ROUTING|GRE_VERSION)) {
		if (flags&(GRE_VERSION|GRE_ROUTING))
			return;

		/* Flags and version are OK.
		   This is known GRE format.
		   Tunnel or error info is decoded via key. */
		if (flags&GRE_KEY) {
			grehlen += 4;
			if (flags&GRE_CSUM)
				grehlen += 4;
		}
	}

	/* If only 8 bytes returned, keyed message will be dropped here */
	if (skb_headlen(skb) < grehlen)
		return;

	switch (type) {
	default:
	case ICMP_PARAMETERPROB:
		return;

	case ICMP_DEST_UNREACH:
		switch (code) {
		case ICMP_SR_FAILED:
		case ICMP_PORT_UNREACH:
			/* Impossible event. */
			return;
		case ICMP_FRAG_NEEDED:
			/* Soft state for pmtu is maintained by IP core. */
			return;
		default:
			/* All others are translated to HOST_UNREACH.
			   rfc2003 contains "deep thoughts" about NET_UNREACH,
			   I believe they are just ether pollution. --ANK
			 */
			break;
		}
		break;

	case ICMP_TIME_EXCEEDED:
		if (code != ICMP_EXC_TTL)
			return;
		break;
	}

	read_lock(&ipgre_lock);
	t = ipgre_tunnel_lookup(dev_net(skb->dev), iph->daddr, iph->saddr,
				(flags&GRE_KEY) ?
				*(((__be32*)p) + (grehlen>>2) - 1) : 0);
	if (t == NULL || t->parms.iph.daddr == 0 ||
	    ipv4_is_multicast(t->parms.iph.daddr))
		goto out;

	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
		goto out;

	if (jiffies - t->err_time < IPTUNNEL_ERR_TIMEO)
		t->err_count++;
	else
		t->err_count = 1;
	t->err_time = jiffies;
out:
	read_unlock(&ipgre_lock);
}

static inline void ipgre_ecn_decapsulate(struct iphdr *iph, struct sk_buff *skb)
{
	if (INET_ECN_is_ce(iph->tos)) {
		if (skb->protocol == htons(ETH_P_IP)) {
			IP_ECN_set_ce(ip_hdr(skb));
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			IP6_ECN_set_ce(ipv6_hdr(skb));
		}
	}
}

static inline u8
ipgre_ecn_encapsulate(u8 tos, struct iphdr *old_iph, struct sk_buff *skb)
{
	u8 inner = 0;
	if (skb->protocol == htons(ETH_P_IP))
		inner = old_iph->tos;
	else if (skb->protocol == htons(ETH_P_IPV6))
		inner = ipv6_get_dsfield((struct ipv6hdr *)old_iph);
	return INET_ECN_encapsulate(tos, inner);
}
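
/* Worked example (illustrative): with a configured tunnel tos of 0x00
 * and an inner IPv4 packet marked ECT(0), INET_ECN_encapsulate()
 * copies the ECN field outward, so the outer header also carries
 * ECT(0); a CE-marked inner packet goes out with ECT(0) on the outer
 * header, and ipgre_ecn_decapsulate() above restores CE on the inner
 * header if the outer one picked it up in transit.
 */

/* For reference (RFC 1701 layout, as parsed by ipgre_rcv() below and
 * built by ipgre_tunnel_xmit()): the GRE header is a 4-byte base,
 *
 *	|C|R|K|S|s|Recur| Flags | Ver |     Protocol Type     |
 *
 * optionally followed by one 4-byte word each for checksum+offset,
 * key, and sequence number, in that order.
 */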

static int ipgre_rcv(struct sk_buff *skb)
{
	struct iphdr *iph;
	u8     *h;
	__be16 flags;
	__sum16 csum = 0;
	__be32 key = 0;
	u32    seqno = 0;
	struct ip_tunnel *tunnel;
	int    offset = 4;

	if (!pskb_may_pull(skb, 16))
		goto drop_nolock;

	iph = ip_hdr(skb);
	h = skb->data;
	flags = *(__be16*)h;

	if (flags&(GRE_CSUM|GRE_KEY|GRE_ROUTING|GRE_SEQ|GRE_VERSION)) {
		/* - Version must be 0.
		   - We do not support routing headers.
		 */
		if (flags&(GRE_VERSION|GRE_ROUTING))
			goto drop_nolock;

		if (flags&GRE_CSUM) {
			switch (skb->ip_summed) {
			case CHECKSUM_COMPLETE:
				csum = csum_fold(skb->csum);
				if (!csum)
					break;
				/* fall through */
			case CHECKSUM_NONE:
				skb->csum = 0;
				csum = __skb_checksum_complete(skb);
				skb->ip_summed = CHECKSUM_COMPLETE;
			}
			offset += 4;
		}
		if (flags&GRE_KEY) {
			key = *(__be32*)(h + offset);
			offset += 4;
		}
		if (flags&GRE_SEQ) {
			seqno = ntohl(*(__be32*)(h + offset));
			offset += 4;
		}
	}

	read_lock(&ipgre_lock);
	if ((tunnel = ipgre_tunnel_lookup(dev_net(skb->dev),
					iph->saddr, iph->daddr, key)) != NULL) {
		secpath_reset(skb);

		skb->protocol = *(__be16*)(h + 2);
		/* WCCP version 1 and 2 protocol decoding.
		 * - Change protocol to IP
		 * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header
		 */
		if (flags == 0 &&
		    skb->protocol == htons(ETH_P_WCCP)) {
			skb->protocol = htons(ETH_P_IP);
			if ((*(h + offset) & 0xF0) != 0x40)
				offset += 4;
		}

		skb->mac_header = skb->network_header;
		__pskb_pull(skb, offset);
		skb_reset_network_header(skb);
		skb_postpull_rcsum(skb, skb_transport_header(skb), offset);
		skb->pkt_type = PACKET_HOST;
#ifdef CONFIG_NET_IPGRE_BROADCAST
		if (ipv4_is_multicast(iph->daddr)) {
			/* Looped back packet, drop it! */
			if (skb->rtable->fl.iif == 0)
				goto drop;
			tunnel->stat.multicast++;
			skb->pkt_type = PACKET_BROADCAST;
		}
#endif

		if (((flags&GRE_CSUM) && csum) ||
		    (!(flags&GRE_CSUM) && tunnel->parms.i_flags&GRE_CSUM)) {
			tunnel->stat.rx_crc_errors++;
			tunnel->stat.rx_errors++;
			goto drop;
		}
		if (tunnel->parms.i_flags&GRE_SEQ) {
			if (!(flags&GRE_SEQ) ||
			    (tunnel->i_seqno && (s32)(seqno - tunnel->i_seqno) < 0)) {
				tunnel->stat.rx_fifo_errors++;
				tunnel->stat.rx_errors++;
				goto drop;
			}
			tunnel->i_seqno = seqno + 1;
		}
		tunnel->stat.rx_packets++;
		tunnel->stat.rx_bytes += skb->len;
		skb->dev = tunnel->dev;
		dst_release(skb->dst);
		skb->dst = NULL;
		nf_reset(skb);
		ipgre_ecn_decapsulate(iph, skb);
		netif_rx(skb);
		read_unlock(&ipgre_lock);
		return(0);
	}
	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);

drop:
	read_unlock(&ipgre_lock);
drop_nolock:
	kfree_skb(skb);
	return(0);
}
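
/* A note on the GRE_SEQ test above (illustrative): the in-order check
 * uses serial-number arithmetic, so it survives 32-bit wraparound.
 * E.g. with i_seqno == 0xffffffff, an arriving seqno of 0 gives
 * (s32)(0 - 0xffffffff) == 1, which is not < 0, so the packet is
 * accepted and i_seqno advances to 1.
 */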

static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct net_device_stats *stats = &tunnel->stat;
	struct iphdr  *old_iph = ip_hdr(skb);
	struct iphdr  *tiph;
	u8     tos;
	__be16 df;
	struct rtable *rt;			/* Route to the other host */
	struct net_device *tdev;		/* Device to other host */
	struct iphdr  *iph;			/* Our new IP header */
	unsigned int max_headroom;		/* The extra header space needed */
	int    gre_hlen;
	__be32 dst;
	int    mtu;

	if (tunnel->recursion++) {
		tunnel->stat.collisions++;
		goto tx_error;
	}

	if (dev->header_ops) {
		gre_hlen = 0;
		tiph = (struct iphdr*)skb->data;
	} else {
		gre_hlen = tunnel->hlen;
		tiph = &tunnel->parms.iph;
	}

	if ((dst = tiph->daddr) == 0) {
		/* NBMA tunnel */

		if (skb->dst == NULL) {
			tunnel->stat.tx_fifo_errors++;
			goto tx_error;
		}

		if (skb->protocol == htons(ETH_P_IP)) {
			rt = skb->rtable;
			if ((dst = rt->rt_gateway) == 0)
				goto tx_error_icmp;
		}
#ifdef CONFIG_IPV6
		else if (skb->protocol == htons(ETH_P_IPV6)) {
			struct in6_addr *addr6;
			int addr_type;
			struct neighbour *neigh = skb->dst->neighbour;

			if (neigh == NULL)
				goto tx_error;

			addr6 = (struct in6_addr*)&neigh->primary_key;
			addr_type = ipv6_addr_type(addr6);

			if (addr_type == IPV6_ADDR_ANY) {
				addr6 = &ipv6_hdr(skb)->daddr;
				addr_type = ipv6_addr_type(addr6);
			}

			if ((addr_type & IPV6_ADDR_COMPATv4) == 0)
				goto tx_error_icmp;

			dst = addr6->s6_addr32[3];
		}
#endif
		else
			goto tx_error;
	}

	tos = tiph->tos;
	if (tos == 1) {
		tos = 0;
		if (skb->protocol == htons(ETH_P_IP))
			tos = old_iph->tos;
	}

	{
		struct flowi fl = { .oif = tunnel->parms.link,
				    .nl_u = { .ip4_u =
					      { .daddr = dst,
						.saddr = tiph->saddr,
						.tos = RT_TOS(tos) } },
				    .proto = IPPROTO_GRE };
		if (ip_route_output_key(dev_net(dev), &rt, &fl)) {
			tunnel->stat.tx_carrier_errors++;
			goto tx_error;
		}
	}
	tdev = rt->u.dst.dev;

	if (tdev == dev) {
		ip_rt_put(rt);
		tunnel->stat.collisions++;
		goto tx_error;
	}

	df = tiph->frag_off;
	if (df)
		mtu = dst_mtu(&rt->u.dst) - tunnel->hlen;
	else
		mtu = skb->dst ? dst_mtu(skb->dst) : dev->mtu;

	if (skb->dst)
		skb->dst->ops->update_pmtu(skb->dst, mtu);

	if (skb->protocol == htons(ETH_P_IP)) {
		df |= (old_iph->frag_off&htons(IP_DF));

		if ((old_iph->frag_off&htons(IP_DF)) &&
		    mtu < ntohs(old_iph->tot_len)) {
			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
			ip_rt_put(rt);
			goto tx_error;
		}
	}
#ifdef CONFIG_IPV6
	else if (skb->protocol == htons(ETH_P_IPV6)) {
		struct rt6_info *rt6 = (struct rt6_info*)skb->dst;

		if (rt6 && mtu < dst_mtu(skb->dst) && mtu >= IPV6_MIN_MTU) {
			if ((tunnel->parms.iph.daddr &&
			     !ipv4_is_multicast(tunnel->parms.iph.daddr)) ||
			    rt6->rt6i_dst.plen == 128) {
				rt6->rt6i_flags |= RTF_MODIFIED;
				skb->dst->metrics[RTAX_MTU-1] = mtu;
			}
		}

		if (mtu >= IPV6_MIN_MTU && mtu < skb->len - tunnel->hlen + gre_hlen) {
			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev);
			ip_rt_put(rt);
			goto tx_error;
		}
	}
#endif

	if (tunnel->err_count > 0) {
		if (jiffies - tunnel->err_time < IPTUNNEL_ERR_TIMEO) {
			tunnel->err_count--;

			dst_link_failure(skb);
		} else
			tunnel->err_count = 0;
	}

	max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen;

	if (skb_headroom(skb) < max_headroom || skb_shared(skb)||
	    (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
		struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
		if (!new_skb) {
			ip_rt_put(rt);
			stats->tx_dropped++;
			dev_kfree_skb(skb);
			tunnel->recursion--;
			return 0;
		}
		if (skb->sk)
			skb_set_owner_w(new_skb, skb->sk);
		dev_kfree_skb(skb);
		skb = new_skb;
		old_iph = ip_hdr(skb);
	}

	skb->transport_header = skb->network_header;
	skb_push(skb, gre_hlen);
	skb_reset_network_header(skb);
	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
			      IPSKB_REROUTED);
	dst_release(skb->dst);
	skb->dst = &rt->u.dst;

	/*
	 *	Push down and install the IPIP header.
	 */

	iph			= ip_hdr(skb);
	iph->version		= 4;
	iph->ihl		= sizeof(struct iphdr) >> 2;
	iph->frag_off		= df;
	iph->protocol		= IPPROTO_GRE;
	iph->tos		= ipgre_ecn_encapsulate(tos, old_iph, skb);
	iph->daddr		= rt->rt_dst;
	iph->saddr		= rt->rt_src;

	if ((iph->ttl = tiph->ttl) == 0) {
		if (skb->protocol == htons(ETH_P_IP))
			iph->ttl = old_iph->ttl;
#ifdef CONFIG_IPV6
		else if (skb->protocol == htons(ETH_P_IPV6))
			iph->ttl = ((struct ipv6hdr*)old_iph)->hop_limit;
#endif
		else
			iph->ttl = dst_metric(&rt->u.dst, RTAX_HOPLIMIT);
	}

	((__be16*)(iph+1))[0] = tunnel->parms.o_flags;
	((__be16*)(iph+1))[1] = skb->protocol;

	if (tunnel->parms.o_flags&(GRE_KEY|GRE_CSUM|GRE_SEQ)) {
		__be32 *ptr = (__be32*)(((u8*)iph) + tunnel->hlen - 4);

		if (tunnel->parms.o_flags&GRE_SEQ) {
			++tunnel->o_seqno;
			*ptr = htonl(tunnel->o_seqno);
			ptr--;
		}
		if (tunnel->parms.o_flags&GRE_KEY) {
			*ptr = tunnel->parms.o_key;
			ptr--;
		}
		if (tunnel->parms.o_flags&GRE_CSUM) {
			*ptr = 0;
			*(__sum16*)ptr = ip_compute_csum((void*)(iph+1), skb->len - sizeof(struct iphdr));
		}
	}

	nf_reset(skb);

	IPTUNNEL_XMIT();
	tunnel->recursion--;
	return 0;

tx_error_icmp:
	dst_link_failure(skb);

tx_error:
	stats->tx_errors++;
	dev_kfree_skb(skb);
	tunnel->recursion--;
	return 0;
}
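
/* Example (illustrative): because the option words above are written
 * back-to-front through ptr--, a tunnel with
 * o_flags = GRE_CSUM|GRE_KEY|GRE_SEQ emits them on the wire in the
 * RFC 1701 order: checksum word first, then key, then sequence
 * number, directly after the 4-byte flags/protocol base header.
 */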

static void ipgre_tunnel_bind_dev(struct net_device *dev)
{
	struct net_device *tdev = NULL;
	struct ip_tunnel *tunnel;
	struct iphdr *iph;
	int hlen = LL_MAX_HEADER;
	int mtu = ETH_DATA_LEN;
	int addend = sizeof(struct iphdr) + 4;

	tunnel = netdev_priv(dev);
	iph = &tunnel->parms.iph;

	/* Guess output device to choose reasonable mtu and hard_header_len */

	if (iph->daddr) {
		struct flowi fl = { .oif = tunnel->parms.link,
				    .nl_u = { .ip4_u =
					      { .daddr = iph->daddr,
						.saddr = iph->saddr,
						.tos = RT_TOS(iph->tos) } },
				    .proto = IPPROTO_GRE };
		struct rtable *rt;
		if (!ip_route_output_key(dev_net(dev), &rt, &fl)) {
			tdev = rt->u.dst.dev;
			ip_rt_put(rt);
		}
		dev->flags |= IFF_POINTOPOINT;
	}

	if (!tdev && tunnel->parms.link)
		tdev = __dev_get_by_index(dev_net(dev), tunnel->parms.link);

	if (tdev) {
		hlen = tdev->hard_header_len;
		mtu = tdev->mtu;
	}
	dev->iflink = tunnel->parms.link;

	/* Precalculate GRE options length */
	if (tunnel->parms.o_flags&(GRE_CSUM|GRE_KEY|GRE_SEQ)) {
		if (tunnel->parms.o_flags&GRE_CSUM)
			addend += 4;
		if (tunnel->parms.o_flags&GRE_KEY)
			addend += 4;
		if (tunnel->parms.o_flags&GRE_SEQ)
			addend += 4;
	}
	dev->hard_header_len = hlen + addend;
	dev->mtu = mtu - addend;
	tunnel->hlen = addend;
}
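
/* Worked example (illustrative): for a tunnel over a 1500-byte
 * Ethernet device with o_flags = GRE_CSUM|GRE_KEY, the precalculation
 * above gives addend = 20 (outer iphdr) + 4 (base GRE) + 4 (csum)
 * + 4 (key) = 32, so tunnel->hlen = 32 and dev->mtu = 1500 - 32 = 1468.
 */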

static int
ipgre_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
{
	int err = 0;
	struct ip_tunnel_parm p;
	struct ip_tunnel *t;
	struct net *net = dev_net(dev);
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);

	switch (cmd) {
	case SIOCGETTUNNEL:
		t = NULL;
		if (dev == ign->fb_tunnel_dev) {
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
				err = -EFAULT;
				break;
			}
			t = ipgre_tunnel_locate(net, &p, 0);
		}
		if (t == NULL)
			t = netdev_priv(dev);
		memcpy(&p, &t->parms, sizeof(p));
		if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
			err = -EFAULT;
		break;

	case SIOCADDTUNNEL:
	case SIOCCHGTUNNEL:
		err = -EPERM;
		if (!capable(CAP_NET_ADMIN))
			goto done;

		err = -EFAULT;
		if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
			goto done;

		err = -EINVAL;
		if (p.iph.version != 4 || p.iph.protocol != IPPROTO_GRE ||
		    p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF)) ||
		    ((p.i_flags|p.o_flags)&(GRE_VERSION|GRE_ROUTING)))
			goto done;
		if (p.iph.ttl)
			p.iph.frag_off |= htons(IP_DF);

		if (!(p.i_flags&GRE_KEY))
			p.i_key = 0;
		if (!(p.o_flags&GRE_KEY))
			p.o_key = 0;

		t = ipgre_tunnel_locate(net, &p, cmd == SIOCADDTUNNEL);

		if (dev != ign->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
			if (t != NULL) {
				if (t->dev != dev) {
					err = -EEXIST;
					break;
				}
			} else {
				unsigned nflags = 0;

				t = netdev_priv(dev);

				if (ipv4_is_multicast(p.iph.daddr))
					nflags = IFF_BROADCAST;
				else if (p.iph.daddr)
					nflags = IFF_POINTOPOINT;

				if ((dev->flags^nflags)&(IFF_POINTOPOINT|IFF_BROADCAST)) {
					err = -EINVAL;
					break;
				}
				ipgre_tunnel_unlink(ign, t);
				t->parms.iph.saddr = p.iph.saddr;
				t->parms.iph.daddr = p.iph.daddr;
				t->parms.i_key = p.i_key;
				t->parms.o_key = p.o_key;
				memcpy(dev->dev_addr, &p.iph.saddr, 4);
				memcpy(dev->broadcast, &p.iph.daddr, 4);
				ipgre_tunnel_link(ign, t);
				netdev_state_change(dev);
			}
		}

		if (t) {
			err = 0;
			if (cmd == SIOCCHGTUNNEL) {
				t->parms.iph.ttl = p.iph.ttl;
				t->parms.iph.tos = p.iph.tos;
				t->parms.iph.frag_off = p.iph.frag_off;
				if (t->parms.link != p.link) {
					t->parms.link = p.link;
					ipgre_tunnel_bind_dev(dev);
					netdev_state_change(dev);
				}
			}
			if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms, sizeof(p)))
				err = -EFAULT;
		} else
			err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
		break;

	case SIOCDELTUNNEL:
		err = -EPERM;
		if (!capable(CAP_NET_ADMIN))
			goto done;

		if (dev == ign->fb_tunnel_dev) {
			err = -EFAULT;
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
				goto done;
			err = -ENOENT;
			if ((t = ipgre_tunnel_locate(net, &p, 0)) == NULL)
				goto done;
			err = -EPERM;
			if (t == netdev_priv(ign->fb_tunnel_dev))
				goto done;
			dev = t->dev;
		}
		unregister_netdevice(dev);
		err = 0;
		break;

	default:
		err = -EINVAL;
	}

done:
	return err;
}
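
/* Userspace view (illustrative sketch, not part of this file): the
 * commands above are issued through an AF_INET socket with the
 * parameter block hung off a struct ifreq, e.g. to read back the
 * configuration of gre0:
 *
 *	struct ip_tunnel_parm p = { };
 *	struct ifreq ifr;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strcpy(ifr.ifr_name, "gre0");
 *	ifr.ifr_ifru.ifru_data = (void *)&p;
 *	ioctl(fd, SIOCGETTUNNEL, &ifr);
 *	on success, p holds the iph template, keys and flags
 */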

static struct net_device_stats *ipgre_tunnel_get_stats(struct net_device *dev)
{
	return &(((struct ip_tunnel*)netdev_priv(dev))->stat);
}

static int ipgre_tunnel_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	if (new_mtu < 68 || new_mtu > 0xFFF8 - tunnel->hlen)
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}

/* Nice toy. Unfortunately, useless in real life :-)
   It allows one to construct a virtual multiprotocol broadcast "LAN"
   over the Internet, provided multicast routing is tuned.

   I have no idea whether this bicycle was invented before me,
   so I had to set ARPHRD_IPGRE to a random value.
   I have the impression that Cisco could have made something similar,
   but this feature is apparently missing in IOS<=11.2(8).

   I set up 10.66.66/24 and fec0:6666:6666::0/96 as virtual networks
   with broadcast 224.66.66.66. If you have access to mbone, play with me :-)

   ping -t 255 224.66.66.66

   If nobody answers, mbone does not work.

   ip tunnel add Universe mode gre remote 224.66.66.66 local <Your_real_addr> ttl 255
   ip addr add 10.66.66.<somewhat>/24 dev Universe
   ifconfig Universe up
   ifconfig Universe add fe80::<Your_real_addr>/10
   ifconfig Universe add fec0:6666:6666::<Your_real_addr>/96
   ftp 10.66.66.66
   ...
   ftp fec0:6666:6666::193.233.7.65
   ...
 */

static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
			unsigned short type,
			const void *daddr, const void *saddr, unsigned len)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct iphdr *iph = (struct iphdr *)skb_push(skb, t->hlen);
	__be16 *p = (__be16*)(iph+1);

	memcpy(iph, &t->parms.iph, sizeof(struct iphdr));
	p[0] = t->parms.o_flags;
	p[1] = htons(type);

	/*
	 *	Set the source hardware address.
	 */

	if (saddr)
		memcpy(&iph->saddr, saddr, 4);

	if (daddr) {
		memcpy(&iph->daddr, daddr, 4);
		return t->hlen;
	}
	if (iph->daddr && !ipv4_is_multicast(iph->daddr))
		return t->hlen;

	return -t->hlen;
}

static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr)
{
	struct iphdr *iph = (struct iphdr*) skb_mac_header(skb);
	memcpy(haddr, &iph->saddr, 4);
	return 4;
}

static const struct header_ops ipgre_header_ops = {
	.create	= ipgre_header,
	.parse	= ipgre_header_parse,
};

#ifdef CONFIG_NET_IPGRE_BROADCAST
static int ipgre_open(struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);

	if (ipv4_is_multicast(t->parms.iph.daddr)) {
		struct flowi fl = { .oif = t->parms.link,
				    .nl_u = { .ip4_u =
					      { .daddr = t->parms.iph.daddr,
						.saddr = t->parms.iph.saddr,
						.tos = RT_TOS(t->parms.iph.tos) } },
				    .proto = IPPROTO_GRE };
		struct rtable *rt;
		if (ip_route_output_key(dev_net(dev), &rt, &fl))
			return -EADDRNOTAVAIL;
		dev = rt->u.dst.dev;
		ip_rt_put(rt);
		if (__in_dev_get_rtnl(dev) == NULL)
			return -EADDRNOTAVAIL;
		t->mlink = dev->ifindex;
		ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr);
	}
	return 0;
}

static int ipgre_close(struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);
	if (ipv4_is_multicast(t->parms.iph.daddr) && t->mlink) {
		struct in_device *in_dev;
		in_dev = inetdev_by_index(dev_net(dev), t->mlink);
		if (in_dev) {
			ip_mc_dec_group(in_dev, t->parms.iph.daddr);
			in_dev_put(in_dev);
		}
	}
	return 0;
}

#endif

static void ipgre_tunnel_setup(struct net_device *dev)
{
	dev->uninit		= ipgre_tunnel_uninit;
	dev->destructor		= free_netdev;
	dev->hard_start_xmit	= ipgre_tunnel_xmit;
	dev->get_stats		= ipgre_tunnel_get_stats;
	dev->do_ioctl		= ipgre_tunnel_ioctl;
	dev->change_mtu		= ipgre_tunnel_change_mtu;

	dev->type		= ARPHRD_IPGRE;
	dev->hard_header_len	= LL_MAX_HEADER + sizeof(struct iphdr) + 4;
	dev->mtu		= ETH_DATA_LEN - sizeof(struct iphdr) - 4;
	dev->flags		= IFF_NOARP;
	dev->iflink		= 0;
	dev->addr_len		= 4;
	dev->features		|= NETIF_F_NETNS_LOCAL;
}

static int ipgre_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel;
	struct iphdr *iph;

	tunnel = netdev_priv(dev);
	iph = &tunnel->parms.iph;

	tunnel->dev = dev;
	strcpy(tunnel->parms.name, dev->name);

	memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
	memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);

	ipgre_tunnel_bind_dev(dev);

	if (iph->daddr) {
#ifdef CONFIG_NET_IPGRE_BROADCAST
		if (ipv4_is_multicast(iph->daddr)) {
			if (!iph->saddr)
				return -EINVAL;
			dev->flags = IFF_BROADCAST;
			dev->header_ops = &ipgre_header_ops;
			dev->open = ipgre_open;
			dev->stop = ipgre_close;
		}
#endif
	} else
		dev->header_ops = &ipgre_header_ops;

	return 0;
}

static int ipgre_fb_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct iphdr *iph = &tunnel->parms.iph;
	struct ipgre_net *ign = net_generic(dev_net(dev), ipgre_net_id);

	tunnel->dev = dev;
	strcpy(tunnel->parms.name, dev->name);

	iph->version	= 4;
	iph->protocol	= IPPROTO_GRE;
	iph->ihl	= 5;
	tunnel->hlen	= sizeof(struct iphdr) + 4;

	dev_hold(dev);
	ign->tunnels_wc[0] = tunnel;
	return 0;
}

static struct net_protocol ipgre_protocol = {
	.handler	= ipgre_rcv,
	.err_handler	= ipgre_err,
};

static void ipgre_destroy_tunnels(struct ipgre_net *ign)
{
	int prio;

	for (prio = 0; prio < 4; prio++) {
		int h;
		for (h = 0; h < HASH_SIZE; h++) {
			struct ip_tunnel *t;
			while ((t = ign->tunnels[prio][h]) != NULL)
				unregister_netdevice(t->dev);
		}
	}
}

static int ipgre_init_net(struct net *net)
{
	int err;
	struct ipgre_net *ign;

	err = -ENOMEM;
	ign = kzalloc(sizeof(struct ipgre_net), GFP_KERNEL);
	if (ign == NULL)
		goto err_alloc;

	err = net_assign_generic(net, ipgre_net_id, ign);
	if (err < 0)
		goto err_assign;

	ign->fb_tunnel_dev = alloc_netdev(sizeof(struct ip_tunnel), "gre0",
					   ipgre_tunnel_setup);
	if (!ign->fb_tunnel_dev) {
		err = -ENOMEM;
		goto err_alloc_dev;
	}

	ign->fb_tunnel_dev->init = ipgre_fb_tunnel_init;
	dev_net_set(ign->fb_tunnel_dev, net);

	if ((err = register_netdev(ign->fb_tunnel_dev)))
		goto err_reg_dev;

	return 0;

err_reg_dev:
	free_netdev(ign->fb_tunnel_dev);
err_alloc_dev:
err_assign:
	kfree(ign);
err_alloc:
	return err;
}

static void ipgre_exit_net(struct net *net)
{
	struct ipgre_net *ign;

	ign = net_generic(net, ipgre_net_id);
	rtnl_lock();
	ipgre_destroy_tunnels(ign);
	rtnl_unlock();
	kfree(ign);
}

static struct pernet_operations ipgre_net_ops = {
	.init = ipgre_init_net,
	.exit = ipgre_exit_net,
};

/*
 *	And now the modules code and kernel interface.
 */

static int __init ipgre_init(void)
{
	int err;

	printk(KERN_INFO "GRE over IPv4 tunneling driver\n");

	if (inet_add_protocol(&ipgre_protocol, IPPROTO_GRE) < 0) {
		printk(KERN_INFO "ipgre init: can't add protocol\n");
		return -EAGAIN;
	}

	err = register_pernet_gen_device(&ipgre_net_id, &ipgre_net_ops);
	if (err < 0)
		inet_del_protocol(&ipgre_protocol, IPPROTO_GRE);

	return err;
}

static void __exit ipgre_fini(void)
{
	if (inet_del_protocol(&ipgre_protocol, IPPROTO_GRE) < 0)
		printk(KERN_INFO "ipgre close: can't remove protocol\n");

	unregister_pernet_gen_device(ipgre_net_id, &ipgre_net_ops);
}

module_init(ipgre_init);
module_exit(ipgre_fini);
MODULE_LICENSE("GPL");