1 /*
2  *  TUN - Universal TUN/TAP device driver.
3  *  Copyright (C) 1999-2002 Maxim Krasnyansky <maxk@qualcomm.com>
4  *
5  *  This program is free software; you can redistribute it and/or modify
6  *  it under the terms of the GNU General Public License as published by
7  *  the Free Software Foundation; either version 2 of the License, or
8  *  (at your option) any later version.
9  *
10  *  This program is distributed in the hope that it will be useful,
11  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
12  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13  *  GNU General Public License for more details.
14  *
15  *  $Id: tun.c,v 1.15 2002/03/01 02:44:24 maxk Exp $
16  */
17
18 /*
19  *  Changes:
20  *
21  *  Mike Kershaw <dragorn@kismetwireless.net> 2005/08/14
22  *    Add TUNSETLINK ioctl to set the link encapsulation
23  *
24  *  Mark Smith <markzzzsmith@yahoo.com.au>
25  *    Use random_ether_addr() for tap MAC address.
26  *
27  *  Harald Roelle <harald.roelle@ifi.lmu.de>  2004/04/20
28  *    Fixes in packet dropping, queue length setting and queue wakeup.
29  *    Increased default tx queue length.
30  *    Added ethtool API.
31  *    Minor cleanups
32  *
33  *  Daniel Podlejski <underley@underley.eu.org>
34  *    Modifications for 2.3.99-pre5 kernel.
35  */
36
37 #define DRV_NAME        "tun"
38 #define DRV_VERSION     "1.6"
39 #define DRV_DESCRIPTION "Universal TUN/TAP device driver"
40 #define DRV_COPYRIGHT   "(C) 1999-2004 Max Krasnyansky <maxk@qualcomm.com>"
41
42 #include <linux/module.h>
43 #include <linux/errno.h>
44 #include <linux/kernel.h>
45 #include <linux/major.h>
46 #include <linux/slab.h>
47 #include <linux/smp_lock.h>
48 #include <linux/poll.h>
49 #include <linux/fcntl.h>
50 #include <linux/init.h>
51 #include <linux/skbuff.h>
52 #include <linux/netdevice.h>
53 #include <linux/etherdevice.h>
54 #include <linux/miscdevice.h>
55 #include <linux/ethtool.h>
56 #include <linux/rtnetlink.h>
57 #include <linux/if.h>
58 #include <linux/if_arp.h>
59 #include <linux/if_ether.h>
60 #include <linux/if_tun.h>
61 #include <linux/crc32.h>
62 #include <linux/nsproxy.h>
63 #include <linux/virtio_net.h>
64 #include <net/net_namespace.h>
65 #include <net/netns/generic.h>
66 #include <net/rtnetlink.h>
67 #include <net/sock.h>
68
69 #include <asm/system.h>
70 #include <asm/uaccess.h>
71
72 /* Uncomment to enable debugging */
73 /* #define TUN_DEBUG 1 */
74
75 #ifdef TUN_DEBUG
76 static int debug;
77
78 #define DBG  if(tun->debug)printk
79 #define DBG1 if(debug==2)printk
80 #else
81 #define DBG( a... )
82 #define DBG1( a... )
83 #endif
84
85 #define FLT_EXACT_COUNT 8
86 struct tap_filter {
87         unsigned int    count;    /* Number of addrs. Zero means disabled */
88         u32             mask[2];  /* Mask of the hashed addrs */
89         unsigned char   addr[FLT_EXACT_COUNT][ETH_ALEN];
90 };
91
92 struct tun_file {
93         atomic_t count;
94         struct tun_struct *tun;
95         struct net *net;
96 };
97
98 struct tun_sock;
99
100 struct tun_struct {
101         struct tun_file         *tfile;
102         unsigned int            flags;
103         uid_t                   owner;
104         gid_t                   group;
105
106         struct sk_buff_head     readq;
107
108         struct net_device       *dev;
109         struct fasync_struct    *fasync;
110
111         struct tap_filter       txflt;
112         struct sock             *sk;
113         struct socket           socket;
114
115 #ifdef TUN_DEBUG
116         int debug;
117 #endif
118 };
119
120 struct tun_sock {
121         struct sock             sk;
122         struct tun_struct       *tun;
123 };
124
125 static inline struct tun_sock *tun_sk(struct sock *sk)
126 {
127         return container_of(sk, struct tun_sock, sk);
128 }
129
130 static int tun_attach(struct tun_struct *tun, struct file *file)
131 {
132         struct tun_file *tfile = file->private_data;
133         const struct cred *cred = current_cred();
134         int err;
135
136         ASSERT_RTNL();
137
138         /* Check permissions */
139         if (((tun->owner != -1 && cred->euid != tun->owner) ||
140              (tun->group != -1 && !in_egroup_p(tun->group))) &&
141                 !capable(CAP_NET_ADMIN))
142                 return -EPERM;
143
144         netif_tx_lock_bh(tun->dev);
145
146         err = -EINVAL;
147         if (tfile->tun)
148                 goto out;
149
150         err = -EBUSY;
151         if (tun->tfile)
152                 goto out;
153
154         err = 0;
155         tfile->tun = tun;
156         tun->tfile = tfile;
157         dev_hold(tun->dev);
158         sock_hold(tun->sk);
159         atomic_inc(&tfile->count);
160
161 out:
162         netif_tx_unlock_bh(tun->dev);
163         return err;
164 }
165
166 static void __tun_detach(struct tun_struct *tun)
167 {
168         /* Detach from net device */
169         netif_tx_lock_bh(tun->dev);
170         tun->tfile = NULL;
171         netif_tx_unlock_bh(tun->dev);
172
173         /* Drop read queue */
174         skb_queue_purge(&tun->readq);
175
176         /* Drop the extra count on the net device */
177         dev_put(tun->dev);
178 }
179
180 static void tun_detach(struct tun_struct *tun)
181 {
182         rtnl_lock();
183         __tun_detach(tun);
184         rtnl_unlock();
185 }
186
187 static struct tun_struct *__tun_get(struct tun_file *tfile)
188 {
189         struct tun_struct *tun = NULL;
190
191         if (atomic_inc_not_zero(&tfile->count))
192                 tun = tfile->tun;
193
194         return tun;
195 }
196
197 static struct tun_struct *tun_get(struct file *file)
198 {
199         return __tun_get(file->private_data);
200 }
201
202 static void tun_put(struct tun_struct *tun)
203 {
204         struct tun_file *tfile = tun->tfile;
205
206         if (atomic_dec_and_test(&tfile->count))
207                 tun_detach(tfile->tun);
208 }
209
210 /* TAP filtering */
211 static void addr_hash_set(u32 *mask, const u8 *addr)
212 {
213         int n = ether_crc(ETH_ALEN, addr) >> 26;
214         mask[n >> 5] |= (1 << (n & 31));
215 }
216
217 static unsigned int addr_hash_test(const u32 *mask, const u8 *addr)
218 {
219         int n = ether_crc(ETH_ALEN, addr) >> 26;
220         return mask[n >> 5] & (1 << (n & 31));
221 }
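/*
 * Note on the scheme above: ether_crc() yields the Ethernet CRC-32 of the
 * address, and its top six bits (>> 26) select one of 64 bins spread over
 * the two 32-bit words of mask[], much like the 64-bin multicast hash
 * filters implemented by many Ethernet MACs.  Worked example (illustrative
 * value, not taken from real traffic): a CRC of 0xd4000000 gives n == 53,
 * so bit (53 & 31) == 21 of mask[53 >> 5] == mask[1] is set.
 */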
222
223 static int update_filter(struct tap_filter *filter, void __user *arg)
224 {
225         struct { u8 u[ETH_ALEN]; } *addr;
226         struct tun_filter uf;
227         int err, alen, n, nexact;
228
229         if (copy_from_user(&uf, arg, sizeof(uf)))
230                 return -EFAULT;
231
232         if (!uf.count) {
233                 /* Disabled */
234                 filter->count = 0;
235                 return 0;
236         }
237
238         alen = ETH_ALEN * uf.count;
239         addr = kmalloc(alen, GFP_KERNEL);
240         if (!addr)
241                 return -ENOMEM;
242
243         if (copy_from_user(addr, arg + sizeof(uf), alen)) {
244                 err = -EFAULT;
245                 goto done;
246         }
247
248         /* The filter is updated without holding any locks, which is
249          * perfectly safe: we disable it first, and in the worst
250          * case we'll accept a few undesired packets. */
251         filter->count = 0;
252         wmb();
253
254         /* Use first set of addresses as an exact filter */
255         for (n = 0; n < uf.count && n < FLT_EXACT_COUNT; n++)
256                 memcpy(filter->addr[n], addr[n].u, ETH_ALEN);
257
258         nexact = n;
259
260         /* Remaining multicast addresses are hashed; a unicast
261          * address will leave the filter disabled. */
262         memset(filter->mask, 0, sizeof(filter->mask));
263         for (; n < uf.count; n++) {
264                 if (!is_multicast_ether_addr(addr[n].u)) {
265                         err = 0; /* no filter */
266                         goto done;
267                 }
268                 addr_hash_set(filter->mask, addr[n].u);
269         }
270
271         /* For ALLMULTI just set the mask to all ones.
272          * This overrides the mask populated above. */
273         if ((uf.flags & TUN_FLT_ALLMULTI))
274                 memset(filter->mask, ~0, sizeof(filter->mask));
275
276         /* Now enable the filter */
277         wmb();
278         filter->count = nexact;
279
280         /* Return the number of exact filters */
281         err = nexact;
282
283 done:
284         kfree(addr);
285         return err;
286 }
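/*
 * For reference, a rough userspace sketch of programming this filter via
 * TUNSETTXFILTER.  The layout (struct tun_filter header immediately followed
 * by count addresses) mirrors the copy_from_user() calls above; mac0, mac1
 * and tap_fd are hypothetical names, not part of this driver:
 *
 *   unsigned char buf[sizeof(struct tun_filter) + 2 * ETH_ALEN];
 *   struct tun_filter *uf = (struct tun_filter *) buf;
 *
 *   uf->flags = 0;                      // or TUN_FLT_ALLMULTI
 *   uf->count = 2;
 *   memcpy(uf->addr[0], mac0, ETH_ALEN);
 *   memcpy(uf->addr[1], mac1, ETH_ALEN);
 *   if (ioctl(tap_fd, TUNSETTXFILTER, uf) < 0)
 *           perror("TUNSETTXFILTER");
 *
 * With count <= FLT_EXACT_COUNT both addresses land in the exact-match
 * table; any further addresses must be multicast and are folded into the
 * hash mask instead.
 */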
287
288 /* Returns: 0 - drop, !=0 - accept */
289 static int run_filter(struct tap_filter *filter, const struct sk_buff *skb)
290 {
291         /* Cannot use eth_hdr(skb) here because skb_mac_header() is incorrect
292          * at this point. */
293         struct ethhdr *eh = (struct ethhdr *) skb->data;
294         int i;
295
296         /* Exact match */
297         for (i = 0; i < filter->count; i++)
298                 if (!compare_ether_addr(eh->h_dest, filter->addr[i]))
299                         return 1;
300
301         /* Inexact match (multicast only) */
302         if (is_multicast_ether_addr(eh->h_dest))
303                 return addr_hash_test(filter->mask, eh->h_dest);
304
305         return 0;
306 }
307
308 /*
309  * Checks whether the packet is accepted or not.
310  * Returns: 0 - drop, !=0 - accept
311  */
312 static int check_filter(struct tap_filter *filter, const struct sk_buff *skb)
313 {
314         if (!filter->count)
315                 return 1;
316
317         return run_filter(filter, skb);
318 }
319
320 /* Network device part of the driver */
321
322 static const struct ethtool_ops tun_ethtool_ops;
323
324 /* Net device detach from fd. */
325 static void tun_net_uninit(struct net_device *dev)
326 {
327         struct tun_struct *tun = netdev_priv(dev);
328         struct tun_file *tfile = tun->tfile;
329
330         /* Inform the methods they need to stop using the dev.
331          */
332         if (tfile) {
333                 wake_up_all(&tun->socket.wait);
334                 if (atomic_dec_and_test(&tfile->count))
335                         __tun_detach(tun);
336         }
337 }
338
339 static void tun_free_netdev(struct net_device *dev)
340 {
341         struct tun_struct *tun = netdev_priv(dev);
342
343         sock_put(tun->sk);
344 }
345
346 /* Net device open. */
347 static int tun_net_open(struct net_device *dev)
348 {
349         netif_start_queue(dev);
350         return 0;
351 }
352
353 /* Net device close. */
354 static int tun_net_close(struct net_device *dev)
355 {
356         netif_stop_queue(dev);
357         return 0;
358 }
359
360 /* Net device start xmit */
361 static int tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
362 {
363         struct tun_struct *tun = netdev_priv(dev);
364
365         DBG(KERN_INFO "%s: tun_net_xmit %d\n", tun->dev->name, skb->len);
366
367         /* Drop packet if interface is not attached */
368         if (!tun->tfile)
369                 goto drop;
370
371         /* Drop if the filter does not like it.
372          * This is a noop if the filter is disabled.
373          * Filter can be enabled only for the TAP devices. */
374         if (!check_filter(&tun->txflt, skb))
375                 goto drop;
376
377         if (skb_queue_len(&tun->readq) >= dev->tx_queue_len) {
378                 if (!(tun->flags & TUN_ONE_QUEUE)) {
379                         /* Normal queueing mode. */
380                         /* Packet scheduler handles dropping of further packets. */
381                         netif_stop_queue(dev);
382
383                         /* We won't see all dropped packets individually, so overrun
384                          * error is more appropriate. */
385                         dev->stats.tx_fifo_errors++;
386                 } else {
387                         /* Single queue mode.
388                          * Driver handles dropping of all packets itself. */
389                         goto drop;
390                 }
391         }
392
393         /* Enqueue packet */
394         skb_queue_tail(&tun->readq, skb);
395         dev->trans_start = jiffies;
396
397         /* Notify and wake up reader process */
398         if (tun->flags & TUN_FASYNC)
399                 kill_fasync(&tun->fasync, SIGIO, POLL_IN);
400         wake_up_interruptible(&tun->socket.wait);
401         return 0;
402
403 drop:
404         dev->stats.tx_dropped++;
405         kfree_skb(skb);
406         return 0;
407 }
408
409 static void tun_net_mclist(struct net_device *dev)
410 {
411         /*
412          * This callback is supposed to deal with mc filter in
413          * _rx_ path and has nothing to do with the _tx_ path.
414          * In rx path we always accept everything userspace gives us.
415          */
416         return;
417 }
418
419 #define MIN_MTU 68
420 #define MAX_MTU 65535
421
422 static int
423 tun_net_change_mtu(struct net_device *dev, int new_mtu)
424 {
425         if (new_mtu < MIN_MTU || new_mtu + dev->hard_header_len > MAX_MTU)
426                 return -EINVAL;
427         dev->mtu = new_mtu;
428         return 0;
429 }
430
431 static const struct net_device_ops tun_netdev_ops = {
432         .ndo_uninit             = tun_net_uninit,
433         .ndo_open               = tun_net_open,
434         .ndo_stop               = tun_net_close,
435         .ndo_start_xmit         = tun_net_xmit,
436         .ndo_change_mtu         = tun_net_change_mtu,
437 };
438
439 static const struct net_device_ops tap_netdev_ops = {
440         .ndo_uninit             = tun_net_uninit,
441         .ndo_open               = tun_net_open,
442         .ndo_stop               = tun_net_close,
443         .ndo_start_xmit         = tun_net_xmit,
444         .ndo_change_mtu         = tun_net_change_mtu,
445         .ndo_set_multicast_list = tun_net_mclist,
446         .ndo_set_mac_address    = eth_mac_addr,
447         .ndo_validate_addr      = eth_validate_addr,
448 };
449
450 /* Initialize net device. */
451 static void tun_net_init(struct net_device *dev)
452 {
453         struct tun_struct *tun = netdev_priv(dev);
454
455         switch (tun->flags & TUN_TYPE_MASK) {
456         case TUN_TUN_DEV:
457                 dev->netdev_ops = &tun_netdev_ops;
458
459                 /* Point-to-Point TUN Device */
460                 dev->hard_header_len = 0;
461                 dev->addr_len = 0;
462                 dev->mtu = 1500;
463
464                 /* Zero header length */
465                 dev->type = ARPHRD_NONE;
466                 dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
467                 dev->tx_queue_len = TUN_READQ_SIZE;  /* We prefer our own queue length */
468                 break;
469
470         case TUN_TAP_DEV:
471                 dev->netdev_ops = &tap_netdev_ops;
472                 /* Ethernet TAP Device */
473                 ether_setup(dev);
474
475                 random_ether_addr(dev->dev_addr);
476
477                 dev->tx_queue_len = TUN_READQ_SIZE;  /* We prefer our own queue length */
478                 break;
479         }
480 }
481
482 /* Character device part */
483
484 /* Poll */
485 static unsigned int tun_chr_poll(struct file *file, poll_table * wait)
486 {
487         struct tun_file *tfile = file->private_data;
488         struct tun_struct *tun = __tun_get(tfile);
489         struct sock *sk;
490         unsigned int mask = 0;
491
492         if (!tun)
493                 return POLLERR;
494         sk = tun->sk;
495         DBG(KERN_INFO "%s: tun_chr_poll\n", tun->dev->name);
496
497         poll_wait(file, &tun->socket.wait, wait);
498
499         if (!skb_queue_empty(&tun->readq))
500                 mask |= POLLIN | POLLRDNORM;
501
502         if (sock_writeable(sk) ||
503             (!test_and_set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
504              sock_writeable(sk)))
505                 mask |= POLLOUT | POLLWRNORM;
506
507         if (tun->dev->reg_state != NETREG_REGISTERED)
508                 mask = POLLERR;
509
510         tun_put(tun);
511         return mask;
512 }
513
514 /* prepad is the amount to reserve at front.  len is length after that.
515  * linear is a hint as to how much to copy (usually headers). */
516 static inline struct sk_buff *tun_alloc_skb(struct tun_struct *tun,
517                                             size_t prepad, size_t len,
518                                             size_t linear, int noblock)
519 {
520         struct sock *sk = tun->sk;
521         struct sk_buff *skb;
522         int err;
523
524         /* Under a page?  Don't bother with paged skb. */
525         if (prepad + len < PAGE_SIZE || !linear)
526                 linear = len;
527
528         skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
529                                    &err);
530         if (!skb)
531                 return ERR_PTR(err);
532
533         skb_reserve(skb, prepad);
534         skb_put(skb, linear);
535         skb->data_len = len - linear;
536         skb->len += len - linear;
537
538         return skb;
539 }
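/*
 * Worked example (illustrative numbers, assuming a 4 KiB PAGE_SIZE and
 * NET_IP_ALIGN == 2): a TAP write carrying a 9000-byte frame with
 * IFF_VNET_HDR and gso.hdr_len == 66 reaches here as
 * tun_alloc_skb(tun, 2, 9000, 66, ...).  Since prepad + len exceeds a page
 * and linear is non-zero, only the 66 header bytes go into the linear area
 * and the remaining 8934 bytes end up in paged fragments.
 */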
540
541 /* Get packet from user space buffer */
542 static __inline__ ssize_t tun_get_user(struct tun_struct *tun,
543                                        struct iovec *iv, size_t count,
544                                        int noblock)
545 {
546         struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) };
547         struct sk_buff *skb;
548         size_t len = count, align = 0;
549         struct virtio_net_hdr gso = { 0 };
550
551         if (!(tun->flags & TUN_NO_PI)) {
552                 if ((len -= sizeof(pi)) > count)
553                         return -EINVAL;
554
555                 if (memcpy_fromiovec((void *)&pi, iv, sizeof(pi)))
556                         return -EFAULT;
557         }
558
559         if (tun->flags & TUN_VNET_HDR) {
560                 if ((len -= sizeof(gso)) > count)
561                         return -EINVAL;
562
563                 if (memcpy_fromiovec((void *)&gso, iv, sizeof(gso)))
564                         return -EFAULT;
565
566                 if (gso.hdr_len > len)
567                         return -EINVAL;
568         }
569
570         if ((tun->flags & TUN_TYPE_MASK) == TUN_TAP_DEV) {
571                 align = NET_IP_ALIGN;
572                 if (unlikely(len < ETH_HLEN ||
573                              (gso.hdr_len && gso.hdr_len < ETH_HLEN)))
574                         return -EINVAL;
575         }
576
577         skb = tun_alloc_skb(tun, align, len, gso.hdr_len, noblock);
578         if (IS_ERR(skb)) {
579                 if (PTR_ERR(skb) != -EAGAIN)
580                         tun->dev->stats.rx_dropped++;
581                 return PTR_ERR(skb);
582         }
583
584         if (skb_copy_datagram_from_iovec(skb, 0, iv, len)) {
585                 tun->dev->stats.rx_dropped++;
586                 kfree_skb(skb);
587                 return -EFAULT;
588         }
589
590         if (gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
591                 if (!skb_partial_csum_set(skb, gso.csum_start,
592                                           gso.csum_offset)) {
593                         tun->dev->stats.rx_frame_errors++;
594                         kfree_skb(skb);
595                         return -EINVAL;
596                 }
597         } else if (tun->flags & TUN_NOCHECKSUM)
598                 skb->ip_summed = CHECKSUM_UNNECESSARY;
599
600         switch (tun->flags & TUN_TYPE_MASK) {
601         case TUN_TUN_DEV:
602                 if (tun->flags & TUN_NO_PI) {
603                         switch (skb->data[0] & 0xf0) {
604                         case 0x40:
605                                 pi.proto = htons(ETH_P_IP);
606                                 break;
607                         case 0x60:
608                                 pi.proto = htons(ETH_P_IPV6);
609                                 break;
610                         default:
611                                 tun->dev->stats.rx_dropped++;
612                                 kfree_skb(skb);
613                                 return -EINVAL;
614                         }
615                 }
616
617                 skb_reset_mac_header(skb);
618                 skb->protocol = pi.proto;
619                 skb->dev = tun->dev;
620                 break;
621         case TUN_TAP_DEV:
622                 skb->protocol = eth_type_trans(skb, tun->dev);
623                 break;
624         }
625
626         if (gso.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
627                 pr_debug("GSO!\n");
628                 switch (gso.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
629                 case VIRTIO_NET_HDR_GSO_TCPV4:
630                         skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
631                         break;
632                 case VIRTIO_NET_HDR_GSO_TCPV6:
633                         skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
634                         break;
635                 default:
636                         tun->dev->stats.rx_frame_errors++;
637                         kfree_skb(skb);
638                         return -EINVAL;
639                 }
640
641                 if (gso.gso_type & VIRTIO_NET_HDR_GSO_ECN)
642                         skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
643
644                 skb_shinfo(skb)->gso_size = gso.gso_size;
645                 if (skb_shinfo(skb)->gso_size == 0) {
646                         tun->dev->stats.rx_frame_errors++;
647                         kfree_skb(skb);
648                         return -EINVAL;
649                 }
650
651                 /* Header must be checked, and gso_segs computed. */
652                 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
653                 skb_shinfo(skb)->gso_segs = 0;
654         }
655
656         netif_rx_ni(skb);
657
658         tun->dev->stats.rx_packets++;
659         tun->dev->stats.rx_bytes += len;
660
661         return count;
662 }
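/*
 * For orientation, the byte layout consumed above from each write() is,
 * in this order (matching the memcpy_fromiovec() calls):
 *
 *   [ struct tun_pi         ]  present unless IFF_NO_PI was requested
 *   [ struct virtio_net_hdr ]  present only with IFF_VNET_HDR
 *   [ frame data: raw IP packet (tun) or Ethernet frame (tap) ]
 *
 * So for a plain IFF_TAP | IFF_NO_PI device a userspace sketch is simply
 * (tap_fd, frame and frame_len are hypothetical names):
 *
 *   ssize_t n = write(tap_fd, frame, frame_len);
 */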
663
664 static ssize_t tun_chr_aio_write(struct kiocb *iocb, const struct iovec *iv,
665                               unsigned long count, loff_t pos)
666 {
667         struct file *file = iocb->ki_filp;
668         struct tun_struct *tun = tun_get(file);
669         ssize_t result;
670
671         if (!tun)
672                 return -EBADFD;
673
674         DBG(KERN_INFO "%s: tun_chr_write %ld\n", tun->dev->name, count);
675
676         result = tun_get_user(tun, (struct iovec *)iv, iov_length(iv, count),
677                               file->f_flags & O_NONBLOCK);
678
679         tun_put(tun);
680         return result;
681 }
682
683 /* Put packet to the user space buffer */
684 static __inline__ ssize_t tun_put_user(struct tun_struct *tun,
685                                        struct sk_buff *skb,
686                                        struct iovec *iv, int len)
687 {
688         struct tun_pi pi = { 0, skb->protocol };
689         ssize_t total = 0;
690
691         if (!(tun->flags & TUN_NO_PI)) {
692                 if ((len -= sizeof(pi)) < 0)
693                         return -EINVAL;
694
695                 if (len < skb->len) {
696                         /* Packet will be stripped */
697                         pi.flags |= TUN_PKT_STRIP;
698                 }
699
700                 if (memcpy_toiovec(iv, (void *) &pi, sizeof(pi)))
701                         return -EFAULT;
702                 total += sizeof(pi);
703         }
704
705         if (tun->flags & TUN_VNET_HDR) {
706                 struct virtio_net_hdr gso = { 0 }; /* no info leak */
707                 if ((len -= sizeof(gso)) < 0)
708                         return -EINVAL;
709
710                 if (skb_is_gso(skb)) {
711                         struct skb_shared_info *sinfo = skb_shinfo(skb);
712
713                         /* This is a hint as to how much should be linear. */
714                         gso.hdr_len = skb_headlen(skb);
715                         gso.gso_size = sinfo->gso_size;
716                         if (sinfo->gso_type & SKB_GSO_TCPV4)
717                                 gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
718                         else if (sinfo->gso_type & SKB_GSO_TCPV6)
719                                 gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
720                         else
721                                 BUG();
722                         if (sinfo->gso_type & SKB_GSO_TCP_ECN)
723                                 gso.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
724                 } else
725                         gso.gso_type = VIRTIO_NET_HDR_GSO_NONE;
726
727                 if (skb->ip_summed == CHECKSUM_PARTIAL) {
728                         gso.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
729                         gso.csum_start = skb->csum_start - skb_headroom(skb);
730                         gso.csum_offset = skb->csum_offset;
731                 } /* else everything is zero */
732
733                 if (unlikely(memcpy_toiovec(iv, (void *)&gso, sizeof(gso))))
734                         return -EFAULT;
735                 total += sizeof(gso);
736         }
737
738         len = min_t(int, skb->len, len);
739
740         skb_copy_datagram_iovec(skb, 0, iv, len);
741         total += len;
742
743         tun->dev->stats.tx_packets++;
744         tun->dev->stats.tx_bytes += len;
745
746         return total;
747 }
748
749 static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv,
750                             unsigned long count, loff_t pos)
751 {
752         struct file *file = iocb->ki_filp;
753         struct tun_file *tfile = file->private_data;
754         struct tun_struct *tun = __tun_get(tfile);
755         DECLARE_WAITQUEUE(wait, current);
756         struct sk_buff *skb;
757         ssize_t len, ret = 0;
758
759         if (!tun)
760                 return -EBADFD;
761
762         DBG(KERN_INFO "%s: tun_chr_read\n", tun->dev->name);
763
764         len = iov_length(iv, count);
765         if (len < 0) {
766                 ret = -EINVAL;
767                 goto out;
768         }
769
770         add_wait_queue(&tun->socket.wait, &wait);
771         while (len) {
772                 current->state = TASK_INTERRUPTIBLE;
773
774                 /* Read frames from the queue */
775                 if (!(skb=skb_dequeue(&tun->readq))) {
776                         if (file->f_flags & O_NONBLOCK) {
777                                 ret = -EAGAIN;
778                                 break;
779                         }
780                         if (signal_pending(current)) {
781                                 ret = -ERESTARTSYS;
782                                 break;
783                         }
784                         if (tun->dev->reg_state != NETREG_REGISTERED) {
785                                 ret = -EIO;
786                                 break;
787                         }
788
789                         /* Nothing to read, let's sleep */
790                         schedule();
791                         continue;
792                 }
793                 netif_wake_queue(tun->dev);
794
795                 ret = tun_put_user(tun, skb, (struct iovec *) iv, len);
796                 kfree_skb(skb);
797                 break;
798         }
799
800         current->state = TASK_RUNNING;
801         remove_wait_queue(&tun->socket.wait, &wait);
802
803 out:
804         tun_put(tun);
805         return ret;
806 }
807
808 static void tun_setup(struct net_device *dev)
809 {
810         struct tun_struct *tun = netdev_priv(dev);
811
812         skb_queue_head_init(&tun->readq);
813
814         tun->owner = -1;
815         tun->group = -1;
816
817         dev->ethtool_ops = &tun_ethtool_ops;
818         dev->destructor = tun_free_netdev;
819 }
820
821 /* Trivial set of netlink ops to allow deleting tun or tap
822  * device with netlink.
823  */
824 static int tun_validate(struct nlattr *tb[], struct nlattr *data[])
825 {
826         return -EINVAL;
827 }
828
829 static struct rtnl_link_ops tun_link_ops __read_mostly = {
830         .kind           = DRV_NAME,
831         .priv_size      = sizeof(struct tun_struct),
832         .setup          = tun_setup,
833         .validate       = tun_validate,
834 };
835
836 static void tun_sock_write_space(struct sock *sk)
837 {
838         struct tun_struct *tun;
839
840         if (!sock_writeable(sk))
841                 return;
842
843         if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
844                 wake_up_interruptible_sync(sk->sk_sleep);
845
846         if (!test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags))
847                 return;
848
849         tun = container_of(sk, struct tun_sock, sk)->tun;
850         kill_fasync(&tun->fasync, SIGIO, POLL_OUT);
851 }
852
853 static void tun_sock_destruct(struct sock *sk)
854 {
855         free_netdev(container_of(sk, struct tun_sock, sk)->tun->dev);
856 }
857
858 static struct proto tun_proto = {
859         .name           = "tun",
860         .owner          = THIS_MODULE,
861         .obj_size       = sizeof(struct tun_sock),
862 };
863
864 static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
865 {
866         struct sock *sk;
867         struct tun_struct *tun;
868         struct net_device *dev;
869         int err;
870
871         dev = __dev_get_by_name(net, ifr->ifr_name);
872         if (dev) {
873                 if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops)
874                         tun = netdev_priv(dev);
875                 else if ((ifr->ifr_flags & IFF_TAP) && dev->netdev_ops == &tap_netdev_ops)
876                         tun = netdev_priv(dev);
877                 else
878                         return -EINVAL;
879
880                 err = tun_attach(tun, file);
881                 if (err < 0)
882                         return err;
883         }
884         else {
885                 char *name;
886                 unsigned long flags = 0;
887
888                 err = -EINVAL;
889
890                 if (!capable(CAP_NET_ADMIN))
891                         return -EPERM;
892
893                 /* Set dev type */
894                 if (ifr->ifr_flags & IFF_TUN) {
895                         /* TUN device */
896                         flags |= TUN_TUN_DEV;
897                         name = "tun%d";
898                 } else if (ifr->ifr_flags & IFF_TAP) {
899                         /* TAP device */
900                         flags |= TUN_TAP_DEV;
901                         name = "tap%d";
902                 } else
903                         goto failed;
904
905                 if (*ifr->ifr_name)
906                         name = ifr->ifr_name;
907
908                 dev = alloc_netdev(sizeof(struct tun_struct), name,
909                                    tun_setup);
910                 if (!dev)
911                         return -ENOMEM;
912
913                 dev_net_set(dev, net);
914                 dev->rtnl_link_ops = &tun_link_ops;
915
916                 tun = netdev_priv(dev);
917                 tun->dev = dev;
918                 tun->flags = flags;
919                 tun->txflt.count = 0;
920
921                 err = -ENOMEM;
922                 sk = sk_alloc(net, AF_UNSPEC, GFP_KERNEL, &tun_proto);
923                 if (!sk)
924                         goto err_free_dev;
925
926                 init_waitqueue_head(&tun->socket.wait);
927                 sock_init_data(&tun->socket, sk);
928                 sk->sk_write_space = tun_sock_write_space;
929                 sk->sk_sndbuf = INT_MAX;
930
931                 tun->sk = sk;
932                 container_of(sk, struct tun_sock, sk)->tun = tun;
933
934                 tun_net_init(dev);
935
936                 if (strchr(dev->name, '%')) {
937                         err = dev_alloc_name(dev, dev->name);
938                         if (err < 0)
939                                 goto err_free_sk;
940                 }
941
942                 err = -EINVAL;
943                 err = register_netdevice(tun->dev);
944                 if (err < 0)
945                         goto err_free_sk;
946
947                 sk->sk_destruct = tun_sock_destruct;
948
949                 err = tun_attach(tun, file);
950                 if (err < 0)
951                         goto failed;
952         }
953
954         DBG(KERN_INFO "%s: tun_set_iff\n", tun->dev->name);
955
956         if (ifr->ifr_flags & IFF_NO_PI)
957                 tun->flags |= TUN_NO_PI;
958         else
959                 tun->flags &= ~TUN_NO_PI;
960
961         if (ifr->ifr_flags & IFF_ONE_QUEUE)
962                 tun->flags |= TUN_ONE_QUEUE;
963         else
964                 tun->flags &= ~TUN_ONE_QUEUE;
965
966         if (ifr->ifr_flags & IFF_VNET_HDR)
967                 tun->flags |= TUN_VNET_HDR;
968         else
969                 tun->flags &= ~TUN_VNET_HDR;
970
971         /* Make sure persistent devices do not get stuck in
972          * xoff state.
973          */
974         if (netif_running(tun->dev))
975                 netif_wake_queue(tun->dev);
976
977         strcpy(ifr->ifr_name, tun->dev->name);
978         return 0;
979
980  err_free_sk:
981         sock_put(sk);
982  err_free_dev:
983         free_netdev(dev);
984  failed:
985         return err;
986 }
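/*
 * The usual userspace counterpart of tun_set_iff() is sketched below
 * (illustrative only, error handling trimmed; tun_alloc() is a hypothetical
 * helper name, not part of this driver):
 *
 *   #include <fcntl.h>
 *   #include <string.h>
 *   #include <unistd.h>
 *   #include <sys/ioctl.h>
 *   #include <linux/if.h>
 *   #include <linux/if_tun.h>
 *
 *   static int tun_alloc(char *name, short flags)
 *   {
 *           struct ifreq ifr;
 *           int fd = open("/dev/net/tun", O_RDWR);
 *
 *           if (fd < 0)
 *                   return -1;
 *
 *           memset(&ifr, 0, sizeof(ifr));
 *           ifr.ifr_flags = flags;                  // e.g. IFF_TAP | IFF_NO_PI
 *           if (name && *name)
 *                   strncpy(ifr.ifr_name, name, IFNAMSIZ - 1);
 *           if (ioctl(fd, TUNSETIFF, &ifr) < 0) {
 *                   close(fd);
 *                   return -1;
 *           }
 *           if (name)
 *                   strcpy(name, ifr.ifr_name);     // kernel-chosen name, e.g. "tap0"
 *           return fd;
 *   }
 */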
987
988 static int tun_get_iff(struct net *net, struct file *file, struct ifreq *ifr)
989 {
990         struct tun_struct *tun = tun_get(file);
991
992         if (!tun)
993                 return -EBADFD;
994
995         DBG(KERN_INFO "%s: tun_get_iff\n", tun->dev->name);
996
997         strcpy(ifr->ifr_name, tun->dev->name);
998
999         ifr->ifr_flags = 0;
1000
1001         if (tun->flags & TUN_TUN_DEV)
1002                 ifr->ifr_flags |= IFF_TUN;
1003         else
1004                 ifr->ifr_flags |= IFF_TAP;
1005
1006         if (tun->flags & TUN_NO_PI)
1007                 ifr->ifr_flags |= IFF_NO_PI;
1008
1009         if (tun->flags & TUN_ONE_QUEUE)
1010                 ifr->ifr_flags |= IFF_ONE_QUEUE;
1011
1012         if (tun->flags & TUN_VNET_HDR)
1013                 ifr->ifr_flags |= IFF_VNET_HDR;
1014
1015         tun_put(tun);
1016         return 0;
1017 }
1018
1019 /* This is like a cut-down ethtool ops, except done via tun fd so no
1020  * privs required. */
1021 static int set_offload(struct net_device *dev, unsigned long arg)
1022 {
1023         unsigned int old_features, features;
1024
1025         old_features = dev->features;
1026         /* Unset features, set them as we chew on the arg. */
1027         features = (old_features & ~(NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST
1028                                     |NETIF_F_TSO_ECN|NETIF_F_TSO|NETIF_F_TSO6));
1029
1030         if (arg & TUN_F_CSUM) {
1031                 features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
1032                 arg &= ~TUN_F_CSUM;
1033
1034                 if (arg & (TUN_F_TSO4|TUN_F_TSO6)) {
1035                         if (arg & TUN_F_TSO_ECN) {
1036                                 features |= NETIF_F_TSO_ECN;
1037                                 arg &= ~TUN_F_TSO_ECN;
1038                         }
1039                         if (arg & TUN_F_TSO4)
1040                                 features |= NETIF_F_TSO;
1041                         if (arg & TUN_F_TSO6)
1042                                 features |= NETIF_F_TSO6;
1043                         arg &= ~(TUN_F_TSO4|TUN_F_TSO6);
1044                 }
1045         }
1046
1047         /* This gives the user a way to test for new features in future by
1048          * trying to set them. */
1049         if (arg)
1050                 return -EINVAL;
1051
1052         dev->features = features;
1053         if (old_features != dev->features)
1054                 netdev_features_change(dev);
1055
1056         return 0;
1057 }
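/*
 * Userspace passes the TUN_F_* bits directly as the ioctl argument (by
 * value, not through a pointer), so a rough negotiation sketch looks like
 * this (fd is a hypothetical tun/tap descriptor):
 *
 *   unsigned int offload = TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 | TUN_F_TSO_ECN;
 *
 *   if (ioctl(fd, TUNSETOFFLOAD, offload) < 0)
 *           ioctl(fd, TUNSETOFFLOAD, TUN_F_CSUM);   // fall back to checksum only
 *
 * Note that the TSO bits are only accepted alongside TUN_F_CSUM, matching
 * the nesting in set_offload() above.
 */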
1058
1059 static int tun_chr_ioctl(struct inode *inode, struct file *file,
1060                          unsigned int cmd, unsigned long arg)
1061 {
1062         struct tun_file *tfile = file->private_data;
1063         struct tun_struct *tun;
1064         void __user* argp = (void __user*)arg;
1065         struct ifreq ifr;
1066         int sndbuf;
1067         int ret;
1068
1069         if (cmd == TUNSETIFF || _IOC_TYPE(cmd) == 0x89)
1070                 if (copy_from_user(&ifr, argp, sizeof ifr))
1071                         return -EFAULT;
1072
1073         if (cmd == TUNGETFEATURES) {
1074                 /* Currently this just means: "what IFF flags are valid?".
1075                  * This is needed because we never checked for invalid flags on
1076                  * TUNSETIFF. */
1077                 return put_user(IFF_TUN | IFF_TAP | IFF_NO_PI | IFF_ONE_QUEUE |
1078                                 IFF_VNET_HDR,
1079                                 (unsigned int __user*)argp);
1080         }
1081
1082         tun = __tun_get(tfile);
1083         if (cmd == TUNSETIFF && !tun) {
1084                 int err;
1085
1086                 ifr.ifr_name[IFNAMSIZ-1] = '\0';
1087
1088                 rtnl_lock();
1089                 err = tun_set_iff(tfile->net, file, &ifr);
1090                 rtnl_unlock();
1091
1092                 if (err)
1093                         return err;
1094
1095                 if (copy_to_user(argp, &ifr, sizeof(ifr)))
1096                         return -EFAULT;
1097                 return 0;
1098         }
1099
1100
1101         if (!tun)
1102                 return -EBADFD;
1103
1104         DBG(KERN_INFO "%s: tun_chr_ioctl cmd %d\n", tun->dev->name, cmd);
1105
1106         ret = 0;
1107         switch (cmd) {
1108         case TUNGETIFF:
1109                 ret = tun_get_iff(current->nsproxy->net_ns, file, &ifr);
1110                 if (ret)
1111                         break;
1112
1113                 if (copy_to_user(argp, &ifr, sizeof(ifr)))
1114                         ret = -EFAULT;
1115                 break;
1116
1117         case TUNSETNOCSUM:
1118                 /* Disable/Enable checksum */
1119                 if (arg)
1120                         tun->flags |= TUN_NOCHECKSUM;
1121                 else
1122                         tun->flags &= ~TUN_NOCHECKSUM;
1123
1124                 DBG(KERN_INFO "%s: checksum %s\n",
1125                     tun->dev->name, arg ? "disabled" : "enabled");
1126                 break;
1127
1128         case TUNSETPERSIST:
1129                 /* Disable/Enable persist mode */
1130                 if (arg)
1131                         tun->flags |= TUN_PERSIST;
1132                 else
1133                         tun->flags &= ~TUN_PERSIST;
1134
1135                 DBG(KERN_INFO "%s: persist %s\n",
1136                     tun->dev->name, arg ? "enabled" : "disabled");
1137                 break;
1138
1139         case TUNSETOWNER:
1140                 /* Set owner of the device */
1141                 tun->owner = (uid_t) arg;
1142
1143                 DBG(KERN_INFO "%s: owner set to %d\n", tun->dev->name, tun->owner);
1144                 break;
1145
1146         case TUNSETGROUP:
1147                 /* Set group of the device */
1148                 tun->group = (gid_t) arg;
1149
1150                 DBG(KERN_INFO "%s: group set to %d\n", tun->dev->name, tun->group);
1151                 break;
1152
1153         case TUNSETLINK:
1154                 /* Only allow setting the type when the interface is down */
1155                 rtnl_lock();
1156                 if (tun->dev->flags & IFF_UP) {
1157                         DBG(KERN_INFO "%s: Linktype set failed because interface is up\n",
1158                                 tun->dev->name);
1159                         ret = -EBUSY;
1160                 } else {
1161                         tun->dev->type = (int) arg;
1162                         DBG(KERN_INFO "%s: linktype set to %d\n", tun->dev->name, tun->dev->type);
1163                         ret = 0;
1164                 }
1165                 rtnl_unlock();
1166                 break;
1167
1168 #ifdef TUN_DEBUG
1169         case TUNSETDEBUG:
1170                 tun->debug = arg;
1171                 break;
1172 #endif
1173         case TUNSETOFFLOAD:
1174                 rtnl_lock();
1175                 ret = set_offload(tun->dev, arg);
1176                 rtnl_unlock();
1177                 break;
1178
1179         case TUNSETTXFILTER:
1180                 /* Can be set only for TAPs */
1181                 ret = -EINVAL;
1182                 if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV)
1183                         break;
1184                 rtnl_lock();
1185                 ret = update_filter(&tun->txflt, (void __user *)arg);
1186                 rtnl_unlock();
1187                 break;
1188
1189         case SIOCGIFHWADDR:
1190                 /* Get hw address */
1191                 memcpy(ifr.ifr_hwaddr.sa_data, tun->dev->dev_addr, ETH_ALEN);
1192                 ifr.ifr_hwaddr.sa_family = tun->dev->type;
1193                 if (copy_to_user(argp, &ifr, sizeof ifr))
1194                         ret = -EFAULT;
1195                 break;
1196
1197         case SIOCSIFHWADDR:
1198                 /* Set hw address */
1199                 DBG(KERN_DEBUG "%s: set hw address: %pM\n",
1200                         tun->dev->name, ifr.ifr_hwaddr.sa_data);
1201
1202                 rtnl_lock();
1203                 ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr);
1204                 rtnl_unlock();
1205                 break;
1206
1207         case TUNGETSNDBUF:
1208                 sndbuf = tun->sk->sk_sndbuf;
1209                 if (copy_to_user(argp, &sndbuf, sizeof(sndbuf)))
1210                         ret = -EFAULT;
1211                 break;
1212
1213         case TUNSETSNDBUF:
1214                 if (copy_from_user(&sndbuf, argp, sizeof(sndbuf))) {
1215                         ret = -EFAULT;
1216                         break;
1217                 }
1218
1219                 tun->sk->sk_sndbuf = sndbuf;
1220                 break;
1221
1222         default:
1223                 ret = -EINVAL;
1224                 break;
1225         }
1226
1227         tun_put(tun);
1228         return ret;
1229 }
1230
1231 static int tun_chr_fasync(int fd, struct file *file, int on)
1232 {
1233         struct tun_struct *tun = tun_get(file);
1234         int ret;
1235
1236         if (!tun)
1237                 return -EBADFD;
1238
1239         DBG(KERN_INFO "%s: tun_chr_fasync %d\n", tun->dev->name, on);
1240
1241         lock_kernel();
1242         if ((ret = fasync_helper(fd, file, on, &tun->fasync)) < 0)
1243                 goto out;
1244
1245         if (on) {
1246                 ret = __f_setown(file, task_pid(current), PIDTYPE_PID, 0);
1247                 if (ret)
1248                         goto out;
1249                 tun->flags |= TUN_FASYNC;
1250         } else
1251                 tun->flags &= ~TUN_FASYNC;
1252         ret = 0;
1253 out:
1254         unlock_kernel();
1255         tun_put(tun);
1256         return ret;
1257 }
1258
1259 static int tun_chr_open(struct inode *inode, struct file * file)
1260 {
1261         struct tun_file *tfile;
1262         cycle_kernel_lock();
1263         DBG1(KERN_INFO "tunX: tun_chr_open\n");
1264
1265         tfile = kmalloc(sizeof(*tfile), GFP_KERNEL);
1266         if (!tfile)
1267                 return -ENOMEM;
1268         atomic_set(&tfile->count, 0);
1269         tfile->tun = NULL;
1270         tfile->net = get_net(current->nsproxy->net_ns);
1271         file->private_data = tfile;
1272         return 0;
1273 }
1274
1275 static int tun_chr_close(struct inode *inode, struct file *file)
1276 {
1277         struct tun_file *tfile = file->private_data;
1278         struct tun_struct *tun = __tun_get(tfile);
1279
1280
1281         if (tun) {
1282                 DBG(KERN_INFO "%s: tun_chr_close\n", tun->dev->name);
1283
1284                 rtnl_lock();
1285                 __tun_detach(tun);
1286
1287                 /* If desirable, unregister the netdevice. */
1288                 if (!(tun->flags & TUN_PERSIST))
1289                         unregister_netdevice(tun->dev);
1290
1291                 rtnl_unlock();
1292         }
1293
1294         tun = tfile->tun;
1295         if (tun)
1296                 sock_put(tun->sk);
1297
1298         put_net(tfile->net);
1299         kfree(tfile);
1300
1301         return 0;
1302 }
1303
1304 static const struct file_operations tun_fops = {
1305         .owner  = THIS_MODULE,
1306         .llseek = no_llseek,
1307         .read  = do_sync_read,
1308         .aio_read  = tun_chr_aio_read,
1309         .write = do_sync_write,
1310         .aio_write = tun_chr_aio_write,
1311         .poll   = tun_chr_poll,
1312         .ioctl  = tun_chr_ioctl,
1313         .open   = tun_chr_open,
1314         .release = tun_chr_close,
1315         .fasync = tun_chr_fasync
1316 };
1317
1318 static struct miscdevice tun_miscdev = {
1319         .minor = TUN_MINOR,
1320         .name = "tun",
1321         .fops = &tun_fops,
1322 };
1323
1324 /* ethtool interface */
1325
1326 static int tun_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1327 {
1328         cmd->supported          = 0;
1329         cmd->advertising        = 0;
1330         cmd->speed              = SPEED_10;
1331         cmd->duplex             = DUPLEX_FULL;
1332         cmd->port               = PORT_TP;
1333         cmd->phy_address        = 0;
1334         cmd->transceiver        = XCVR_INTERNAL;
1335         cmd->autoneg            = AUTONEG_DISABLE;
1336         cmd->maxtxpkt           = 0;
1337         cmd->maxrxpkt           = 0;
1338         return 0;
1339 }
1340
1341 static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1342 {
1343         struct tun_struct *tun = netdev_priv(dev);
1344
1345         strcpy(info->driver, DRV_NAME);
1346         strcpy(info->version, DRV_VERSION);
1347         strcpy(info->fw_version, "N/A");
1348
1349         switch (tun->flags & TUN_TYPE_MASK) {
1350         case TUN_TUN_DEV:
1351                 strcpy(info->bus_info, "tun");
1352                 break;
1353         case TUN_TAP_DEV:
1354                 strcpy(info->bus_info, "tap");
1355                 break;
1356         }
1357 }
1358
1359 static u32 tun_get_msglevel(struct net_device *dev)
1360 {
1361 #ifdef TUN_DEBUG
1362         struct tun_struct *tun = netdev_priv(dev);
1363         return tun->debug;
1364 #else
1365         return -EOPNOTSUPP;
1366 #endif
1367 }
1368
1369 static void tun_set_msglevel(struct net_device *dev, u32 value)
1370 {
1371 #ifdef TUN_DEBUG
1372         struct tun_struct *tun = netdev_priv(dev);
1373         tun->debug = value;
1374 #endif
1375 }
1376
1377 static u32 tun_get_link(struct net_device *dev)
1378 {
1379         struct tun_struct *tun = netdev_priv(dev);
1380         return !!tun->tfile;
1381 }
1382
1383 static u32 tun_get_rx_csum(struct net_device *dev)
1384 {
1385         struct tun_struct *tun = netdev_priv(dev);
1386         return (tun->flags & TUN_NOCHECKSUM) == 0;
1387 }
1388
1389 static int tun_set_rx_csum(struct net_device *dev, u32 data)
1390 {
1391         struct tun_struct *tun = netdev_priv(dev);
1392         if (data)
1393                 tun->flags &= ~TUN_NOCHECKSUM;
1394         else
1395                 tun->flags |= TUN_NOCHECKSUM;
1396         return 0;
1397 }
1398
1399 static const struct ethtool_ops tun_ethtool_ops = {
1400         .get_settings   = tun_get_settings,
1401         .get_drvinfo    = tun_get_drvinfo,
1402         .get_msglevel   = tun_get_msglevel,
1403         .set_msglevel   = tun_set_msglevel,
1404         .get_link       = tun_get_link,
1405         .get_rx_csum    = tun_get_rx_csum,
1406         .set_rx_csum    = tun_set_rx_csum
1407 };
1408
1409
1410 static int __init tun_init(void)
1411 {
1412         int ret = 0;
1413
1414         printk(KERN_INFO "tun: %s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
1415         printk(KERN_INFO "tun: %s\n", DRV_COPYRIGHT);
1416
1417         ret = rtnl_link_register(&tun_link_ops);
1418         if (ret) {
1419                 printk(KERN_ERR "tun: Can't register link_ops\n");
1420                 goto err_linkops;
1421         }
1422
1423         ret = misc_register(&tun_miscdev);
1424         if (ret) {
1425                 printk(KERN_ERR "tun: Can't register misc device %d\n", TUN_MINOR);
1426                 goto err_misc;
1427         }
1428         return  0;
1429 err_misc:
1430         rtnl_link_unregister(&tun_link_ops);
1431 err_linkops:
1432         return ret;
1433 }
1434
1435 static void tun_cleanup(void)
1436 {
1437         misc_deregister(&tun_miscdev);
1438         rtnl_link_unregister(&tun_link_ops);
1439 }
1440
1441 module_init(tun_init);
1442 module_exit(tun_cleanup);
1443 MODULE_DESCRIPTION(DRV_DESCRIPTION);
1444 MODULE_AUTHOR(DRV_COPYRIGHT);
1445 MODULE_LICENSE("GPL");
1446 MODULE_ALIAS_MISCDEV(TUN_MINOR);