1 /*
2  *  TUN - Universal TUN/TAP device driver.
3  *  Copyright (C) 1999-2002 Maxim Krasnyansky <maxk@qualcomm.com>
4  *
5  *  This program is free software; you can redistribute it and/or modify
6  *  it under the terms of the GNU General Public License as published by
7  *  the Free Software Foundation; either version 2 of the License, or
8  *  (at your option) any later version.
9  *
10  *  This program is distributed in the hope that it will be useful,
11  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
12  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13  *  GNU General Public License for more details.
14  *
15  *  $Id: tun.c,v 1.15 2002/03/01 02:44:24 maxk Exp $
16  */
17
18 /*
19  *  Changes:
20  *
21  *  Mike Kershaw <dragorn@kismetwireless.net> 2005/08/14
22  *    Add TUNSETLINK ioctl to set the link encapsulation
23  *
24  *  Mark Smith <markzzzsmith@yahoo.com.au>
25  *    Use random_ether_addr() for tap MAC address.
26  *
27  *  Harald Roelle <harald.roelle@ifi.lmu.de>  2004/04/20
28  *    Fixes in packet dropping, queue length setting and queue wakeup.
29  *    Increased default tx queue length.
30  *    Added ethtool API.
31  *    Minor cleanups
32  *
33  *  Daniel Podlejski <underley@underley.eu.org>
34  *    Modifications for 2.3.99-pre5 kernel.
35  */
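
/*
 * Illustrative userspace usage (a minimal sketch only, not part of the
 * driver itself; the character device node is conventionally /dev/net/tun):
 *
 *    struct ifreq ifr;
 *    int fd = open("/dev/net/tun", O_RDWR);
 *
 *    memset(&ifr, 0, sizeof(ifr));
 *    ifr.ifr_flags = IFF_TUN | IFF_NO_PI;
 *    strncpy(ifr.ifr_name, "tun%d", IFNAMSIZ);
 *    if (ioctl(fd, TUNSETIFF, (void *) &ifr) < 0)
 *            perror("TUNSETIFF");
 *
 * Once TUNSETIFF succeeds, read() and write() on fd carry raw IP packets
 * for the interface whose final name is returned in ifr.ifr_name.
 */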
36
37 #define DRV_NAME        "tun"
38 #define DRV_VERSION     "1.6"
39 #define DRV_DESCRIPTION "Universal TUN/TAP device driver"
40 #define DRV_COPYRIGHT   "(C) 1999-2004 Max Krasnyansky <maxk@qualcomm.com>"
41
42 #include <linux/module.h>
43 #include <linux/errno.h>
44 #include <linux/kernel.h>
45 #include <linux/major.h>
46 #include <linux/slab.h>
47 #include <linux/smp_lock.h>
48 #include <linux/poll.h>
49 #include <linux/fcntl.h>
50 #include <linux/init.h>
51 #include <linux/skbuff.h>
52 #include <linux/netdevice.h>
53 #include <linux/etherdevice.h>
54 #include <linux/miscdevice.h>
55 #include <linux/ethtool.h>
56 #include <linux/rtnetlink.h>
57 #include <linux/if.h>
58 #include <linux/if_arp.h>
59 #include <linux/if_ether.h>
60 #include <linux/if_tun.h>
61 #include <linux/crc32.h>
62 #include <linux/nsproxy.h>
63 #include <linux/virtio_net.h>
64 #include <net/net_namespace.h>
65 #include <net/netns/generic.h>
66 #include <net/rtnetlink.h>
67 #include <net/sock.h>
68
69 #include <asm/system.h>
70 #include <asm/uaccess.h>
71
72 /* Uncomment to enable debugging */
73 /* #define TUN_DEBUG 1 */
74
75 #ifdef TUN_DEBUG
76 static int debug;
77
#define DBG  if (tun->debug) printk
#define DBG1 if (debug == 2) printk
#else
#define DBG(a...)
#define DBG1(a...)
83 #endif
84
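/* TUNSETTXFILTER state for a TAP device: up to FLT_EXACT_COUNT exact MAC
 * addresses plus a 64-bit hash bitmap for any additional multicast
 * addresses. */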
85 #define FLT_EXACT_COUNT 8
86 struct tap_filter {
87         unsigned int    count;    /* Number of addrs. Zero means disabled */
88         u32             mask[2];  /* Mask of the hashed addrs */
89         unsigned char   addr[FLT_EXACT_COUNT][ETH_ALEN];
90 };
91
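/* Per open file descriptor state.  'count' is a reference count on the
 * attachment: it is non-zero only while the descriptor is attached to a
 * tun/tap device, which 'tun' then points at. */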
92 struct tun_file {
93         atomic_t count;
94         struct tun_struct *tun;
95         struct net *net;
96         wait_queue_head_t       read_wait;
97 };
98
99 struct tun_sock;
100
101 struct tun_struct {
102         struct tun_file         *tfile;
103         unsigned int            flags;
104         uid_t                   owner;
105         gid_t                   group;
106
107         struct sk_buff_head     readq;
108
109         struct net_device       *dev;
110         struct fasync_struct    *fasync;
111
112         struct tap_filter       txflt;
113         struct sock             *sk;
114         struct socket           socket;
115
116 #ifdef TUN_DEBUG
117         int debug;
118 #endif
119 };
120
121 struct tun_sock {
122         struct sock             sk;
123         struct tun_struct       *tun;
124 };
125
126 static inline struct tun_sock *tun_sk(struct sock *sk)
127 {
128         return container_of(sk, struct tun_sock, sk);
129 }
130
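/* Attach an open file descriptor to a device.  Called with RTNL held;
 * enforces the owner/group restrictions (unless the caller has
 * CAP_NET_ADMIN) and fails with -EBUSY if a descriptor is already
 * attached. */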
131 static int tun_attach(struct tun_struct *tun, struct file *file)
132 {
133         struct tun_file *tfile = file->private_data;
134         const struct cred *cred = current_cred();
135         int err;
136
137         ASSERT_RTNL();
138
139         /* Check permissions */
140         if (((tun->owner != -1 && cred->euid != tun->owner) ||
141              (tun->group != -1 && !in_egroup_p(tun->group))) &&
142                 !capable(CAP_NET_ADMIN))
143                 return -EPERM;
144
145         netif_tx_lock_bh(tun->dev);
146
147         err = -EINVAL;
148         if (tfile->tun)
149                 goto out;
150
151         err = -EBUSY;
152         if (tun->tfile)
153                 goto out;
154
155         err = 0;
156         tfile->tun = tun;
157         tun->tfile = tfile;
158         dev_hold(tun->dev);
159         atomic_inc(&tfile->count);
160
161 out:
162         netif_tx_unlock_bh(tun->dev);
163         return err;
164 }
165
166 static void __tun_detach(struct tun_struct *tun)
167 {
168         struct tun_file *tfile = tun->tfile;
169
170         /* Detach from net device */
171         netif_tx_lock_bh(tun->dev);
172         tfile->tun = NULL;
173         tun->tfile = NULL;
174         netif_tx_unlock_bh(tun->dev);
175
176         /* Drop read queue */
177         skb_queue_purge(&tun->readq);
178
179         /* Drop the extra count on the net device */
180         dev_put(tun->dev);
181 }
182
183 static void tun_detach(struct tun_struct *tun)
184 {
185         rtnl_lock();
186         __tun_detach(tun);
187         rtnl_unlock();
188 }
189
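/* Take a reference on the attachment and return the attached device, or
 * NULL if the descriptor is not attached.  Balanced by tun_put(). */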
190 static struct tun_struct *__tun_get(struct tun_file *tfile)
191 {
192         struct tun_struct *tun = NULL;
193
194         if (atomic_inc_not_zero(&tfile->count))
195                 tun = tfile->tun;
196
197         return tun;
198 }
199
200 static struct tun_struct *tun_get(struct file *file)
201 {
202         return __tun_get(file->private_data);
203 }
204
205 static void tun_put(struct tun_struct *tun)
206 {
207         struct tun_file *tfile = tun->tfile;
208
209         if (atomic_dec_and_test(&tfile->count))
210                 tun_detach(tfile->tun);
211 }
212
/* TAP MAC address filtering */
214 static void addr_hash_set(u32 *mask, const u8 *addr)
215 {
216         int n = ether_crc(ETH_ALEN, addr) >> 26;
217         mask[n >> 5] |= (1 << (n & 31));
218 }
219
220 static unsigned int addr_hash_test(const u32 *mask, const u8 *addr)
221 {
222         int n = ether_crc(ETH_ALEN, addr) >> 26;
223         return mask[n >> 5] & (1 << (n & 31));
224 }
225
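/* Install a new TX filter from the userspace 'struct tun_filter' at 'arg'
 * (a header followed by uf.count hardware addresses).  Returns the number
 * of exact-match entries installed (possibly zero) or a negative errno. */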
226 static int update_filter(struct tap_filter *filter, void __user *arg)
227 {
228         struct { u8 u[ETH_ALEN]; } *addr;
229         struct tun_filter uf;
230         int err, alen, n, nexact;
231
232         if (copy_from_user(&uf, arg, sizeof(uf)))
233                 return -EFAULT;
234
235         if (!uf.count) {
236                 /* Disabled */
237                 filter->count = 0;
238                 return 0;
239         }
240
241         alen = ETH_ALEN * uf.count;
242         addr = kmalloc(alen, GFP_KERNEL);
243         if (!addr)
244                 return -ENOMEM;
245
246         if (copy_from_user(addr, arg + sizeof(uf), alen)) {
247                 err = -EFAULT;
248                 goto done;
249         }
250
        /* The filter is updated without holding any locks, which is
         * perfectly safe: we disable it first, and in the worst case
         * we accept a few undesired packets while it is rebuilt. */
254         filter->count = 0;
255         wmb();
256
257         /* Use first set of addresses as an exact filter */
258         for (n = 0; n < uf.count && n < FLT_EXACT_COUNT; n++)
259                 memcpy(filter->addr[n], addr[n].u, ETH_ALEN);
260
261         nexact = n;
262
263         /* Remaining multicast addresses are hashed,
264          * unicast will leave the filter disabled. */
265         memset(filter->mask, 0, sizeof(filter->mask));
266         for (; n < uf.count; n++) {
267                 if (!is_multicast_ether_addr(addr[n].u)) {
268                         err = 0; /* no filter */
269                         goto done;
270                 }
271                 addr_hash_set(filter->mask, addr[n].u);
272         }
273
274         /* For ALLMULTI just set the mask to all ones.
275          * This overrides the mask populated above. */
276         if ((uf.flags & TUN_FLT_ALLMULTI))
277                 memset(filter->mask, ~0, sizeof(filter->mask));
278
279         /* Now enable the filter */
280         wmb();
281         filter->count = nexact;
282
283         /* Return the number of exact filters */
284         err = nexact;
285
286 done:
287         kfree(addr);
288         return err;
289 }
290
291 /* Returns: 0 - drop, !=0 - accept */
292 static int run_filter(struct tap_filter *filter, const struct sk_buff *skb)
293 {
        /* Cannot use eth_hdr(skb) here because the skb mac header has not
         * been set up yet at this point. */
296         struct ethhdr *eh = (struct ethhdr *) skb->data;
297         int i;
298
299         /* Exact match */
300         for (i = 0; i < filter->count; i++)
301                 if (!compare_ether_addr(eh->h_dest, filter->addr[i]))
302                         return 1;
303
304         /* Inexact match (multicast only) */
305         if (is_multicast_ether_addr(eh->h_dest))
306                 return addr_hash_test(filter->mask, eh->h_dest);
307
308         return 0;
309 }
310
311 /*
312  * Checks whether the packet is accepted or not.
313  * Returns: 0 - drop, !=0 - accept
314  */
315 static int check_filter(struct tap_filter *filter, const struct sk_buff *skb)
316 {
317         if (!filter->count)
318                 return 1;
319
320         return run_filter(filter, skb);
321 }
322
323 /* Network device part of the driver */
324
325 static const struct ethtool_ops tun_ethtool_ops;
326
327 /* Net device detach from fd. */
328 static void tun_net_uninit(struct net_device *dev)
329 {
330         struct tun_struct *tun = netdev_priv(dev);
331         struct tun_file *tfile = tun->tfile;
332
        /* Inform the methods that they need to stop using the dev. */
335         if (tfile) {
336                 wake_up_all(&tfile->read_wait);
337                 if (atomic_dec_and_test(&tfile->count))
338                         __tun_detach(tun);
339         }
340 }
341
342 /* Net device open. */
343 static int tun_net_open(struct net_device *dev)
344 {
345         netif_start_queue(dev);
346         return 0;
347 }
348
349 /* Net device close. */
350 static int tun_net_close(struct net_device *dev)
351 {
352         netif_stop_queue(dev);
353         return 0;
354 }
355
356 /* Net device start xmit */
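/*
 * Frames are never transmitted on real hardware here: they are queued on
 * tun->readq for the attached reader.  Once the queue reaches the device
 * tx_queue_len the netdev queue is stopped, unless TUN_ONE_QUEUE is set,
 * in which case excess packets are simply dropped.
 */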
357 static int tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
358 {
359         struct tun_struct *tun = netdev_priv(dev);
360
361         DBG(KERN_INFO "%s: tun_net_xmit %d\n", tun->dev->name, skb->len);
362
363         /* Drop packet if interface is not attached */
364         if (!tun->tfile)
365                 goto drop;
366
367         /* Drop if the filter does not like it.
368          * This is a noop if the filter is disabled.
369          * Filter can be enabled only for the TAP devices. */
370         if (!check_filter(&tun->txflt, skb))
371                 goto drop;
372
373         if (skb_queue_len(&tun->readq) >= dev->tx_queue_len) {
374                 if (!(tun->flags & TUN_ONE_QUEUE)) {
375                         /* Normal queueing mode. */
376                         /* Packet scheduler handles dropping of further packets. */
377                         netif_stop_queue(dev);
378
379                         /* We won't see all dropped packets individually, so overrun
380                          * error is more appropriate. */
381                         dev->stats.tx_fifo_errors++;
382                 } else {
383                         /* Single queue mode.
384                          * Driver handles dropping of all packets itself. */
385                         goto drop;
386                 }
387         }
388
389         /* Enqueue packet */
390         skb_queue_tail(&tun->readq, skb);
391         dev->trans_start = jiffies;
392
393         /* Notify and wake up reader process */
394         if (tun->flags & TUN_FASYNC)
395                 kill_fasync(&tun->fasync, SIGIO, POLL_IN);
396         wake_up_interruptible(&tun->tfile->read_wait);
397         return 0;
398
399 drop:
400         dev->stats.tx_dropped++;
401         kfree_skb(skb);
402         return 0;
403 }
404
405 static void tun_net_mclist(struct net_device *dev)
406 {
407         /*
408          * This callback is supposed to deal with mc filter in
409          * _rx_ path and has nothing to do with the _tx_ path.
410          * In rx path we always accept everything userspace gives us.
411          */
412         return;
413 }
414
415 #define MIN_MTU 68
416 #define MAX_MTU 65535
417
418 static int
419 tun_net_change_mtu(struct net_device *dev, int new_mtu)
420 {
421         if (new_mtu < MIN_MTU || new_mtu + dev->hard_header_len > MAX_MTU)
422                 return -EINVAL;
423         dev->mtu = new_mtu;
424         return 0;
425 }
426
427 static const struct net_device_ops tun_netdev_ops = {
428         .ndo_uninit             = tun_net_uninit,
429         .ndo_open               = tun_net_open,
430         .ndo_stop               = tun_net_close,
431         .ndo_start_xmit         = tun_net_xmit,
432         .ndo_change_mtu         = tun_net_change_mtu,
433 };
434
435 static const struct net_device_ops tap_netdev_ops = {
436         .ndo_uninit             = tun_net_uninit,
437         .ndo_open               = tun_net_open,
438         .ndo_stop               = tun_net_close,
439         .ndo_start_xmit         = tun_net_xmit,
440         .ndo_change_mtu         = tun_net_change_mtu,
441         .ndo_set_multicast_list = tun_net_mclist,
442         .ndo_set_mac_address    = eth_mac_addr,
443         .ndo_validate_addr      = eth_validate_addr,
444 };
445
446 /* Initialize net device. */
447 static void tun_net_init(struct net_device *dev)
448 {
449         struct tun_struct *tun = netdev_priv(dev);
450
451         switch (tun->flags & TUN_TYPE_MASK) {
452         case TUN_TUN_DEV:
453                 dev->netdev_ops = &tun_netdev_ops;
454
455                 /* Point-to-Point TUN Device */
456                 dev->hard_header_len = 0;
457                 dev->addr_len = 0;
458                 dev->mtu = 1500;
459
460                 /* Zero header length */
461                 dev->type = ARPHRD_NONE;
462                 dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
463                 dev->tx_queue_len = TUN_READQ_SIZE;  /* We prefer our own queue length */
464                 break;
465
466         case TUN_TAP_DEV:
467                 dev->netdev_ops = &tap_netdev_ops;
468                 /* Ethernet TAP Device */
469                 ether_setup(dev);
470
471                 random_ether_addr(dev->dev_addr);
472
473                 dev->tx_queue_len = TUN_READQ_SIZE;  /* We prefer our own queue length */
474                 break;
475         }
476 }
477
478 /* Character device part */
479
480 /* Poll */
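/*
 * Report POLLIN while frames are queued for the reader and POLLOUT while
 * the internal socket still has send buffer space for incoming writes.
 */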
481 static unsigned int tun_chr_poll(struct file *file, poll_table * wait)
482 {
        struct tun_file *tfile = file->private_data;
        struct tun_struct *tun = __tun_get(tfile);
        struct sock *sk;
        unsigned int mask = 0;

        if (!tun)
                return POLLERR;

        sk = tun->sk;
490
491         DBG(KERN_INFO "%s: tun_chr_poll\n", tun->dev->name);
492
493         poll_wait(file, &tfile->read_wait, wait);
494
495         if (!skb_queue_empty(&tun->readq))
496                 mask |= POLLIN | POLLRDNORM;
497
498         if (sock_writeable(sk) ||
499             (!test_and_set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
500              sock_writeable(sk)))
501                 mask |= POLLOUT | POLLWRNORM;
502
503         if (tun->dev->reg_state != NETREG_REGISTERED)
504                 mask = POLLERR;
505
506         tun_put(tun);
507         return mask;
508 }
509
510 /* prepad is the amount to reserve at front.  len is length after that.
511  * linear is a hint as to how much to copy (usually headers). */
512 static inline struct sk_buff *tun_alloc_skb(struct tun_struct *tun,
513                                             size_t prepad, size_t len,
514                                             size_t linear, int noblock)
515 {
516         struct sock *sk = tun->sk;
517         struct sk_buff *skb;
518         int err;
519
520         /* Under a page?  Don't bother with paged skb. */
521         if (prepad + len < PAGE_SIZE || !linear)
522                 linear = len;
523
524         skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
525                                    &err);
526         if (!skb)
527                 return ERR_PTR(err);
528
529         skb_reserve(skb, prepad);
530         skb_put(skb, linear);
531         skb->data_len = len - linear;
532         skb->len += len - linear;
533
534         return skb;
535 }
536
537 /* Get packet from user space buffer */
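/*
 * Expected buffer layout: an optional struct tun_pi (unless TUN_NO_PI is
 * set), then an optional struct virtio_net_hdr (if TUN_VNET_HDR is set),
 * followed by the raw frame itself.
 */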
538 static __inline__ ssize_t tun_get_user(struct tun_struct *tun,
539                                        struct iovec *iv, size_t count,
540                                        int noblock)
541 {
542         struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) };
543         struct sk_buff *skb;
544         size_t len = count, align = 0;
545         struct virtio_net_hdr gso = { 0 };
546
547         if (!(tun->flags & TUN_NO_PI)) {
548                 if ((len -= sizeof(pi)) > count)
549                         return -EINVAL;
550
                if (memcpy_fromiovec((void *)&pi, iv, sizeof(pi)))
552                         return -EFAULT;
553         }
554
555         if (tun->flags & TUN_VNET_HDR) {
556                 if ((len -= sizeof(gso)) > count)
557                         return -EINVAL;
558
559                 if (memcpy_fromiovec((void *)&gso, iv, sizeof(gso)))
560                         return -EFAULT;
561
562                 if (gso.hdr_len > len)
563                         return -EINVAL;
564         }
565
566         if ((tun->flags & TUN_TYPE_MASK) == TUN_TAP_DEV) {
567                 align = NET_IP_ALIGN;
568                 if (unlikely(len < ETH_HLEN ||
569                              (gso.hdr_len && gso.hdr_len < ETH_HLEN)))
570                         return -EINVAL;
571         }
572
573         skb = tun_alloc_skb(tun, align, len, gso.hdr_len, noblock);
574         if (IS_ERR(skb)) {
575                 if (PTR_ERR(skb) != -EAGAIN)
576                         tun->dev->stats.rx_dropped++;
577                 return PTR_ERR(skb);
578         }
579
580         if (skb_copy_datagram_from_iovec(skb, 0, iv, len)) {
581                 tun->dev->stats.rx_dropped++;
582                 kfree_skb(skb);
583                 return -EFAULT;
584         }
585
586         if (gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
587                 if (!skb_partial_csum_set(skb, gso.csum_start,
588                                           gso.csum_offset)) {
589                         tun->dev->stats.rx_frame_errors++;
590                         kfree_skb(skb);
591                         return -EINVAL;
592                 }
593         } else if (tun->flags & TUN_NOCHECKSUM)
594                 skb->ip_summed = CHECKSUM_UNNECESSARY;
595
596         switch (tun->flags & TUN_TYPE_MASK) {
597         case TUN_TUN_DEV:
598                 if (tun->flags & TUN_NO_PI) {
599                         switch (skb->data[0] & 0xf0) {
600                         case 0x40:
601                                 pi.proto = htons(ETH_P_IP);
602                                 break;
603                         case 0x60:
604                                 pi.proto = htons(ETH_P_IPV6);
605                                 break;
606                         default:
607                                 tun->dev->stats.rx_dropped++;
608                                 kfree_skb(skb);
609                                 return -EINVAL;
610                         }
611                 }
612
613                 skb_reset_mac_header(skb);
614                 skb->protocol = pi.proto;
615                 skb->dev = tun->dev;
616                 break;
617         case TUN_TAP_DEV:
618                 skb->protocol = eth_type_trans(skb, tun->dev);
619                 break;
        }
621
622         if (gso.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
623                 pr_debug("GSO!\n");
624                 switch (gso.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
625                 case VIRTIO_NET_HDR_GSO_TCPV4:
626                         skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
627                         break;
628                 case VIRTIO_NET_HDR_GSO_TCPV6:
629                         skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
630                         break;
631                 default:
632                         tun->dev->stats.rx_frame_errors++;
633                         kfree_skb(skb);
634                         return -EINVAL;
635                 }
636
637                 if (gso.gso_type & VIRTIO_NET_HDR_GSO_ECN)
638                         skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
639
640                 skb_shinfo(skb)->gso_size = gso.gso_size;
641                 if (skb_shinfo(skb)->gso_size == 0) {
642                         tun->dev->stats.rx_frame_errors++;
643                         kfree_skb(skb);
644                         return -EINVAL;
645                 }
646
647                 /* Header must be checked, and gso_segs computed. */
648                 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
649                 skb_shinfo(skb)->gso_segs = 0;
650         }
651
652         netif_rx_ni(skb);
653
654         tun->dev->stats.rx_packets++;
655         tun->dev->stats.rx_bytes += len;
656
657         return count;
658 }
659
660 static ssize_t tun_chr_aio_write(struct kiocb *iocb, const struct iovec *iv,
661                               unsigned long count, loff_t pos)
662 {
663         struct file *file = iocb->ki_filp;
664         struct tun_struct *tun = tun_get(file);
665         ssize_t result;
666
667         if (!tun)
668                 return -EBADFD;
669
670         DBG(KERN_INFO "%s: tun_chr_write %ld\n", tun->dev->name, count);
671
672         result = tun_get_user(tun, (struct iovec *)iv, iov_length(iv, count),
673                               file->f_flags & O_NONBLOCK);
674
675         tun_put(tun);
676         return result;
677 }
678
679 /* Put packet to the user space buffer */
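/*
 * Mirrors tun_get_user(): an optional struct tun_pi (with TUN_PKT_STRIP
 * set when the buffer is too small for the whole frame), an optional
 * struct virtio_net_hdr, then as much of the frame as fits in the iovec.
 */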
680 static __inline__ ssize_t tun_put_user(struct tun_struct *tun,
681                                        struct sk_buff *skb,
682                                        struct iovec *iv, int len)
683 {
684         struct tun_pi pi = { 0, skb->protocol };
685         ssize_t total = 0;
686
687         if (!(tun->flags & TUN_NO_PI)) {
688                 if ((len -= sizeof(pi)) < 0)
689                         return -EINVAL;
690
691                 if (len < skb->len) {
                        /* Packet will be stripped */
693                         pi.flags |= TUN_PKT_STRIP;
694                 }
695
696                 if (memcpy_toiovec(iv, (void *) &pi, sizeof(pi)))
697                         return -EFAULT;
698                 total += sizeof(pi);
699         }
700
701         if (tun->flags & TUN_VNET_HDR) {
702                 struct virtio_net_hdr gso = { 0 }; /* no info leak */
703                 if ((len -= sizeof(gso)) < 0)
704                         return -EINVAL;
705
706                 if (skb_is_gso(skb)) {
707                         struct skb_shared_info *sinfo = skb_shinfo(skb);
708
709                         /* This is a hint as to how much should be linear. */
710                         gso.hdr_len = skb_headlen(skb);
711                         gso.gso_size = sinfo->gso_size;
712                         if (sinfo->gso_type & SKB_GSO_TCPV4)
713                                 gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
714                         else if (sinfo->gso_type & SKB_GSO_TCPV6)
715                                 gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
716                         else
717                                 BUG();
718                         if (sinfo->gso_type & SKB_GSO_TCP_ECN)
719                                 gso.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
720                 } else
721                         gso.gso_type = VIRTIO_NET_HDR_GSO_NONE;
722
723                 if (skb->ip_summed == CHECKSUM_PARTIAL) {
724                         gso.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
725                         gso.csum_start = skb->csum_start - skb_headroom(skb);
726                         gso.csum_offset = skb->csum_offset;
727                 } /* else everything is zero */
728
729                 if (unlikely(memcpy_toiovec(iv, (void *)&gso, sizeof(gso))))
730                         return -EFAULT;
731                 total += sizeof(gso);
732         }
733
734         len = min_t(int, skb->len, len);
735
736         skb_copy_datagram_iovec(skb, 0, iv, len);
737         total += len;
738
739         tun->dev->stats.tx_packets++;
740         tun->dev->stats.tx_bytes += len;
741
742         return total;
743 }
744
745 static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv,
746                             unsigned long count, loff_t pos)
747 {
748         struct file *file = iocb->ki_filp;
749         struct tun_file *tfile = file->private_data;
750         struct tun_struct *tun = __tun_get(tfile);
751         DECLARE_WAITQUEUE(wait, current);
752         struct sk_buff *skb;
753         ssize_t len, ret = 0;
754
755         if (!tun)
756                 return -EBADFD;
757
758         DBG(KERN_INFO "%s: tun_chr_read\n", tun->dev->name);
759
760         len = iov_length(iv, count);
761         if (len < 0) {
762                 ret = -EINVAL;
763                 goto out;
764         }
765
766         add_wait_queue(&tfile->read_wait, &wait);
767         while (len) {
768                 current->state = TASK_INTERRUPTIBLE;
769
770                 /* Read frames from the queue */
                if (!(skb = skb_dequeue(&tun->readq))) {
772                         if (file->f_flags & O_NONBLOCK) {
773                                 ret = -EAGAIN;
774                                 break;
775                         }
776                         if (signal_pending(current)) {
777                                 ret = -ERESTARTSYS;
778                                 break;
779                         }
780                         if (tun->dev->reg_state != NETREG_REGISTERED) {
781                                 ret = -EIO;
782                                 break;
783                         }
784
785                         /* Nothing to read, let's sleep */
786                         schedule();
787                         continue;
788                 }
789                 netif_wake_queue(tun->dev);
790
791                 ret = tun_put_user(tun, skb, (struct iovec *) iv, len);
792                 kfree_skb(skb);
793                 break;
794         }
795
796         current->state = TASK_RUNNING;
797         remove_wait_queue(&tfile->read_wait, &wait);
798
799 out:
800         tun_put(tun);
801         return ret;
802 }
803
804 static void tun_setup(struct net_device *dev)
805 {
806         struct tun_struct *tun = netdev_priv(dev);
807
808         skb_queue_head_init(&tun->readq);
809
810         tun->owner = -1;
811         tun->group = -1;
812
813         dev->ethtool_ops = &tun_ethtool_ops;
814         dev->destructor = free_netdev;
815 }
816
817 /* Trivial set of netlink ops to allow deleting tun or tap
818  * device with netlink.
819  */
820 static int tun_validate(struct nlattr *tb[], struct nlattr *data[])
821 {
822         return -EINVAL;
823 }
824
825 static struct rtnl_link_ops tun_link_ops __read_mostly = {
826         .kind           = DRV_NAME,
827         .priv_size      = sizeof(struct tun_struct),
828         .setup          = tun_setup,
829         .validate       = tun_validate,
830 };
831
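/* Called when send buffer space becomes available on the internal socket:
 * wake up any poll()ers and deliver SIGIO to async subscribers. */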
832 static void tun_sock_write_space(struct sock *sk)
833 {
834         struct tun_struct *tun;
835
836         if (!sock_writeable(sk))
837                 return;
838
839         if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
840                 wake_up_interruptible_sync(sk->sk_sleep);
841
842         if (!test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags))
843                 return;
844
845         tun = container_of(sk, struct tun_sock, sk)->tun;
846         kill_fasync(&tun->fasync, SIGIO, POLL_OUT);
847 }
848
849 static void tun_sock_destruct(struct sock *sk)
850 {
851         dev_put(container_of(sk, struct tun_sock, sk)->tun->dev);
852 }
853
854 static struct proto tun_proto = {
855         .name           = "tun",
856         .owner          = THIS_MODULE,
857         .obj_size       = sizeof(struct tun_sock),
858 };
859
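/* TUNSETIFF helper: attach the file to an existing tun/tap device matching
 * ifr->ifr_name, or create a new one (which requires CAP_NET_ADMIN).
 * Called with RTNL held. */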
860 static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
861 {
862         struct sock *sk;
863         struct tun_struct *tun;
864         struct net_device *dev;
865         struct tun_file *tfile = file->private_data;
866         int err;
867
868         dev = __dev_get_by_name(net, ifr->ifr_name);
869         if (dev) {
870                 if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops)
871                         tun = netdev_priv(dev);
872                 else if ((ifr->ifr_flags & IFF_TAP) && dev->netdev_ops == &tap_netdev_ops)
873                         tun = netdev_priv(dev);
874                 else
875                         return -EINVAL;
876
877                 err = tun_attach(tun, file);
878                 if (err < 0)
879                         return err;
        } else {
882                 char *name;
883                 unsigned long flags = 0;
884
885                 err = -EINVAL;
886
887                 if (!capable(CAP_NET_ADMIN))
888                         return -EPERM;
889
890                 /* Set dev type */
891                 if (ifr->ifr_flags & IFF_TUN) {
892                         /* TUN device */
893                         flags |= TUN_TUN_DEV;
894                         name = "tun%d";
895                 } else if (ifr->ifr_flags & IFF_TAP) {
896                         /* TAP device */
897                         flags |= TUN_TAP_DEV;
898                         name = "tap%d";
899                 } else
900                         goto failed;
901
902                 if (*ifr->ifr_name)
903                         name = ifr->ifr_name;
904
905                 dev = alloc_netdev(sizeof(struct tun_struct), name,
906                                    tun_setup);
907                 if (!dev)
908                         return -ENOMEM;
909
910                 dev_net_set(dev, net);
911                 dev->rtnl_link_ops = &tun_link_ops;
912
913                 tun = netdev_priv(dev);
914                 tun->dev = dev;
915                 tun->flags = flags;
916                 tun->txflt.count = 0;
917
918                 err = -ENOMEM;
919                 sk = sk_alloc(net, AF_UNSPEC, GFP_KERNEL, &tun_proto);
920                 if (!sk)
921                         goto err_free_dev;
922
923                 /* This ref count is for tun->sk. */
924                 dev_hold(dev);
925                 sock_init_data(&tun->socket, sk);
926                 sk->sk_write_space = tun_sock_write_space;
927                 sk->sk_destruct = tun_sock_destruct;
928                 sk->sk_sndbuf = INT_MAX;
929                 sk->sk_sleep = &tfile->read_wait;
930
931                 tun->sk = sk;
932                 container_of(sk, struct tun_sock, sk)->tun = tun;
933
934                 tun_net_init(dev);
935
936                 if (strchr(dev->name, '%')) {
937                         err = dev_alloc_name(dev, dev->name);
938                         if (err < 0)
939                                 goto err_free_sk;
940                 }
941
                err = register_netdevice(tun->dev);
                if (err < 0)
                        goto err_free_sk;
946
947                 err = tun_attach(tun, file);
948                 if (err < 0)
949                         goto err_free_dev;
950         }
951
952         DBG(KERN_INFO "%s: tun_set_iff\n", tun->dev->name);
953
954         if (ifr->ifr_flags & IFF_NO_PI)
955                 tun->flags |= TUN_NO_PI;
956         else
957                 tun->flags &= ~TUN_NO_PI;
958
959         if (ifr->ifr_flags & IFF_ONE_QUEUE)
960                 tun->flags |= TUN_ONE_QUEUE;
961         else
962                 tun->flags &= ~TUN_ONE_QUEUE;
963
964         if (ifr->ifr_flags & IFF_VNET_HDR)
965                 tun->flags |= TUN_VNET_HDR;
966         else
967                 tun->flags &= ~TUN_VNET_HDR;
968
969         /* Make sure persistent devices do not get stuck in
970          * xoff state.
971          */
972         if (netif_running(tun->dev))
973                 netif_wake_queue(tun->dev);
974
975         strcpy(ifr->ifr_name, tun->dev->name);
976         return 0;
977
978  err_free_sk:
979         sock_put(sk);
980  err_free_dev:
981         free_netdev(dev);
982  failed:
983         return err;
984 }
985
986 static int tun_get_iff(struct net *net, struct file *file, struct ifreq *ifr)
987 {
988         struct tun_struct *tun = tun_get(file);
989
990         if (!tun)
991                 return -EBADFD;
992
993         DBG(KERN_INFO "%s: tun_get_iff\n", tun->dev->name);
994
995         strcpy(ifr->ifr_name, tun->dev->name);
996
        ifr->ifr_flags = 0;

        if (tun->flags & TUN_TUN_DEV)
                ifr->ifr_flags |= IFF_TUN;
        else
                ifr->ifr_flags |= IFF_TAP;
1003
1004         if (tun->flags & TUN_NO_PI)
1005                 ifr->ifr_flags |= IFF_NO_PI;
1006
1007         if (tun->flags & TUN_ONE_QUEUE)
1008                 ifr->ifr_flags |= IFF_ONE_QUEUE;
1009
1010         if (tun->flags & TUN_VNET_HDR)
1011                 ifr->ifr_flags |= IFF_VNET_HDR;
1012
1013         tun_put(tun);
1014         return 0;
1015 }
1016
1017 /* This is like a cut-down ethtool ops, except done via tun fd so no
1018  * privs required. */
1019 static int set_offload(struct net_device *dev, unsigned long arg)
1020 {
1021         unsigned int old_features, features;
1022
1023         old_features = dev->features;
1024         /* Unset features, set them as we chew on the arg. */
1025         features = (old_features & ~(NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST
1026                                     |NETIF_F_TSO_ECN|NETIF_F_TSO|NETIF_F_TSO6));
1027
1028         if (arg & TUN_F_CSUM) {
1029                 features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
1030                 arg &= ~TUN_F_CSUM;
1031
1032                 if (arg & (TUN_F_TSO4|TUN_F_TSO6)) {
1033                         if (arg & TUN_F_TSO_ECN) {
1034                                 features |= NETIF_F_TSO_ECN;
1035                                 arg &= ~TUN_F_TSO_ECN;
1036                         }
1037                         if (arg & TUN_F_TSO4)
1038                                 features |= NETIF_F_TSO;
1039                         if (arg & TUN_F_TSO6)
1040                                 features |= NETIF_F_TSO6;
1041                         arg &= ~(TUN_F_TSO4|TUN_F_TSO6);
1042                 }
1043         }
1044
1045         /* This gives the user a way to test for new features in future by
1046          * trying to set them. */
1047         if (arg)
1048                 return -EINVAL;
1049
1050         dev->features = features;
1051         if (old_features != dev->features)
1052                 netdev_features_change(dev);
1053
1054         return 0;
1055 }
1056
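/* Character device ioctl handler: TUNSETIFF and friends, plus the
 * SIOC[GS]IFHWADDR calls so the MAC address can be managed through the fd. */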
1057 static int tun_chr_ioctl(struct inode *inode, struct file *file,
1058                          unsigned int cmd, unsigned long arg)
1059 {
1060         struct tun_file *tfile = file->private_data;
1061         struct tun_struct *tun;
        void __user *argp = (void __user *)arg;
1063         struct ifreq ifr;
1064         int sndbuf;
1065         int ret;
1066
1067         if (cmd == TUNSETIFF || _IOC_TYPE(cmd) == 0x89)
1068                 if (copy_from_user(&ifr, argp, sizeof ifr))
1069                         return -EFAULT;
1070
1071         if (cmd == TUNGETFEATURES) {
1072                 /* Currently this just means: "what IFF flags are valid?".
1073                  * This is needed because we never checked for invalid flags on
1074                  * TUNSETIFF. */
1075                 return put_user(IFF_TUN | IFF_TAP | IFF_NO_PI | IFF_ONE_QUEUE |
1076                                 IFF_VNET_HDR,
1077                                 (unsigned int __user*)argp);
1078         }
1079
1080         tun = __tun_get(tfile);
1081         if (cmd == TUNSETIFF && !tun) {
1082                 int err;
1083
1084                 ifr.ifr_name[IFNAMSIZ-1] = '\0';
1085
1086                 rtnl_lock();
1087                 err = tun_set_iff(tfile->net, file, &ifr);
1088                 rtnl_unlock();
1089
1090                 if (err)
1091                         return err;
1092
1093                 if (copy_to_user(argp, &ifr, sizeof(ifr)))
1094                         return -EFAULT;
1095                 return 0;
1096         }
1097
1098
1099         if (!tun)
1100                 return -EBADFD;
1101
1102         DBG(KERN_INFO "%s: tun_chr_ioctl cmd %d\n", tun->dev->name, cmd);
1103
1104         ret = 0;
1105         switch (cmd) {
1106         case TUNGETIFF:
1107                 ret = tun_get_iff(current->nsproxy->net_ns, file, &ifr);
1108                 if (ret)
1109                         break;
1110
1111                 if (copy_to_user(argp, &ifr, sizeof(ifr)))
1112                         ret = -EFAULT;
1113                 break;
1114
1115         case TUNSETNOCSUM:
1116                 /* Disable/Enable checksum */
1117                 if (arg)
1118                         tun->flags |= TUN_NOCHECKSUM;
1119                 else
1120                         tun->flags &= ~TUN_NOCHECKSUM;
1121
1122                 DBG(KERN_INFO "%s: checksum %s\n",
1123                     tun->dev->name, arg ? "disabled" : "enabled");
1124                 break;
1125
1126         case TUNSETPERSIST:
1127                 /* Disable/Enable persist mode */
1128                 if (arg)
1129                         tun->flags |= TUN_PERSIST;
1130                 else
1131                         tun->flags &= ~TUN_PERSIST;
1132
1133                 DBG(KERN_INFO "%s: persist %s\n",
1134                     tun->dev->name, arg ? "enabled" : "disabled");
1135                 break;
1136
1137         case TUNSETOWNER:
1138                 /* Set owner of the device */
1139                 tun->owner = (uid_t) arg;
1140
1141                 DBG(KERN_INFO "%s: owner set to %d\n", tun->dev->name, tun->owner);
1142                 break;
1143
1144         case TUNSETGROUP:
1145                 /* Set group of the device */
                tun->group = (gid_t) arg;
1147
1148                 DBG(KERN_INFO "%s: group set to %d\n", tun->dev->name, tun->group);
1149                 break;
1150
1151         case TUNSETLINK:
1152                 /* Only allow setting the type when the interface is down */
1153                 rtnl_lock();
1154                 if (tun->dev->flags & IFF_UP) {
1155                         DBG(KERN_INFO "%s: Linktype set failed because interface is up\n",
1156                                 tun->dev->name);
1157                         ret = -EBUSY;
1158                 } else {
1159                         tun->dev->type = (int) arg;
1160                         DBG(KERN_INFO "%s: linktype set to %d\n", tun->dev->name, tun->dev->type);
1161                         ret = 0;
1162                 }
1163                 rtnl_unlock();
1164                 break;
1165
1166 #ifdef TUN_DEBUG
1167         case TUNSETDEBUG:
1168                 tun->debug = arg;
1169                 break;
1170 #endif
1171         case TUNSETOFFLOAD:
1172                 rtnl_lock();
1173                 ret = set_offload(tun->dev, arg);
1174                 rtnl_unlock();
1175                 break;
1176
1177         case TUNSETTXFILTER:
1178                 /* Can be set only for TAPs */
1179                 ret = -EINVAL;
1180                 if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV)
1181                         break;
1182                 rtnl_lock();
1183                 ret = update_filter(&tun->txflt, (void __user *)arg);
1184                 rtnl_unlock();
1185                 break;
1186
1187         case SIOCGIFHWADDR:
                /* Get hw address */
1189                 memcpy(ifr.ifr_hwaddr.sa_data, tun->dev->dev_addr, ETH_ALEN);
1190                 ifr.ifr_hwaddr.sa_family = tun->dev->type;
1191                 if (copy_to_user(argp, &ifr, sizeof ifr))
1192                         ret = -EFAULT;
1193                 break;
1194
1195         case SIOCSIFHWADDR:
1196                 /* Set hw address */
1197                 DBG(KERN_DEBUG "%s: set hw address: %pM\n",
1198                         tun->dev->name, ifr.ifr_hwaddr.sa_data);
1199
1200                 rtnl_lock();
1201                 ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr);
1202                 rtnl_unlock();
1203                 break;
1204
1205         case TUNGETSNDBUF:
1206                 sndbuf = tun->sk->sk_sndbuf;
1207                 if (copy_to_user(argp, &sndbuf, sizeof(sndbuf)))
1208                         ret = -EFAULT;
1209                 break;
1210
1211         case TUNSETSNDBUF:
1212                 if (copy_from_user(&sndbuf, argp, sizeof(sndbuf))) {
1213                         ret = -EFAULT;
1214                         break;
1215                 }
1216
1217                 tun->sk->sk_sndbuf = sndbuf;
1218                 break;
1219
1220         default:
1221                 ret = -EINVAL;
1222                 break;
        }
1224
1225         tun_put(tun);
1226         return ret;
1227 }
1228
1229 static int tun_chr_fasync(int fd, struct file *file, int on)
1230 {
1231         struct tun_struct *tun = tun_get(file);
1232         int ret;
1233
1234         if (!tun)
1235                 return -EBADFD;
1236
1237         DBG(KERN_INFO "%s: tun_chr_fasync %d\n", tun->dev->name, on);
1238
1239         lock_kernel();
1240         if ((ret = fasync_helper(fd, file, on, &tun->fasync)) < 0)
1241                 goto out;
1242
1243         if (on) {
1244                 ret = __f_setown(file, task_pid(current), PIDTYPE_PID, 0);
1245                 if (ret)
1246                         goto out;
1247                 tun->flags |= TUN_FASYNC;
1248         } else
1249                 tun->flags &= ~TUN_FASYNC;
1250         ret = 0;
1251 out:
1252         unlock_kernel();
1253         tun_put(tun);
1254         return ret;
1255 }
1256
1257 static int tun_chr_open(struct inode *inode, struct file * file)
1258 {
1259         struct tun_file *tfile;
1260         cycle_kernel_lock();
1261         DBG1(KERN_INFO "tunX: tun_chr_open\n");
1262
1263         tfile = kmalloc(sizeof(*tfile), GFP_KERNEL);
1264         if (!tfile)
1265                 return -ENOMEM;
1266         atomic_set(&tfile->count, 0);
1267         tfile->tun = NULL;
1268         tfile->net = get_net(current->nsproxy->net_ns);
1269         init_waitqueue_head(&tfile->read_wait);
1270         file->private_data = tfile;
1271         return 0;
1272 }
1273
1274 static int tun_chr_close(struct inode *inode, struct file *file)
1275 {
1276         struct tun_file *tfile = file->private_data;
1277         struct tun_struct *tun = __tun_get(tfile);
1278
1279
1280         if (tun) {
1281                 DBG(KERN_INFO "%s: tun_chr_close\n", tun->dev->name);
1282
1283                 rtnl_lock();
1284                 __tun_detach(tun);
1285
                /* If desirable, unregister the netdevice. */
1287                 if (!(tun->flags & TUN_PERSIST)) {
1288                         sock_put(tun->sk);
1289                         unregister_netdevice(tun->dev);
1290                 }
1291
1292                 rtnl_unlock();
1293         }
1294
1295         put_net(tfile->net);
1296         kfree(tfile);
1297
1298         return 0;
1299 }
1300
1301 static const struct file_operations tun_fops = {
1302         .owner  = THIS_MODULE,
1303         .llseek = no_llseek,
1304         .read  = do_sync_read,
1305         .aio_read  = tun_chr_aio_read,
1306         .write = do_sync_write,
1307         .aio_write = tun_chr_aio_write,
1308         .poll   = tun_chr_poll,
1309         .ioctl  = tun_chr_ioctl,
1310         .open   = tun_chr_open,
1311         .release = tun_chr_close,
1312         .fasync = tun_chr_fasync
1313 };
1314
1315 static struct miscdevice tun_miscdev = {
1316         .minor = TUN_MINOR,
1317         .name = "tun",
1318         .fops = &tun_fops,
1319 };
1320
1321 /* ethtool interface */
1322
1323 static int tun_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1324 {
1325         cmd->supported          = 0;
1326         cmd->advertising        = 0;
1327         cmd->speed              = SPEED_10;
1328         cmd->duplex             = DUPLEX_FULL;
1329         cmd->port               = PORT_TP;
1330         cmd->phy_address        = 0;
1331         cmd->transceiver        = XCVR_INTERNAL;
1332         cmd->autoneg            = AUTONEG_DISABLE;
1333         cmd->maxtxpkt           = 0;
1334         cmd->maxrxpkt           = 0;
1335         return 0;
1336 }
1337
1338 static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1339 {
1340         struct tun_struct *tun = netdev_priv(dev);
1341
1342         strcpy(info->driver, DRV_NAME);
1343         strcpy(info->version, DRV_VERSION);
1344         strcpy(info->fw_version, "N/A");
1345
1346         switch (tun->flags & TUN_TYPE_MASK) {
1347         case TUN_TUN_DEV:
1348                 strcpy(info->bus_info, "tun");
1349                 break;
1350         case TUN_TAP_DEV:
1351                 strcpy(info->bus_info, "tap");
1352                 break;
1353         }
1354 }
1355
1356 static u32 tun_get_msglevel(struct net_device *dev)
1357 {
1358 #ifdef TUN_DEBUG
1359         struct tun_struct *tun = netdev_priv(dev);
1360         return tun->debug;
1361 #else
1362         return -EOPNOTSUPP;
1363 #endif
1364 }
1365
1366 static void tun_set_msglevel(struct net_device *dev, u32 value)
1367 {
1368 #ifdef TUN_DEBUG
1369         struct tun_struct *tun = netdev_priv(dev);
1370         tun->debug = value;
1371 #endif
1372 }
1373
1374 static u32 tun_get_link(struct net_device *dev)
1375 {
1376         struct tun_struct *tun = netdev_priv(dev);
1377         return !!tun->tfile;
1378 }
1379
1380 static u32 tun_get_rx_csum(struct net_device *dev)
1381 {
1382         struct tun_struct *tun = netdev_priv(dev);
1383         return (tun->flags & TUN_NOCHECKSUM) == 0;
1384 }
1385
1386 static int tun_set_rx_csum(struct net_device *dev, u32 data)
1387 {
1388         struct tun_struct *tun = netdev_priv(dev);
1389         if (data)
1390                 tun->flags &= ~TUN_NOCHECKSUM;
1391         else
1392                 tun->flags |= TUN_NOCHECKSUM;
1393         return 0;
1394 }
1395
1396 static const struct ethtool_ops tun_ethtool_ops = {
1397         .get_settings   = tun_get_settings,
1398         .get_drvinfo    = tun_get_drvinfo,
1399         .get_msglevel   = tun_get_msglevel,
1400         .set_msglevel   = tun_set_msglevel,
1401         .get_link       = tun_get_link,
1402         .get_rx_csum    = tun_get_rx_csum,
1403         .set_rx_csum    = tun_set_rx_csum
1404 };
1405
1406
1407 static int __init tun_init(void)
1408 {
1409         int ret = 0;
1410
1411         printk(KERN_INFO "tun: %s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
1412         printk(KERN_INFO "tun: %s\n", DRV_COPYRIGHT);
1413
1414         ret = rtnl_link_register(&tun_link_ops);
1415         if (ret) {
1416                 printk(KERN_ERR "tun: Can't register link_ops\n");
1417                 goto err_linkops;
1418         }
1419
1420         ret = misc_register(&tun_miscdev);
1421         if (ret) {
1422                 printk(KERN_ERR "tun: Can't register misc device %d\n", TUN_MINOR);
1423                 goto err_misc;
1424         }
1425         return  0;
1426 err_misc:
1427         rtnl_link_unregister(&tun_link_ops);
1428 err_linkops:
1429         return ret;
1430 }
1431
1432 static void tun_cleanup(void)
1433 {
1434         misc_deregister(&tun_miscdev);
1435         rtnl_link_unregister(&tun_link_ops);
1436 }
1437
1438 module_init(tun_init);
1439 module_exit(tun_cleanup);
1440 MODULE_DESCRIPTION(DRV_DESCRIPTION);
1441 MODULE_AUTHOR(DRV_COPYRIGHT);
1442 MODULE_LICENSE("GPL");
1443 MODULE_ALIAS_MISCDEV(TUN_MINOR);