/*
 *	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *		Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *		Michael Chastain:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *		Paul Rusty Russell :	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <linux/if_bridge.h>
#include <linux/if_macvlan.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/wext.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>

#include "net-sysfs.h"
/*
 *	The list of packet types we will receive (as opposed to discard)
 *	and the routines to invoke.
 *
 *	Why 16. Because with 16 the only overlap we get on a hash of the
 *	low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *	NOTE:  That is no longer true with the addition of VLAN tags.  Not
 *	       sure which should go first, but I bet it won't make much
 *	       difference if we are running VLANs.  The good news is that
 *	       this protocol won't be in the list unless compiled in, so
 *	       the average user (w/out VLANs) will not be adversely affected.
 */

#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)

static DEFINE_SPINLOCK(ptype_lock);
static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
static struct list_head ptype_all __read_mostly;	/* Taps */
#ifdef CONFIG_NET_DMA
struct net_dma {
    struct dma_client	client;
    spinlock_t		lock;
    cpumask_t		channel_mask;
    struct dma_chan	**channels;
};

static enum dma_state_client
netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
		 enum dma_state state);

static struct net_dma net_dma = {
    .client = {
	.event_callback = netdev_dma_event,
    },
};
#endif
/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading.
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);

EXPORT_SYMBOL(dev_base_lock);
#define NETDEV_HASHBITS	8
#define NETDEV_HASHENTRIES (1 << NETDEV_HASHBITS)

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
    unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
    return &net->dev_name_head[hash & ((1 << NETDEV_HASHBITS) - 1)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
    return &net->dev_index_head[ifindex & ((1 << NETDEV_HASHBITS) - 1)];
}

/* Device list insertion */
static int list_netdevice(struct net_device *dev)
{
    struct net *net = dev_net(dev);

    ASSERT_RTNL();

    write_lock_bh(&dev_base_lock);
    list_add_tail(&dev->dev_list, &net->dev_base_head);
    hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
    hlist_add_head(&dev->index_hlist, dev_index_hash(net, dev->ifindex));
    write_unlock_bh(&dev_base_lock);
    return 0;
}

/* Device list removal */
static void unlist_netdevice(struct net_device *dev)
{
    ASSERT_RTNL();

    /* Unlink dev from the device chain */
    write_lock_bh(&dev_base_lock);
    list_del(&dev->dev_list);
    hlist_del(&dev->name_hlist);
    hlist_del(&dev->index_hlist);
    write_unlock_bh(&dev_base_lock);
}
/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU(struct softnet_data, softnet_data);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
	 ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_VOID,
	 ARPHRD_NONE};

static const char *netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
	 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_VOID",
	 "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
	if (netdev_lock_type[i] == dev_type)
	    return i;
    /* the last key is used by default */
    return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_lockdep_class(spinlock_t *lock,
					    unsigned short dev_type)
{
    int i;

    i = netdev_lock_pos(dev_type);
    lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
			       netdev_lock_name[i]);
}
#else
static inline void netdev_set_lockdep_class(spinlock_t *lock,
					    unsigned short dev_type)
{
}
#endif
/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if a protocol handler that mangles packets were
 *	first in the list, it could not sense that the packet is cloned and
 *	should be copied-on-write; it would change the packet and subsequent
 *	readers would get a broken packet.
 */
/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep and therefore cannot guarantee that all
 *	CPUs that are in the middle of receiving packets will see the new
 *	packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
    int hash;

    spin_lock_bh(&ptype_lock);
    if (pt->type == htons(ETH_P_ALL))
	list_add_rcu(&pt->list, &ptype_all);
    else {
	hash = ntohs(pt->type) & PTYPE_HASH_MASK;
	list_add_rcu(&pt->list, &ptype_base[hash]);
    }
    spin_unlock_bh(&ptype_lock);
}
/**
 *	__dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
    struct list_head *head;
    struct packet_type *pt1;

    spin_lock_bh(&ptype_lock);

    if (pt->type == htons(ETH_P_ALL))
	head = &ptype_all;
    else
	head = &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];

    list_for_each_entry(pt1, head, list) {
	if (pt == pt1) {
	    list_del_rcu(&pt->list);
	    goto out;
	}
    }

    printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
out:
    spin_unlock_bh(&ptype_lock);
}
/**
 *	dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
    __dev_remove_pack(pt);

    synchronize_net();
}
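
/*
 * Usage sketch (illustrative, not part of this file): a minimal tap that
 * sees every outgoing and incoming frame.  The handler name and its
 * free-and-return body are assumptions for the example; the packet_type
 * fields and the add/remove calls are the API defined above.
 *
 *	static int example_rcv(struct sk_buff *skb, struct net_device *dev,
 *			       struct packet_type *pt,
 *			       struct net_device *orig_dev)
 *	{
 *		kfree_skb(skb);		/+ consume the clone we were given +/
 *		return 0;
 *	}
 *
 *	static struct packet_type example_ptype = {
 *		.type = __constant_htons(ETH_P_ALL),
 *		.func = example_rcv,
 *	};
 *
 *	dev_add_pack(&example_ptype);
 *	...
 *	dev_remove_pack(&example_ptype);	/+ sleeps; safe to free after +/
 */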
/******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add	- add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine to
 *	all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
    struct netdev_boot_setup *s;
    int i;

    s = dev_boot_setup;
    for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
	if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
	    memset(s[i].name, 0, sizeof(s[i].name));
	    strlcpy(s[i].name, name, IFNAMSIZ);
	    memcpy(&s[i].map, map, sizeof(s[i].map));
	    break;
	}
    }

    return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}
/**
 *	netdev_boot_setup_check	- check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings were found, 1 if they were.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
    struct netdev_boot_setup *s = dev_boot_setup;
    int i;

    for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
	if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
	    !strcmp(dev->name, s[i].name)) {
	    dev->irq	   = s[i].map.irq;
	    dev->base_addr = s[i].map.base_addr;
	    dev->mem_start = s[i].map.mem_start;
	    dev->mem_end   = s[i].map.mem_end;
	    return 1;
	}
    }
    return 0;
}
/**
 *	netdev_boot_base	- get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
    const struct netdev_boot_setup *s = dev_boot_setup;
    char name[IFNAMSIZ];
    int i;

    sprintf(name, "%s%d", prefix, unit);

    /*
     * If device already registered then return base of 1
     * to indicate not to probe for this interface
     */
    if (__dev_get_by_name(&init_net, name))
	return 1;

    for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
	if (!strcmp(name, s[i].name))
	    return s[i].map.base_addr;
    return 0;
}
/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
    int ints[5];
    struct ifmap map;

    str = get_options(str, ARRAY_SIZE(ints), ints);
    if (!str || !*str)
	return 0;

    /* Save settings */
    memset(&map, 0, sizeof(map));
    if (ints[0] > 0)
	map.irq = ints[1];
    if (ints[0] > 1)
	map.base_addr = ints[2];
    if (ints[0] > 2)
	map.mem_start = ints[3];
    if (ints[0] > 3)
	map.mem_end = ints[4];

    /* Add new entry to the list */
    return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);
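
/*
 * Example (hedged): the "netdev=" boot parameter parsed above takes up to
 * four integers (irq, base I/O address, memory start, memory end) followed
 * by the device name, e.g.
 *
 *	netdev=5,0x340,0xd0000,0xd3fff,eth0
 *
 * The values shown are made up for illustration; get_options() fills
 * ints[1..4] and leaves "eth0" in str for netdev_boot_setup_add().
 */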
/*******************************************************************************

			    Device Interface Subroutines

*******************************************************************************/

/**
 *	__dev_get_by_name	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
    struct hlist_node *p;

    hlist_for_each(p, dev_name_hash(net, name)) {
	struct net_device *dev
	    = hlist_entry(p, struct net_device, name_hlist);
	if (!strncmp(dev->name, name, IFNAMSIZ))
	    return dev;
    }
    return NULL;
}
/**
 *	dev_get_by_name		- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
    struct net_device *dev;

    read_lock(&dev_base_lock);
    dev = __dev_get_by_name(net, name);
    if (dev)
	dev_hold(dev);
    read_unlock(&dev_base_lock);
    return dev;
}
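
/*
 * Usage sketch (illustrative, not from this file): the returned device is
 * reference-counted, so every successful lookup must be paired with
 * dev_put().
 *
 *	struct net_device *dev = dev_get_by_name(&init_net, "eth0");
 *	if (dev) {
 *		...			use the device safely here
 *		dev_put(dev);
 *	}
 */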
/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
    struct hlist_node *p;

    hlist_for_each(p, dev_index_hash(net, ifindex)) {
	struct net_device *dev
	    = hlist_entry(p, struct net_device, index_hlist);
	if (dev->ifindex == ifindex)
	    return dev;
    }
    return NULL;
}

/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
    struct net_device *dev;

    read_lock(&dev_base_lock);
    dev = __dev_get_by_index(net, ifindex);
    if (dev)
	dev_hold(dev);
    read_unlock(&dev_base_lock);
    return dev;
}
/**
 *	dev_getbyhwaddr - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device. The caller must hold the
 *	rtnl semaphore. The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking.
 *
 *	If the API was consistent this would be __dev_get_by_hwaddr
 */

struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *ha)
{
    struct net_device *dev;

    ASSERT_RTNL();

    for_each_netdev(net, dev)
	if (dev->type == type &&
	    !memcmp(dev->dev_addr, ha, dev->addr_len))
	    return dev;

    return NULL;
}

EXPORT_SYMBOL(dev_getbyhwaddr);

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
    struct net_device *dev;

    ASSERT_RTNL();
    for_each_netdev(net, dev)
	if (dev->type == type)
	    return dev;

    return NULL;
}

EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
    struct net_device *dev;

    rtnl_lock();
    dev = __dev_getfirstbyhwtype(net, type);
    if (dev)
	dev_hold(dev);
    rtnl_unlock();
    return dev;
}

EXPORT_SYMBOL(dev_getfirstbyhwtype);
/**
 *	dev_get_by_flags - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if no
 *	matching device is found, or a pointer to the device. The device
 *	returned has had a reference added and the pointer is safe until the
 *	user calls dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_flags(struct net *net, unsigned short if_flags, unsigned short mask)
{
    struct net_device *dev, *ret;

    ret = NULL;
    read_lock(&dev_base_lock);
    for_each_netdev(net, dev) {
	if (((dev->flags ^ if_flags) & mask) == 0) {
	    dev_hold(dev);
	    ret = dev;
	    break;
	}
    }
    read_unlock(&dev_base_lock);
    return ret;
}
/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
int dev_valid_name(const char *name)
{
    if (*name == '\0')
	return 0;
    if (strlen(name) >= IFNAMSIZ)
	return 0;
    if (!strcmp(name, ".") || !strcmp(name, ".."))
	return 0;

    while (*name) {
	if (*name == '/' || isspace(*name))
	    return 0;
	name++;
    }
    return 1;
}
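
/*
 * Illustrative results under the rules above (example names only):
 * "eth0" and "tunl1" are valid; "" (empty), ".", "..", "a/b" and
 * "my if" (contains whitespace) are all rejected, as is any name of
 * IFNAMSIZ characters or more.
 */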
/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (i.e. 32K on most
 *	platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
    int i = 0;
    const char *p;
    const int max_netdevices = 8*PAGE_SIZE;
    unsigned long *inuse;
    struct net_device *d;

    p = strnchr(name, IFNAMSIZ-1, '%');
    if (p) {
	/*
	 * Verify the string as this thing may have come from
	 * the user.  There must be either one "%d" and no other "%"
	 * characters.
	 */
	if (p[1] != 'd' || strchr(p + 2, '%'))
	    return -EINVAL;

	/* Use one page as a bit array of possible slots */
	inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
	if (!inuse)
	    return -ENOMEM;

	for_each_netdev(net, d) {
	    if (!sscanf(d->name, name, &i))
		continue;
	    if (i < 0 || i >= max_netdevices)
		continue;

	    /* avoid cases where sscanf is not exact inverse of printf */
	    snprintf(buf, IFNAMSIZ, name, i);
	    if (!strncmp(buf, d->name, IFNAMSIZ))
		set_bit(i, inuse);
	}

	i = find_first_zero_bit(inuse, max_netdevices);
	free_page((unsigned long) inuse);
    }

    snprintf(buf, IFNAMSIZ, name, i);
    if (!__dev_get_by_name(net, buf))
	return i;

    /* It is possible to run out of possible slots
     * when the name is long and there isn't enough space left
     * for the digits, or if all bits are used.
     */
    return -ENFILE;
}
/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (i.e. 32K on most
 *	platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
    char buf[IFNAMSIZ];
    struct net *net;
    int ret;

    BUG_ON(!dev_net(dev));
    net = dev_net(dev);
    ret = __dev_alloc_name(net, name, buf);
    if (ret >= 0)
	strlcpy(dev->name, buf, IFNAMSIZ);
    return ret;
}
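
/*
 * Usage sketch (hedged): with the format string "eth%d" the lowest free
 * unit number is picked, so on a machine that already has eth0 and eth1:
 *
 *	err = dev_alloc_name(dev, "eth%d");	/+ dev->name becomes "eth2" +/
 *
 * A negative return value means no slot could be allocated.
 */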
/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device, can pass format strings "eth%d"
 *	for wildcarding.
 */
int dev_change_name(struct net_device *dev, char *newname)
{
    char oldname[IFNAMSIZ];
    int err = 0;
    int ret;
    struct net *net;

    ASSERT_RTNL();
    BUG_ON(!dev_net(dev));

    net = dev_net(dev);
    if (dev->flags & IFF_UP)
	return -EBUSY;

    if (!dev_valid_name(newname))
	return -EINVAL;

    if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
	return 0;

    memcpy(oldname, dev->name, IFNAMSIZ);

    if (strchr(newname, '%')) {
	err = dev_alloc_name(dev, newname);
	if (err < 0)
	    return err;
	strcpy(newname, dev->name);
    }
    else if (__dev_get_by_name(net, newname))
	return -EEXIST;
    else
	strlcpy(dev->name, newname, IFNAMSIZ);

rollback:
    err = device_rename(&dev->dev, dev->name);
    if (err) {
	memcpy(dev->name, oldname, IFNAMSIZ);
	return err;
    }

    write_lock_bh(&dev_base_lock);
    hlist_del(&dev->name_hlist);
    hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
    write_unlock_bh(&dev_base_lock);

    ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
    ret = notifier_to_errno(ret);

    if (ret) {
	if (err) {
	    printk(KERN_ERR
		   "%s: name change rollback failed: %d.\n",
		   dev->name, ret);
	} else {
	    err = ret;
	    memcpy(dev->name, oldname, IFNAMSIZ);
	    goto rollback;
	}
    }

    return err;
}
/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
    call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
    if (dev->flags & IFF_UP) {
	call_netdevice_notifiers(NETDEV_CHANGE, dev);
	rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
    }
}

void netdev_bonding_change(struct net_device *dev)
{
    call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, dev);
}
EXPORT_SYMBOL(netdev_bonding_change);
/**
 *	dev_load 	- load a network module
 *	@net: the applicable net namespace
 *	@name: name of interface
 *
 *	If a network interface is not present and the process has suitable
 *	privileges this function loads the module. If module loading is not
 *	available in this kernel then it becomes a nop.
 */

void dev_load(struct net *net, const char *name)
{
    struct net_device *dev;

    read_lock(&dev_base_lock);
    dev = __dev_get_by_name(net, name);
    read_unlock(&dev_base_lock);

    if (!dev && capable(CAP_SYS_MODULE))
	request_module("%s", name);
}
/**
 *	dev_open	- prepare an interface for use.
 *	@dev:	device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
    int ret = 0;

    ASSERT_RTNL();

    /* Is it already up? */
    if (dev->flags & IFF_UP)
	return 0;

    /*
     *	Is it even present?
     */
    if (!netif_device_present(dev))
	return -ENODEV;

    /*
     *	Call device private open method
     */
    set_bit(__LINK_STATE_START, &dev->state);
    if (dev->validate_addr)
	ret = dev->validate_addr(dev);
    if (!ret && dev->open)
	ret = dev->open(dev);

    /*
     *	If it went open OK then:
     */
    if (ret) {
	clear_bit(__LINK_STATE_START, &dev->state);
    } else {
	/* Set the flags. */
	dev->flags |= IFF_UP;

	/*
	 *	Initialize multicasting status
	 */
	dev_set_rx_mode(dev);

	/*
	 *	Wakeup transmit queue engine
	 */
	dev_activate(dev);

	/*
	 *	... and announce new interface.
	 */
	call_netdevice_notifiers(NETDEV_UP, dev);
    }

    return ret;
}
/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
int dev_close(struct net_device *dev)
{
    ASSERT_RTNL();

    might_sleep();

    if (!(dev->flags & IFF_UP))
	return 0;

    /*
     * Tell people we are going down, so that they can
     * prepare to die while the device is still operating.
     */
    call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

    clear_bit(__LINK_STATE_START, &dev->state);

    /* Synchronize to scheduled poll. We cannot touch poll list,
     * it can be even on different cpu. So just clear netif_running().
     *
     * dev->stop() will invoke napi_disable() on all of its
     * napi_struct instances on this device.
     */
    smp_mb__after_clear_bit(); /* Commit netif_running(). */

    dev_deactivate(dev);

    /*
     * Call the device specific close. This cannot fail.
     * Only if device is UP.
     *
     * We allow it to be called even after a DETACH hot-plug
     * event.
     */
    if (dev->stop)
	dev->stop(dev);

    /*
     * Device is now down.
     */
    dev->flags &= ~IFF_UP;

    /*
     * Tell people we are down
     */
    call_netdevice_notifiers(NETDEV_DOWN, dev);

    return 0;
}
/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
    if (dev->ethtool_ops && dev->ethtool_ops->get_flags &&
	dev->ethtool_ops->set_flags) {
	u32 flags = dev->ethtool_ops->get_flags(dev);
	if (flags & ETH_FLAG_LRO) {
	    flags &= ~ETH_FLAG_LRO;
	    dev->ethtool_ops->set_flags(dev, flags);
	}
    }
    WARN_ON(dev->features & NETIF_F_LRO);
}
EXPORT_SYMBOL(dev_disable_lro);
static int dev_boot_phase = 1;

/*
 *	Device change register/unregister. These are not inline or static
 *	as we export them to the world.
 */
/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 *	When registered all registration and up events are replayed
 *	to the new notifier to allow device to have a race free
 *	view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
    struct net_device *dev;
    struct net_device *last;
    struct net *net;
    int err;

    rtnl_lock();
    err = raw_notifier_chain_register(&netdev_chain, nb);
    if (err)
	goto unlock;
    if (dev_boot_phase)
	goto unlock;
    for_each_net(net) {
	for_each_netdev(net, dev) {
	    err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
	    err = notifier_to_errno(err);
	    if (err)
		goto rollback;

	    if (!(dev->flags & IFF_UP))
		continue;

	    nb->notifier_call(nb, NETDEV_UP, dev);
	}
    }

unlock:
    rtnl_unlock();
    return err;

rollback:
    last = dev;
    for_each_net(net) {
	for_each_netdev(net, dev) {
	    if (dev == last)
		break;

	    if (dev->flags & IFF_UP) {
		nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
		nb->notifier_call(nb, NETDEV_DOWN, dev);
	    }
	    nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
	}
    }

    raw_notifier_chain_unregister(&netdev_chain, nb);
    goto unlock;
}
/**
 *	unregister_netdevice_notifier - unregister a network notifier block
 *	@nb: notifier
 *
 *	Unregister a notifier previously registered by
 *	register_netdevice_notifier(). The notifier is unlinked from the
 *	kernel structures and may then be reused. A negative errno code
 *	is returned on a failure.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
    int err;

    rtnl_lock();
    err = raw_notifier_chain_unregister(&netdev_chain, nb);
    rtnl_unlock();
    return err;
}
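
/*
 * Usage sketch (names are illustrative assumptions): a subsystem watches
 * devices coming and going by registering a notifier_block; events replayed
 * at registration time give it a race-free view of existing devices.
 *
 *	static int example_event(struct notifier_block *nb,
 *				 unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = ptr;
 *
 *		if (event == NETDEV_UP)
 *			printk(KERN_INFO "%s is up\n", dev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block example_nb = {
 *		.notifier_call = example_event,
 *	};
 *
 *	register_netdevice_notifier(&example_nb);
 */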
/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
    return raw_notifier_call_chain(&netdev_chain, val, dev);
}
/* When > 0 there are consumers of rx skb time stamps */
static atomic_t netstamp_needed = ATOMIC_INIT(0);

void net_enable_timestamp(void)
{
    atomic_inc(&netstamp_needed);
}

void net_disable_timestamp(void)
{
    atomic_dec(&netstamp_needed);
}

static inline void net_timestamp(struct sk_buff *skb)
{
    if (atomic_read(&netstamp_needed))
	__net_timestamp(skb);
    else
	skb->tstamp.tv64 = 0;
}
/*
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
 */

static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
    struct packet_type *ptype;

    net_timestamp(skb);

    rcu_read_lock();
    list_for_each_entry_rcu(ptype, &ptype_all, list) {
	/* Never send packets back to the socket
	 * they originated from - MvS (miquels@drinkel.ow.org)
	 */
	if ((ptype->dev == dev || !ptype->dev) &&
	    (ptype->af_packet_priv == NULL ||
	     (struct sock *)ptype->af_packet_priv != skb->sk)) {
	    struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
	    if (!skb2)
		break;

	    /* skb->nh should be correctly
	       set by sender, so that the second statement is
	       just protection against buggy protocols.
	     */
	    skb_reset_mac_header(skb2);

	    if (skb_network_header(skb2) < skb2->data ||
		skb2->network_header > skb2->tail) {
		if (net_ratelimit())
		    printk(KERN_CRIT "protocol %04x is "
			   "buggy, dev %s\n",
			   skb2->protocol, dev->name);
		skb_reset_network_header(skb2);
	    }

	    skb2->transport_header = skb2->network_header;
	    skb2->pkt_type = PACKET_OUTGOING;
	    ptype->func(skb2, skb->dev, ptype, skb->dev);
	}
    }
    rcu_read_unlock();
}
void __netif_schedule(struct Qdisc *q)
{
    if (WARN_ON_ONCE(q == &noop_qdisc))
	return;

    if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state)) {
	struct softnet_data *sd;
	unsigned long flags;

	local_irq_save(flags);
	sd = &__get_cpu_var(softnet_data);
	q->next_sched = sd->output_queue;
	sd->output_queue = q;
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
    }
}
EXPORT_SYMBOL(__netif_schedule);

void dev_kfree_skb_irq(struct sk_buff *skb)
{
    if (atomic_dec_and_test(&skb->users)) {
	struct softnet_data *sd;
	unsigned long flags;

	local_irq_save(flags);
	sd = &__get_cpu_var(softnet_data);
	skb->next = sd->completion_queue;
	sd->completion_queue = skb;
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
    }
}
EXPORT_SYMBOL(dev_kfree_skb_irq);

void dev_kfree_skb_any(struct sk_buff *skb)
{
    if (in_irq() || irqs_disabled())
	dev_kfree_skb_irq(skb);
    else
	dev_kfree_skb(skb);
}
EXPORT_SYMBOL(dev_kfree_skb_any);
/**
 * netif_device_detach - mark device as removed
 * @dev: network device
 *
 * Mark device as removed from system and therefore no longer available.
 */
void netif_device_detach(struct net_device *dev)
{
    if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
	netif_running(dev)) {
	netif_stop_queue(dev);
    }
}
EXPORT_SYMBOL(netif_device_detach);

/**
 * netif_device_attach - mark device as attached
 * @dev: network device
 *
 * Mark device as attached from system and restart if needed.
 */
void netif_device_attach(struct net_device *dev)
{
    if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
	netif_running(dev)) {
	netif_wake_queue(dev);
	__netdev_watchdog_up(dev);
    }
}
EXPORT_SYMBOL(netif_device_attach);
static bool can_checksum_protocol(unsigned long features, __be16 protocol)
{
    return ((features & NETIF_F_GEN_CSUM) ||
	    ((features & NETIF_F_IP_CSUM) &&
	     protocol == htons(ETH_P_IP)) ||
	    ((features & NETIF_F_IPV6_CSUM) &&
	     protocol == htons(ETH_P_IPV6)));
}

static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
{
    if (can_checksum_protocol(dev->features, skb->protocol))
	return true;

    if (skb->protocol == htons(ETH_P_8021Q)) {
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	if (can_checksum_protocol(dev->features & dev->vlan_features,
				  veh->h_vlan_encapsulated_proto))
	    return true;
    }

    return false;
}
/*
 * Invalidate hardware checksum when packet is to be mangled, and
 * complete checksum manually on outgoing path.
 */
int skb_checksum_help(struct sk_buff *skb)
{
    __wsum csum;
    int ret = 0, offset;

    if (skb->ip_summed == CHECKSUM_COMPLETE)
	goto out_set_summed;

    if (unlikely(skb_shinfo(skb)->gso_size)) {
	/* Let GSO fix up the checksum. */
	goto out_set_summed;
    }

    offset = skb->csum_start - skb_headroom(skb);
    BUG_ON(offset >= skb_headlen(skb));
    csum = skb_checksum(skb, offset, skb->len - offset, 0);

    offset += skb->csum_offset;
    BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));

    if (skb_cloned(skb) &&
	!skb_clone_writable(skb, offset + sizeof(__sum16))) {
	ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
	if (ret)
	    goto out;
    }

    *(__sum16 *)(skb->data + offset) = csum_fold(csum);
out_set_summed:
    skb->ip_summed = CHECKSUM_NONE;
out:
    return ret;
}
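
/*
 * Worked example (the packet layout is an assumption for illustration):
 * for a CHECKSUM_PARTIAL TCP/IPv4 skb, csum_start points at the TCP
 * header and csum_offset is 16 (the checksum field's offset inside it).
 * With a 14-byte Ethernet header and a 20-byte IP header, "offset" above
 * starts at 34, becomes 50 after adding csum_offset, and the folded sum
 * is written at skb->data + 50.
 */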
/**
 *	skb_gso_segment - Perform segmentation on skb.
 *	@skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 *
 *	This function segments the given skb and returns a list of segments.
 *
 *	It may return NULL if the skb requires no segmentation.  This is
 *	only possible when GSO is used for verifying header integrity.
 */
struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
{
    struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
    struct packet_type *ptype;
    __be16 type = skb->protocol;
    int err;

    BUG_ON(skb_shinfo(skb)->frag_list);

    skb_reset_mac_header(skb);
    skb->mac_len = skb->network_header - skb->mac_header;
    __skb_pull(skb, skb->mac_len);

    if (WARN_ON(skb->ip_summed != CHECKSUM_PARTIAL)) {
	if (skb_header_cloned(skb) &&
	    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
	    return ERR_PTR(err);
    }

    rcu_read_lock();
    list_for_each_entry_rcu(ptype,
	    &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
	if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
	    if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
		err = ptype->gso_send_check(skb);
		segs = ERR_PTR(err);
		if (err || skb_gso_ok(skb, features))
		    break;
		__skb_push(skb, (skb->data -
				 skb_network_header(skb)));
	    }
	    segs = ptype->gso_segment(skb, features);
	    break;
	}
    }
    rcu_read_unlock();

    __skb_push(skb, skb->data - skb_mac_header(skb));

    return segs;
}

EXPORT_SYMBOL(skb_gso_segment);
/* Take action when hardware reception checksum errors are detected. */
#ifdef CONFIG_BUG
void netdev_rx_csum_fault(struct net_device *dev)
{
    if (net_ratelimit()) {
	printk(KERN_ERR "%s: hw csum failure.\n",
	       dev ? dev->name : "<unknown>");
	dump_stack();
    }
}
EXPORT_SYMBOL(netdev_rx_csum_fault);
#endif

/* Actually, we should eliminate this check as soon as we know that:
 * 1. IOMMU is present and allows to map all the memory.
 * 2. No high memory really exists on this machine.
 */

static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_HIGHMEM
    int i;

    if (dev->features & NETIF_F_HIGHDMA)
	return 0;

    for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
	if (PageHighMem(skb_shinfo(skb)->frags[i].page))
	    return 1;

#endif
    return 0;
}
struct dev_gso_cb {
    void (*destructor)(struct sk_buff *skb);
};

#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)

static void dev_gso_skb_destructor(struct sk_buff *skb)
{
    struct dev_gso_cb *cb;

    do {
	struct sk_buff *nskb = skb->next;

	skb->next = nskb->next;
	nskb->next = NULL;
	kfree_skb(nskb);
    } while (skb->next);

    cb = DEV_GSO_CB(skb);
    if (cb->destructor)
	cb->destructor(skb);
}

/**
 *	dev_gso_segment - Perform emulated hardware segmentation on skb.
 *	@skb: buffer to segment
 *
 *	This function segments the given skb and stores the list of segments
 *	in skb->next.
 */
static int dev_gso_segment(struct sk_buff *skb)
{
    struct net_device *dev = skb->dev;
    struct sk_buff *segs;
    int features = dev->features & ~(illegal_highdma(dev, skb) ?
				     NETIF_F_SG : 0);

    segs = skb_gso_segment(skb, features);

    /* Verifying header integrity only. */
    if (!segs)
	return 0;

    if (IS_ERR(segs))
	return PTR_ERR(segs);

    skb->next = segs;
    DEV_GSO_CB(skb)->destructor = skb->destructor;
    skb->destructor = dev_gso_skb_destructor;

    return 0;
}
int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
			struct netdev_queue *txq)
{
    if (likely(!skb->next)) {
	if (!list_empty(&ptype_all))
	    dev_queue_xmit_nit(skb, dev);

	if (netif_needs_gso(dev, skb)) {
	    if (unlikely(dev_gso_segment(skb)))
		goto out_kfree_skb;
	    if (skb->next)
		goto gso;
	}

	return dev->hard_start_xmit(skb, dev);
    }

gso:
    do {
	struct sk_buff *nskb = skb->next;
	int rc;

	skb->next = nskb->next;
	nskb->next = NULL;
	rc = dev->hard_start_xmit(nskb, dev);
	if (unlikely(rc)) {
	    nskb->next = skb->next;
	    skb->next = nskb;
	    return rc;
	}
	if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
	    return NETDEV_TX_BUSY;
    } while (skb->next);

    skb->destructor = DEV_GSO_CB(skb)->destructor;

out_kfree_skb:
    kfree_skb(skb);
    return 0;
}
static u32 simple_tx_hashrnd;
static int simple_tx_hashrnd_initialized = 0;

static u16 simple_tx_hash(struct net_device *dev, struct sk_buff *skb)
{
    u32 addr1, addr2, ports;
    u32 hash, ihl;
    u8 ip_proto = 0;

    if (unlikely(!simple_tx_hashrnd_initialized)) {
	get_random_bytes(&simple_tx_hashrnd, 4);
	simple_tx_hashrnd_initialized = 1;
    }

    switch (skb->protocol) {
    case __constant_htons(ETH_P_IP):
	ip_proto = ip_hdr(skb)->protocol;
	addr1 = ip_hdr(skb)->saddr;
	addr2 = ip_hdr(skb)->daddr;
	ihl = ip_hdr(skb)->ihl;
	break;
    case __constant_htons(ETH_P_IPV6):
	ip_proto = ipv6_hdr(skb)->nexthdr;
	addr1 = ipv6_hdr(skb)->saddr.s6_addr32[3];
	addr2 = ipv6_hdr(skb)->daddr.s6_addr32[3];
	ihl = (40 >> 2);
	break;
    default:
	return 0;
    }

    switch (ip_proto) {
    case IPPROTO_TCP:
    case IPPROTO_UDP:
    case IPPROTO_DCCP:
    case IPPROTO_ESP:
    case IPPROTO_AH:
    case IPPROTO_SCTP:
    case IPPROTO_UDPLITE:
	ports = *((u32 *) (skb_network_header(skb) + (ihl * 4)));
	break;

    default:
	ports = 0;
	break;
    }

    hash = jhash_3words(addr1, addr2, ports, simple_tx_hashrnd);

    return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
}
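
/*
 * Note on the final line (a sketch of the arithmetic, not extra API):
 * ((u64)hash * n) >> 32 maps a uniform 32-bit hash onto [0, n) without
 * a modulo. For example, with hash = 0x80000000 and 8 TX queues:
 *
 *	(0x80000000ULL * 8) >> 32 == 4		the middle queue
 */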
static struct netdev_queue *dev_pick_tx(struct net_device *dev,
					struct sk_buff *skb)
{
    u16 queue_index = 0;

    if (dev->select_queue)
	queue_index = dev->select_queue(dev, skb);
    else if (dev->real_num_tx_queues > 1)
	queue_index = simple_tx_hash(dev, skb);

    skb_set_queue_mapping(skb, queue_index);
    return netdev_get_tx_queue(dev, queue_index);
}
/**
 *	dev_queue_xmit - transmit a buffer
 *	@skb: buffer to transmit
 *
 *	Queue a buffer for transmission to a network device. The caller must
 *	have set the device and priority and built the buffer before calling
 *	this function. The function can be called from an interrupt.
 *
 *	A negative errno code is returned on a failure. A success does not
 *	guarantee the frame will be transmitted as it may be dropped due
 *	to congestion or traffic shaping.
 *
 * -----------------------------------------------------------------------------------
 *      I notice this method can also return errors from the queue disciplines,
 *      including NET_XMIT_DROP, which is a positive value.  So, errors can also
 *      be positive.
 *
 *      Regardless of the return value, the skb is consumed, so it is currently
 *      difficult to retry a send to this method.  (You can bump the ref count
 *      before sending to hold a reference for retry if you are careful.)
 *
 *      When calling this method, interrupts MUST be enabled.  This is because
 *      the BH enable code must have IRQs enabled so that it will not deadlock.
 */
int dev_queue_xmit(struct sk_buff *skb)
{
    struct net_device *dev = skb->dev;
    struct netdev_queue *txq;
    struct Qdisc *q;
    int rc = -ENOMEM;

    /* GSO will handle the following emulations directly. */
    if (netif_needs_gso(dev, skb))
	goto gso;

    if (skb_shinfo(skb)->frag_list &&
	!(dev->features & NETIF_F_FRAGLIST) &&
	__skb_linearize(skb))
	goto out_kfree_skb;

    /* Fragmented skb is linearized if device does not support SG,
     * or if at least one of fragments is in highmem and device
     * does not support DMA from it.
     */
    if (skb_shinfo(skb)->nr_frags &&
	(!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
	__skb_linearize(skb))
	goto out_kfree_skb;

    /* If packet is not checksummed and device does not support
     * checksumming for this protocol, complete checksumming here.
     */
    if (skb->ip_summed == CHECKSUM_PARTIAL) {
	skb_set_transport_header(skb, skb->csum_start -
				      skb_headroom(skb));
	if (!dev_can_checksum(dev, skb) && skb_checksum_help(skb))
	    goto out_kfree_skb;
    }

gso:
    /* Disable soft irqs for various locks below. Also
     * stops preemption for RCU.
     */
    rcu_read_lock_bh();

    txq = dev_pick_tx(dev, skb);
    q = rcu_dereference(txq->qdisc);

#ifdef CONFIG_NET_CLS_ACT
    skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
#endif
    if (q->enqueue) {
	spinlock_t *root_lock = qdisc_root_lock(q);

	spin_lock(root_lock);

	rc = qdisc_enqueue_root(skb, q);
	qdisc_run(q);

	spin_unlock(root_lock);

	rc = rc == NET_XMIT_BYPASS ? NET_XMIT_SUCCESS : rc;
	goto out;
    }

    /* The device has no queue. Common case for software devices:
       loopback, all the sorts of tunnels...

       Really, it is unlikely that netif_tx_lock protection is necessary
       here.  (f.e. loopback and IP tunnels are clean ignoring statistics
       counters.)
       However, it is possible, that they rely on protection
       made by us here.

       Check this and shoot the lock. It is not prone to deadlocks.
       Either shoot the noqueue qdisc, it is even simpler 8)
     */
    if (dev->flags & IFF_UP) {
	int cpu = smp_processor_id(); /* ok because BHs are off */

	if (txq->xmit_lock_owner != cpu) {

	    HARD_TX_LOCK(dev, txq, cpu);

	    if (!netif_tx_queue_stopped(txq)) {
		rc = 0;
		if (!dev_hard_start_xmit(skb, dev, txq)) {
		    HARD_TX_UNLOCK(dev, txq);
		    goto out;
		}
	    }
	    HARD_TX_UNLOCK(dev, txq);
	    if (net_ratelimit())
		printk(KERN_CRIT "Virtual device %s asks to "
		       "queue packet!\n", dev->name);
	} else {
	    /* Recursion is detected! It is possible,
	     * unfortunately
	     */
	    if (net_ratelimit())
		printk(KERN_CRIT "Dead loop on virtual device "
		       "%s, fix it urgently!\n", dev->name);
	}
    }

    rc = -ENETDOWN;
    rcu_read_unlock_bh();

out_kfree_skb:
    kfree_skb(skb);
    return rc;
out:
    rcu_read_unlock_bh();
    return rc;
}
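
/*
 * Caller sketch (illustrative; sk and dev stand in for the caller's own
 * socket and device): a protocol that has built an skb sets the device
 * and priority, then hands the buffer off. The skb is consumed whether
 * or not the call succeeds.
 *
 *	skb->dev = dev;
 *	skb->priority = sk->sk_priority;
 *	err = dev_queue_xmit(skb);	/+ do not touch skb afterwards +/
 */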
/*=======================================================================
			Receiver routines
  =======================================================================*/

int netdev_max_backlog __read_mostly = 1000;
int netdev_budget __read_mostly = 300;
int weight_p __read_mostly = 64;		/* old backlog weight */

DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
/**
 *	netif_rx	-	post buffer to the network code
 *	@skb: buffer to post
 *
 *	This function receives a packet from a device driver and queues it for
 *	the upper (protocol) levels to process.  It always succeeds. The buffer
 *	may be dropped during processing for congestion control or by the
 *	protocol layers.
 *
 *	return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP	(packet was dropped)
 */

int netif_rx(struct sk_buff *skb)
{
    struct softnet_data *queue;
    unsigned long flags;

    /* if netpoll wants it, pretend we never saw it */
    if (netpoll_rx(skb))
	return NET_RX_DROP;

    if (!skb->tstamp.tv64)
	net_timestamp(skb);

    /*
     * The code is rearranged so that the path is shortest
     * when the CPU is congested, but is still operating.
     */
    local_irq_save(flags);
    queue = &__get_cpu_var(softnet_data);

    __get_cpu_var(netdev_rx_stat).total++;
    if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
	if (queue->input_pkt_queue.qlen) {
enqueue:
	    __skb_queue_tail(&queue->input_pkt_queue, skb);
	    local_irq_restore(flags);
	    return NET_RX_SUCCESS;
	}

	napi_schedule(&queue->backlog);
	goto enqueue;
    }

    __get_cpu_var(netdev_rx_stat).dropped++;
    local_irq_restore(flags);

    kfree_skb(skb);
    return NET_RX_DROP;
}
int netif_rx_ni(struct sk_buff *skb)
{
    int err;

    preempt_disable();
    err = netif_rx(skb);
    if (local_softirq_pending())
	do_softirq();
    preempt_enable();

    return err;
}

EXPORT_SYMBOL(netif_rx_ni);
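
/*
 * Driver-side sketch (hedged; the skb is assumed already filled with the
 * received frame): a non-NAPI interrupt handler typically derives the
 * protocol and feeds the buffer to netif_rx() for softirq processing.
 *
 *	skb->protocol = eth_type_trans(skb, dev);   /+ also sets skb->dev +/
 *	netif_rx(skb);
 */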
static inline struct net_device *skb_bond(struct sk_buff *skb)
{
    struct net_device *dev = skb->dev;

    if (dev->master) {
	if (skb_bond_should_drop(skb)) {
	    kfree_skb(skb);
	    return NULL;
	}
	skb->dev = dev->master;
    }

    return dev;
}
static void net_tx_action(struct softirq_action *h)
{
    struct softnet_data *sd = &__get_cpu_var(softnet_data);

    if (sd->completion_queue) {
	struct sk_buff *clist;

	local_irq_disable();
	clist = sd->completion_queue;
	sd->completion_queue = NULL;
	local_irq_enable();

	while (clist) {
	    struct sk_buff *skb = clist;
	    clist = clist->next;

	    BUG_TRAP(!atomic_read(&skb->users));
	    __kfree_skb(skb);
	}
    }

    if (sd->output_queue) {
	struct Qdisc *head;

	local_irq_disable();
	head = sd->output_queue;
	sd->output_queue = NULL;
	local_irq_enable();

	while (head) {
	    struct Qdisc *q = head;
	    spinlock_t *root_lock;

	    head = head->next_sched;

	    smp_mb__before_clear_bit();
	    clear_bit(__QDISC_STATE_SCHED, &q->state);

	    root_lock = qdisc_root_lock(q);
	    if (spin_trylock(root_lock)) {
		qdisc_run(q);
		spin_unlock(root_lock);
	    } else {
		__netif_schedule(q);
	    }
	}
    }
}
static inline int deliver_skb(struct sk_buff *skb,
			      struct packet_type *pt_prev,
			      struct net_device *orig_dev)
{
    atomic_inc(&skb->users);
    return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}
#if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE)
/* These hooks defined here for ATM */
struct net_bridge;
struct net_bridge_fdb_entry *(*br_fdb_get_hook)(struct net_bridge *br,
						unsigned char *addr) __read_mostly;
void (*br_fdb_put_hook)(struct net_bridge_fdb_entry *ent) __read_mostly;

/*
 * If bridge module is loaded call bridging hook.
 *  returns NULL if packet was consumed.
 */
struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p,
					struct sk_buff *skb) __read_mostly;
static inline struct sk_buff *handle_bridge(struct sk_buff *skb,
					    struct packet_type **pt_prev, int *ret,
					    struct net_device *orig_dev)
{
    struct net_bridge_port *port;

    if (skb->pkt_type == PACKET_LOOPBACK ||
	(port = rcu_dereference(skb->dev->br_port)) == NULL)
	return skb;

    if (*pt_prev) {
	*ret = deliver_skb(skb, *pt_prev, orig_dev);
	*pt_prev = NULL;
    }

    return br_handle_frame_hook(port, skb);
}
#else
#define handle_bridge(skb, pt_prev, ret, orig_dev)	(skb)
#endif
#if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE)
struct sk_buff *(*macvlan_handle_frame_hook)(struct sk_buff *skb) __read_mostly;
EXPORT_SYMBOL_GPL(macvlan_handle_frame_hook);

static inline struct sk_buff *handle_macvlan(struct sk_buff *skb,
					     struct packet_type **pt_prev,
					     int *ret,
					     struct net_device *orig_dev)
{
    if (skb->dev->macvlan_port == NULL)
	return skb;

    if (*pt_prev) {
	*ret = deliver_skb(skb, *pt_prev, orig_dev);
	*pt_prev = NULL;
    }
    return macvlan_handle_frame_hook(skb);
}
#else
#define handle_macvlan(skb, pt_prev, ret, orig_dev)	(skb)
#endif
#ifdef CONFIG_NET_CLS_ACT
/* TODO: Maybe we should just force sch_ingress to be compiled in
 * when CONFIG_NET_CLS_ACT is? Otherwise we pay for some useless
 * instructions (a compare and 2 stores) even when ingress is off
 * but CONFIG_NET_CLS_ACT is on.
 * NOTE: This doesn't stop any functionality; if you don't have
 * the ingress scheduler, you just can't add policies on ingress.
 */
static int ing_filter(struct sk_buff *skb)
{
    struct net_device *dev = skb->dev;
    u32 ttl = G_TC_RTTL(skb->tc_verd);
    struct netdev_queue *rxq;
    int result = TC_ACT_OK;
    struct Qdisc *q;

    if (MAX_RED_LOOP < ttl++) {
	printk(KERN_WARNING
	       "Redir loop detected Dropping packet (%d->%d)\n",
	       skb->iif, dev->ifindex);
	return TC_ACT_SHOT;
    }

    skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
    skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);

    rxq = &dev->rx_queue;

    q = rxq->qdisc;
    if (q) {
	spin_lock(qdisc_lock(q));
	result = qdisc_enqueue_root(skb, q);
	spin_unlock(qdisc_lock(q));
    }

    return result;
}
static inline struct sk_buff *handle_ing(struct sk_buff *skb,
					 struct packet_type **pt_prev,
					 int *ret, struct net_device *orig_dev)
{
    if (!skb->dev->rx_queue.qdisc)
	goto out;

    if (*pt_prev) {
	*ret = deliver_skb(skb, *pt_prev, orig_dev);
	*pt_prev = NULL;
    } else {
	/* Huh? Why does turning on AF_PACKET affect this? */
	skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
    }

    switch (ing_filter(skb)) {
    case TC_ACT_SHOT:
    case TC_ACT_STOLEN:
	kfree_skb(skb);
	return NULL;
    }

out:
    skb->tc_verd = 0;
    return skb;
}
#endif
/**
 *	netif_nit_deliver - deliver received packets to network taps
 *	@skb: buffer
 *
 *	This function is used to deliver incoming packets to network
 *	taps. It should be used when the normal netif_receive_skb path
 *	is bypassed, for example because of VLAN acceleration.
 */
void netif_nit_deliver(struct sk_buff *skb)
{
    struct packet_type *ptype;

    if (list_empty(&ptype_all))
	return;

    skb_reset_network_header(skb);
    skb_reset_transport_header(skb);
    skb->mac_len = skb->network_header - skb->mac_header;

    rcu_read_lock();
    list_for_each_entry_rcu(ptype, &ptype_all, list) {
	if (!ptype->dev || ptype->dev == skb->dev)
	    deliver_skb(skb, ptype, skb->dev);
    }
    rcu_read_unlock();
}
/**
 *	netif_receive_skb - process receive buffer from network
 *	@skb: buffer to process
 *
 *	netif_receive_skb() is the main receive data processing function.
 *	It always succeeds. The buffer may be dropped during processing
 *	for congestion control or by the protocol layers.
 *
 *	This function may only be called from softirq context and interrupts
 *	should be enabled.
 *
 *	Return values (usually ignored):
 *	NET_RX_SUCCESS: no congestion
 *	NET_RX_DROP: packet was dropped
 */
int netif_receive_skb(struct sk_buff *skb)
{
    struct packet_type *ptype, *pt_prev;
    struct net_device *orig_dev;
    int ret = NET_RX_DROP;
    __be16 type;

    /* if we've gotten here through NAPI, check netpoll */
    if (netpoll_receive_skb(skb))
	return NET_RX_DROP;

    if (!skb->tstamp.tv64)
	net_timestamp(skb);

    if (!skb->iif)
	skb->iif = skb->dev->ifindex;

    orig_dev = skb_bond(skb);

    if (!orig_dev)
	return NET_RX_DROP;

    __get_cpu_var(netdev_rx_stat).total++;

    skb_reset_network_header(skb);
    skb_reset_transport_header(skb);
    skb->mac_len = skb->network_header - skb->mac_header;

    pt_prev = NULL;

    rcu_read_lock();

    /* Don't receive packets in an exiting network namespace */
    if (!net_alive(dev_net(skb->dev)))
	goto out;

#ifdef CONFIG_NET_CLS_ACT
    if (skb->tc_verd & TC_NCLS) {
	skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
	goto ncls;
    }
#endif

    list_for_each_entry_rcu(ptype, &ptype_all, list) {
	if (!ptype->dev || ptype->dev == skb->dev) {
	    if (pt_prev)
		ret = deliver_skb(skb, pt_prev, orig_dev);
	    pt_prev = ptype;
	}
    }

#ifdef CONFIG_NET_CLS_ACT
    skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
    if (!skb)
	goto out;
ncls:
#endif

    skb = handle_bridge(skb, &pt_prev, &ret, orig_dev);
    if (!skb)
	goto out;
    skb = handle_macvlan(skb, &pt_prev, &ret, orig_dev);
    if (!skb)
	goto out;

    type = skb->protocol;
    list_for_each_entry_rcu(ptype,
	    &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
	if (ptype->type == type &&
	    (!ptype->dev || ptype->dev == skb->dev)) {
	    if (pt_prev)
		ret = deliver_skb(skb, pt_prev, orig_dev);
	    pt_prev = ptype;
	}
    }

    if (pt_prev) {
	ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
    } else {
	kfree_skb(skb);
	/* Jamal, now you will not be able to escape explaining
	 * to me how you were going to use this. :-)
	 */
	ret = NET_RX_DROP;
    }

out:
    rcu_read_unlock();
    return ret;
}
static int process_backlog(struct napi_struct *napi, int quota)
{
    int work = 0;
    struct softnet_data *queue = &__get_cpu_var(softnet_data);
    unsigned long start_time = jiffies;

    napi->weight = weight_p;
    do {
	struct sk_buff *skb;
	struct net_device *dev;

	local_irq_disable();
	skb = __skb_dequeue(&queue->input_pkt_queue);
	if (!skb) {
	    __napi_complete(napi);
	    local_irq_enable();
	    break;
	}
	local_irq_enable();

	dev = skb->dev;

	netif_receive_skb(skb);

	dev_put(dev);
    } while (++work < quota && jiffies == start_time);

    return work;
}
/**
 * __napi_schedule - schedule for receive
 * @n: entry to schedule
 *
 * The entry's receive function will be scheduled to run
 */
void __napi_schedule(struct napi_struct *n)
{
    unsigned long flags;

    local_irq_save(flags);
    list_add_tail(&n->poll_list, &__get_cpu_var(softnet_data).poll_list);
    __raise_softirq_irqoff(NET_RX_SOFTIRQ);
    local_irq_restore(flags);
}
EXPORT_SYMBOL(__napi_schedule);
static void net_rx_action(struct softirq_action *h)
{
    struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
    unsigned long start_time = jiffies;
    int budget = netdev_budget;
    void *have;

    local_irq_disable();

    while (!list_empty(list)) {
	struct napi_struct *n;
	int work, weight;

	/* If softirq window is exhausted then punt.
	 *
	 * Note that this is a slight policy change from the
	 * previous NAPI code, which would allow up to 2
	 * jiffies to pass before breaking out.  The test
	 * used to be "jiffies - start_time > 1".
	 */
	if (unlikely(budget <= 0 || jiffies != start_time))
	    goto softnet_break;

	local_irq_enable();

	/* Even though interrupts have been re-enabled, this
	 * access is safe because interrupts can only add new
	 * entries to the tail of this list, and only ->poll()
	 * calls can remove this head entry from the list.
	 */
	n = list_entry(list->next, struct napi_struct, poll_list);

	have = netpoll_poll_lock(n);

	weight = n->weight;

	/* This NAPI_STATE_SCHED test is for avoiding a race
	 * with netpoll's poll_napi().  Only the entity which
	 * obtains the lock and sees NAPI_STATE_SCHED set will
	 * actually make the ->poll() call.  Therefore we avoid
	 * accidentally calling ->poll() when NAPI is not scheduled.
	 */
	work = 0;
	if (test_bit(NAPI_STATE_SCHED, &n->state))
	    work = n->poll(n, weight);

	WARN_ON_ONCE(work > weight);

	budget -= work;

	local_irq_disable();

	/* Drivers must not modify the NAPI state if they
	 * consume the entire weight.  In such cases this code
	 * still "owns" the NAPI instance and therefore can
	 * move the instance around on the list at-will.
	 */
	if (unlikely(work == weight)) {
	    if (unlikely(napi_disable_pending(n)))
		__napi_complete(n);
	    else
		list_move_tail(&n->poll_list, list);
	}

	netpoll_poll_unlock(have);
    }
out:
    local_irq_enable();

#ifdef CONFIG_NET_DMA
    /*
     * There may not be any more sk_buffs coming right now, so push
     * any pending DMA copies to hardware
     */
    if (!cpus_empty(net_dma.channel_mask)) {
	int chan_idx;
	for_each_cpu_mask(chan_idx, net_dma.channel_mask) {
	    struct dma_chan *chan = net_dma.channels[chan_idx];
	    if (chan)
		dma_async_memcpy_issue_pending(chan);
	}
    }
#endif

    return;

softnet_break:
    __get_cpu_var(netdev_rx_stat).time_squeeze++;
    __raise_softirq_irqoff(NET_RX_SOFTIRQ);
    goto out;
}
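
/*
 * Driver poll sketch (hedged; example_clean_rx() is a stand-in for a
 * driver's RX processing, and the completion call reflects the NAPI API
 * of this kernel generation): completing NAPI only when less than the
 * full budget was used is exactly the rule net_rx_action() depends on.
 *
 *	static int example_poll(struct napi_struct *napi, int budget)
 *	{
 *		int work = example_clean_rx(napi, budget);
 *
 *		if (work < budget) {
 *			netif_rx_complete(napi->dev, napi);
 *			/+ re-enable the device RX interrupt here +/
 *		}
 *		return work;
 *	}
 */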
static gifconf_func_t *gifconf_list[NPROTO];

/**
 *	register_gifconf	-	register a SIOCGIF handler
 *	@family: Address family
 *	@gifconf: Function handler
 *
 *	Register protocol dependent address dumping routines. The handler
 *	that is passed must not be freed or reused until it has been replaced
 *	by another handler.
 */
int register_gifconf(unsigned int family, gifconf_func_t *gifconf)
{
    if (family >= NPROTO)
	return -EINVAL;
    gifconf_list[family] = gifconf;
    return 0;
}
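
/*
 * Registration sketch (illustrative): an address family installs its
 * dumper once at init time, e.g. IPv4 could do
 *
 *	register_gifconf(PF_INET, inet_gifconf);
 *
 * where inet_gifconf follows the gifconf_func_t signature; the actual
 * call site lives in the protocol code, not in this file.
 */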
/*
 *	Map an interface index to its name (SIOCGIFNAME)
 */

/*
 *	We need this ioctl for efficient implementation of the
 *	if_indextoname() function required by the IPv6 API.  Without
 *	it, we would have to search all the interfaces to find a
 *	match.  --pb
 */

static int dev_ifname(struct net *net, struct ifreq __user *arg)
{
    struct net_device *dev;
    struct ifreq ifr;

    /*
     *	Fetch the caller's info block.
     */

    if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
	return -EFAULT;

    read_lock(&dev_base_lock);
    dev = __dev_get_by_index(net, ifr.ifr_ifindex);
    if (!dev) {
	read_unlock(&dev_base_lock);
	return -ENODEV;
    }

    strcpy(ifr.ifr_name, dev->name);
    read_unlock(&dev_base_lock);

    if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
	return -EFAULT;
    return 0;
}
/*
 *	Perform a SIOCGIFCONF call. This structure will change
 *	size eventually, and there is nothing I can do about it.
 *	Thus we will need a 'compatibility mode'.
 */

static int dev_ifconf(struct net *net, char __user *arg)
{
	struct ifconf ifc;
	struct net_device *dev;
	char __user *pos;
	int len, total, i;

	/*
	 *	Fetch the caller's info block.
	 */

	if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
		return -EFAULT;

	pos = ifc.ifc_buf;
	len = ifc.ifc_len;

	/*
	 *	Loop over the interfaces, and write an info block for each.
	 */

	total = 0;
	for_each_netdev(net, dev) {
		for (i = 0; i < NPROTO; i++) {
			if (gifconf_list[i]) {
				int done;
				if (!pos)
					done = gifconf_list[i](dev, NULL, 0);
				else
					done = gifconf_list[i](dev, pos + total,
							       len - total);
				if (done < 0)
					return -EFAULT;
				total += done;
			}
		}
	}

	/*
	 *	All done.  Write the updated control block back to the caller.
	 */
	ifc.ifc_len = total;

	/*
	 *	Both BSD and Solaris return 0 here, so we do too.
	 */
	return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
}
#ifdef CONFIG_PROC_FS
/*
 *	This is invoked by the /proc filesystem handler to display a device
 *	in detail.
 */
void *dev_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(dev_base_lock)
{
	struct net *net = seq_file_net(seq);
	loff_t off;
	struct net_device *dev;

	read_lock(&dev_base_lock);
	if (!*pos)
		return SEQ_START_TOKEN;

	off = 1;
	for_each_netdev(net, dev)
		if (off++ == *pos)
			return dev;

	return NULL;
}

void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct net *net = seq_file_net(seq);
	++*pos;
	return v == SEQ_START_TOKEN ?
		first_net_device(net) : next_net_device((struct net_device *)v);
}

void dev_seq_stop(struct seq_file *seq, void *v)
	__releases(dev_base_lock)
{
	read_unlock(&dev_base_lock);
}

static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
{
	struct net_device_stats *stats = dev->get_stats(dev);

	seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
		   "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
		   dev->name, stats->rx_bytes, stats->rx_packets,
		   stats->rx_errors,
		   stats->rx_dropped + stats->rx_missed_errors,
		   stats->rx_fifo_errors,
		   stats->rx_length_errors + stats->rx_over_errors +
		    stats->rx_crc_errors + stats->rx_frame_errors,
		   stats->rx_compressed, stats->multicast,
		   stats->tx_bytes, stats->tx_packets,
		   stats->tx_errors, stats->tx_dropped,
		   stats->tx_fifo_errors, stats->collisions,
		   stats->tx_carrier_errors +
		    stats->tx_aborted_errors +
		    stats->tx_window_errors +
		    stats->tx_heartbeat_errors,
		   stats->tx_compressed);
}

/*
 *	Called from the PROCfs module. This now uses the new arbitrary sized
 *	/proc/net interface to create /proc/net/dev
 */
static int dev_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "Inter-|   Receive                            "
			      "                    |  Transmit\n"
			      " face |bytes    packets errs drop fifo frame "
			      "compressed multicast|bytes    packets errs "
			      "drop fifo colls carrier compressed\n");
	else
		dev_seq_printf_stats(seq, v);
	return 0;
}
static struct netif_rx_stats *softnet_get_online(loff_t *pos)
{
	struct netif_rx_stats *rc = NULL;

	while (*pos < nr_cpu_ids)
		if (cpu_online(*pos)) {
			rc = &per_cpu(netdev_rx_stat, *pos);
			break;
		} else
			++*pos;
	return rc;
}

static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
{
	return softnet_get_online(pos);
}

static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return softnet_get_online(pos);
}

static void softnet_seq_stop(struct seq_file *seq, void *v)
{
}

static int softnet_seq_show(struct seq_file *seq, void *v)
{
	struct netif_rx_stats *s = v;

	seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
		   s->total, s->dropped, s->time_squeeze, 0,
		   0, 0, 0, 0, /* was fastroute */
		   s->cpu_collision);
	return 0;
}
static const struct seq_operations dev_seq_ops = {
	.start = dev_seq_start,
	.next  = dev_seq_next,
	.stop  = dev_seq_stop,
	.show  = dev_seq_show,
};

static int dev_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &dev_seq_ops,
			    sizeof(struct seq_net_private));
}

static const struct file_operations dev_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = dev_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};

static const struct seq_operations softnet_seq_ops = {
	.start = softnet_seq_start,
	.next  = softnet_seq_next,
	.stop  = softnet_seq_stop,
	.show  = softnet_seq_show,
};

static int softnet_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &softnet_seq_ops);
}

static const struct file_operations softnet_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = softnet_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};
static void *ptype_get_idx(loff_t pos)
{
	struct packet_type *pt = NULL;
	loff_t i = 0;
	int t;

	list_for_each_entry_rcu(pt, &ptype_all, list) {
		if (i == pos)
			return pt;
		++i;
	}

	for (t = 0; t < PTYPE_HASH_SIZE; t++) {
		list_for_each_entry_rcu(pt, &ptype_base[t], list) {
			if (i == pos)
				return pt;
			++i;
		}
	}
	return NULL;
}

static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
}

static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct packet_type *pt;
	struct list_head *nxt;
	int hash;

	++*pos;
	if (v == SEQ_START_TOKEN)
		return ptype_get_idx(0);

	pt = v;
	nxt = pt->list.next;
	if (pt->type == htons(ETH_P_ALL)) {
		if (nxt != &ptype_all)
			goto found;
		hash = 0;
		nxt = ptype_base[0].next;
	} else
		hash = ntohs(pt->type) & PTYPE_HASH_MASK;

	while (nxt == &ptype_base[hash]) {
		if (++hash >= PTYPE_HASH_SIZE)
			return NULL;
		nxt = ptype_base[hash].next;
	}
found:
	return list_entry(nxt, struct packet_type, list);
}

static void ptype_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}

static void ptype_seq_decode(struct seq_file *seq, void *sym)
{
#ifdef CONFIG_KALLSYMS
	unsigned long offset = 0, symsize;
	const char *symname;
	char *modname;
	char namebuf[128];

	symname = kallsyms_lookup((unsigned long)sym, &symsize, &offset,
				  &modname, namebuf);

	if (symname) {
		char *delim = ":";

		if (!modname)
			modname = delim = "";
		seq_printf(seq, "%s%s%s%s+0x%lx", delim, modname, delim,
			   symname, offset);
		return;
	}
#endif

	seq_printf(seq, "[%p]", sym);
}

static int ptype_seq_show(struct seq_file *seq, void *v)
{
	struct packet_type *pt = v;

	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "Type Device      Function\n");
	else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
		if (pt->type == htons(ETH_P_ALL))
			seq_puts(seq, "ALL ");
		else
			seq_printf(seq, "%04x", ntohs(pt->type));

		seq_printf(seq, " %-8s ",
			   pt->dev ? pt->dev->name : "");
		ptype_seq_decode(seq,  pt->func);
		seq_putc(seq, '\n');
	}

	return 0;
}
static const struct seq_operations ptype_seq_ops = {
	.start = ptype_seq_start,
	.next  = ptype_seq_next,
	.stop  = ptype_seq_stop,
	.show  = ptype_seq_show,
};

static int ptype_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ptype_seq_ops,
			    sizeof(struct seq_net_private));
}

static const struct file_operations ptype_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ptype_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};
static int __net_init dev_proc_net_init(struct net *net)
{
	int rc = -ENOMEM;

	if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
		goto out;
	if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
		goto out_dev;
	if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
		goto out_softnet;

	if (wext_proc_init(net))
		goto out_ptype;
	rc = 0;
out:
	return rc;
out_ptype:
	proc_net_remove(net, "ptype");
out_softnet:
	proc_net_remove(net, "softnet_stat");
out_dev:
	proc_net_remove(net, "dev");
	goto out;
}

static void __net_exit dev_proc_net_exit(struct net *net)
{
	wext_proc_exit(net);

	proc_net_remove(net, "ptype");
	proc_net_remove(net, "softnet_stat");
	proc_net_remove(net, "dev");
}

static struct pernet_operations __net_initdata dev_proc_ops = {
	.init = dev_proc_net_init,
	.exit = dev_proc_net_exit,
};

static int __init dev_proc_init(void)
{
	return register_pernet_subsys(&dev_proc_ops);
}
#else
#define dev_proc_init() 0
#endif	/* CONFIG_PROC_FS */
/**
 *	netdev_set_master	-	set up master/slave pair
 *	@slave: slave device
 *	@master: new master device
 *
 *	Changes the master device of the slave. Pass %NULL to break the
 *	bonding. The caller must hold the RTNL semaphore. On a failure
 *	a negative errno code is returned. On success the reference counts
 *	are adjusted, %RTM_NEWLINK is sent to the routing socket and the
 *	function returns zero.
 */
int netdev_set_master(struct net_device *slave, struct net_device *master)
{
	struct net_device *old = slave->master;

	ASSERT_RTNL();

	if (master) {
		if (old)
			return -EBUSY;
		dev_hold(master);
	}

	slave->master = master;

	synchronize_net();

	if (old)
		dev_put(old);

	if (master)
		slave->flags |= IFF_SLAVE;
	else
		slave->flags &= ~IFF_SLAVE;

	rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
	return 0;
}
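/*
 * Example (illustrative sketch): a bonding-style driver enslaving a
 * device while holding the RTNL semaphore; everything except
 * netdev_set_master() itself is schematic.
 *
 *	rtnl_lock();
 *	err = netdev_set_master(slave_dev, bond_dev);
 *	if (!err)
 *		dev_set_allmulti(slave_dev, 1);
 *	rtnl_unlock();
 */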
static int __dev_set_promiscuity(struct net_device *dev, int inc)
{
	unsigned short old_flags = dev->flags;

	dev->flags |= IFF_PROMISC;
	dev->promiscuity += inc;
	if (dev->promiscuity == 0) {
		/*
		 * Avoid overflow.
		 * If inc causes overflow, untouch promisc and return error.
		 */
		if (inc < 0)
			dev->flags &= ~IFF_PROMISC;
		else {
			dev->promiscuity -= inc;
			printk(KERN_WARNING "%s: promiscuity touches roof, "
				"set promiscuity failed, promiscuity feature "
				"of device might be broken.\n", dev->name);
			return -EOVERFLOW;
		}
	}
	if (dev->flags != old_flags) {
		printk(KERN_INFO "device %s %s promiscuous mode\n",
		       dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
							       "left");
		if (audit_enabled)
			audit_log(current->audit_context, GFP_ATOMIC,
				AUDIT_ANOM_PROMISCUOUS,
				"dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
				dev->name, (dev->flags & IFF_PROMISC),
				(old_flags & IFF_PROMISC),
				audit_get_loginuid(current),
				current->uid, current->gid,
				audit_get_sessionid(current));

		if (dev->change_rx_flags)
			dev->change_rx_flags(dev, IFF_PROMISC);
	}
	return 0;
}
/**
 *	dev_set_promiscuity	- update promiscuity count on a device
 *	@dev: device
 *	@inc: modifier
 *
 *	Add or remove promiscuity from a device. While the count in the device
 *	remains above zero the interface remains promiscuous. Once it hits zero
 *	the device reverts back to normal filtering operation. A negative inc
 *	value is used to drop promiscuity on the device.
 *	Return 0 if successful or a negative errno code on error.
 */
int dev_set_promiscuity(struct net_device *dev, int inc)
{
	unsigned short old_flags = dev->flags;
	int err;

	err = __dev_set_promiscuity(dev, inc);
	if (err < 0)
		return err;
	if (dev->flags != old_flags)
		dev_set_rx_mode(dev);
	return err;
}
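/*
 * Example (illustrative): roughly what a packet-capture socket does
 * while holding the RTNL semaphore -- take a promiscuity reference for
 * the lifetime of the capture instead of toggling IFF_PROMISC by hand,
 * so multiple users compose correctly.
 *
 *	rtnl_lock();
 *	err = dev_set_promiscuity(dev, 1);	-- start capturing
 *	...
 *	dev_set_promiscuity(dev, -1);		-- done, drop the reference
 *	rtnl_unlock();
 */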
/**
 *	dev_set_allmulti	- update allmulti count on a device
 *	@dev: device
 *	@inc: modifier
 *
 *	Add or remove reception of all multicast frames to a device. While the
 *	count in the device remains above zero the interface continues to
 *	receive all multicast frames. Once it hits zero the device reverts
 *	back to normal filtering operation. A negative @inc value is used to
 *	drop the counter when releasing a resource needing all multicasts.
 *	Return 0 if successful or a negative errno code on error.
 */

int dev_set_allmulti(struct net_device *dev, int inc)
{
	unsigned short old_flags = dev->flags;

	dev->flags |= IFF_ALLMULTI;
	dev->allmulti += inc;
	if (dev->allmulti == 0) {
		/*
		 * Avoid overflow.
		 * If inc causes overflow, untouch allmulti and return error.
		 */
		if (inc < 0)
			dev->flags &= ~IFF_ALLMULTI;
		else {
			dev->allmulti -= inc;
			printk(KERN_WARNING "%s: allmulti touches roof, "
				"set allmulti failed, allmulti feature of "
				"device might be broken.\n", dev->name);
			return -EOVERFLOW;
		}
	}
	if (dev->flags ^ old_flags) {
		if (dev->change_rx_flags)
			dev->change_rx_flags(dev, IFF_ALLMULTI);
		dev_set_rx_mode(dev);
	}
	return 0;
}
/*
 *	Upload unicast and multicast address lists to device and
 *	configure RX filtering. When the device doesn't support unicast
 *	filtering it is put in promiscuous mode while unicast addresses
 *	are present.
 */
void __dev_set_rx_mode(struct net_device *dev)
{
	/* dev_open will call this function so the list will stay sane. */
	if (!(dev->flags&IFF_UP))
		return;

	if (!netif_device_present(dev))
		return;

	if (dev->set_rx_mode)
		dev->set_rx_mode(dev);
	else {
		/* Unicast addresses changes may only happen under the rtnl,
		 * therefore calling __dev_set_promiscuity here is safe.
		 */
		if (dev->uc_count > 0 && !dev->uc_promisc) {
			__dev_set_promiscuity(dev, 1);
			dev->uc_promisc = 1;
		} else if (dev->uc_count == 0 && dev->uc_promisc) {
			__dev_set_promiscuity(dev, -1);
			dev->uc_promisc = 0;
		}

		if (dev->set_multicast_list)
			dev->set_multicast_list(dev);
	}
}

void dev_set_rx_mode(struct net_device *dev)
{
	netif_addr_lock_bh(dev);
	__dev_set_rx_mode(dev);
	netif_addr_unlock_bh(dev);
}
int __dev_addr_delete(struct dev_addr_list **list, int *count,
		      void *addr, int alen, int glbl)
{
	struct dev_addr_list *da;

	for (; (da = *list) != NULL; list = &da->next) {
		if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
		    alen == da->da_addrlen) {
			if (glbl) {
				int old_glbl = da->da_gusers;
				da->da_gusers = 0;
				if (old_glbl == 0)
					break;
			}
			if (--da->da_users)
				return 0;

			*list = da->next;
			kfree(da);
			(*count)--;
			return 0;
		}
	}
	return -ENOENT;
}

int __dev_addr_add(struct dev_addr_list **list, int *count,
		   void *addr, int alen, int glbl)
{
	struct dev_addr_list *da;

	for (da = *list; da != NULL; da = da->next) {
		if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
		    da->da_addrlen == alen) {
			if (glbl) {
				int old_glbl = da->da_gusers;
				da->da_gusers = 1;
				if (old_glbl)
					return 0;
			}
			da->da_users++;
			return 0;
		}
	}

	da = kzalloc(sizeof(*da), GFP_ATOMIC);
	if (da == NULL)
		return -ENOMEM;
	memcpy(da->da_addr, addr, alen);
	da->da_addrlen = alen;
	da->da_users = 1;
	da->da_gusers = glbl ? 1 : 0;
	da->next = *list;
	*list = da;
	(*count)++;
	return 0;
}
/**
 *	dev_unicast_delete	- Release secondary unicast address.
 *	@dev: device
 *	@addr: address to delete
 *	@alen: length of @addr
 *
 *	Release reference to a secondary unicast address and remove it
 *	from the device if the reference count drops to zero.
 *
 *	The caller must hold the rtnl_mutex.
 */
int dev_unicast_delete(struct net_device *dev, void *addr, int alen)
{
	int err;

	ASSERT_RTNL();

	netif_addr_lock_bh(dev);
	err = __dev_addr_delete(&dev->uc_list, &dev->uc_count, addr, alen, 0);
	if (!err)
		__dev_set_rx_mode(dev);
	netif_addr_unlock_bh(dev);
	return err;
}
EXPORT_SYMBOL(dev_unicast_delete);

/**
 *	dev_unicast_add	- add a secondary unicast address
 *	@dev: device
 *	@addr: address to add
 *	@alen: length of @addr
 *
 *	Add a secondary unicast address to the device or increase
 *	the reference count if it already exists.
 *
 *	The caller must hold the rtnl_mutex.
 */
int dev_unicast_add(struct net_device *dev, void *addr, int alen)
{
	int err;

	ASSERT_RTNL();

	netif_addr_lock_bh(dev);
	err = __dev_addr_add(&dev->uc_list, &dev->uc_count, addr, alen, 0);
	if (!err)
		__dev_set_rx_mode(dev);
	netif_addr_unlock_bh(dev);
	return err;
}
EXPORT_SYMBOL(dev_unicast_add);
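/*
 * Example (illustrative): a stacked driver in the spirit of macvlan
 * adding its own MAC address to the lower device's secondary unicast
 * list under the rtnl_mutex.  "lowerdev" and "vlan" are placeholder
 * names; only dev_unicast_add() is this file's API.
 *
 *	ASSERT_RTNL();
 *	err = dev_unicast_add(lowerdev, vlan->dev->dev_addr, ETH_ALEN);
 *	if (err)
 *		goto unwind;
 */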
int __dev_addr_sync(struct dev_addr_list **to, int *to_count,
		    struct dev_addr_list **from, int *from_count)
{
	struct dev_addr_list *da, *next;
	int err = 0;

	da = *from;
	while (da != NULL) {
		next = da->next;
		if (!da->da_synced) {
			err = __dev_addr_add(to, to_count,
					     da->da_addr, da->da_addrlen, 0);
			if (err < 0)
				break;
			da->da_synced = 1;
			da->da_users++;
		} else if (da->da_users == 1) {
			__dev_addr_delete(to, to_count,
					  da->da_addr, da->da_addrlen, 0);
			__dev_addr_delete(from, from_count,
					  da->da_addr, da->da_addrlen, 0);
		}
		da = next;
	}
	return err;
}

void __dev_addr_unsync(struct dev_addr_list **to, int *to_count,
		       struct dev_addr_list **from, int *from_count)
{
	struct dev_addr_list *da, *next;

	da = *from;
	while (da != NULL) {
		next = da->next;
		if (da->da_synced) {
			__dev_addr_delete(to, to_count,
					  da->da_addr, da->da_addrlen, 0);
			da->da_synced = 0;
			__dev_addr_delete(from, from_count,
					  da->da_addr, da->da_addrlen, 0);
		}
		da = next;
	}
}
/**
 *	dev_unicast_sync - Synchronize device's unicast list to another device
 *	@to: destination device
 *	@from: source device
 *
 *	Add newly added addresses to the destination device and release
 *	addresses that have no users left. The source device must be
 *	locked by netif_tx_lock_bh.
 *
 *	This function is intended to be called from the dev->set_rx_mode
 *	function of layered software devices.
 */
int dev_unicast_sync(struct net_device *to, struct net_device *from)
{
	int err = 0;

	netif_addr_lock_bh(to);
	err = __dev_addr_sync(&to->uc_list, &to->uc_count,
			      &from->uc_list, &from->uc_count);
	if (!err)
		__dev_set_rx_mode(to);
	netif_addr_unlock_bh(to);
	return err;
}
EXPORT_SYMBOL(dev_unicast_sync);

/**
 *	dev_unicast_unsync - Remove synchronized addresses from the destination device
 *	@to: destination device
 *	@from: source device
 *
 *	Remove all addresses that were added to the destination device by
 *	dev_unicast_sync(). This function is intended to be called from the
 *	dev->stop function of layered software devices.
 */
void dev_unicast_unsync(struct net_device *to, struct net_device *from)
{
	netif_addr_lock_bh(from);
	netif_addr_lock(to);

	__dev_addr_unsync(&to->uc_list, &to->uc_count,
			  &from->uc_list, &from->uc_count);
	__dev_set_rx_mode(to);

	netif_addr_unlock(to);
	netif_addr_unlock_bh(from);
}
EXPORT_SYMBOL(dev_unicast_unsync);
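/*
 * Example (illustrative sketch): how a layered device such as a VLAN
 * uses the pair above -- sync from its own ->set_rx_mode(), unsync in
 * ->stop().  "vlan_" names and real_dev() are placeholders for the
 * upper driver's own code.
 *
 *	static void vlan_set_rx_mode(struct net_device *dev)
 *	{
 *		dev_unicast_sync(real_dev(dev), dev);
 *	}
 *
 *	static int vlan_stop(struct net_device *dev)
 *	{
 *		dev_unicast_unsync(real_dev(dev), dev);
 *		return 0;
 *	}
 */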
static void __dev_addr_discard(struct dev_addr_list **list)
{
	struct dev_addr_list *tmp;

	while (*list != NULL) {
		tmp = *list;
		*list = tmp->next;
		if (tmp->da_users > tmp->da_gusers)
			printk("__dev_addr_discard: address leakage! "
			       "da_users=%d\n", tmp->da_users);
		kfree(tmp);
	}
}

static void dev_addr_discard(struct net_device *dev)
{
	netif_addr_lock_bh(dev);

	__dev_addr_discard(&dev->uc_list);
	dev->uc_count = 0;

	__dev_addr_discard(&dev->mc_list);
	dev->mc_count = 0;

	netif_addr_unlock_bh(dev);
}
unsigned dev_get_flags(const struct net_device *dev)
{
	unsigned flags;

	flags = (dev->flags & ~(IFF_PROMISC |
				IFF_ALLMULTI |
				IFF_RUNNING |
				IFF_LOWER_UP |
				IFF_DORMANT)) |
		(dev->gflags & (IFF_PROMISC |
				IFF_ALLMULTI));

	if (netif_running(dev)) {
		if (netif_oper_up(dev))
			flags |= IFF_RUNNING;
		if (netif_carrier_ok(dev))
			flags |= IFF_LOWER_UP;
		if (netif_dormant(dev))
			flags |= IFF_DORMANT;
	}

	return flags;
}
int dev_change_flags(struct net_device *dev, unsigned flags)
{
	int ret, changes;
	int old_flags = dev->flags;

	ASSERT_RTNL();

	/*
	 *	Set the flags on our device.
	 */

	dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
			       IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
			       IFF_AUTOMEDIA)) |
		     (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
				    IFF_ALLMULTI));

	/*
	 *	Load in the correct multicast list now the flags have changed.
	 */

	if (dev->change_rx_flags && (old_flags ^ flags) & IFF_MULTICAST)
		dev->change_rx_flags(dev, IFF_MULTICAST);

	dev_set_rx_mode(dev);

	/*
	 *	Have we downed the interface?  We handle IFF_UP ourselves
	 *	according to user attempts to set it, rather than blindly
	 *	setting it.
	 */

	ret = 0;
	if ((old_flags ^ flags) & IFF_UP) {	/* Bit is different  ? */
		ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev);

		if (!ret)
			dev_set_rx_mode(dev);
	}

	if (dev->flags & IFF_UP &&
	    ((old_flags ^ dev->flags) &~ (IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
					  IFF_VOLATILE)))
		call_netdevice_notifiers(NETDEV_CHANGE, dev);

	if ((flags ^ dev->gflags) & IFF_PROMISC) {
		int inc = (flags & IFF_PROMISC) ? +1 : -1;
		dev->gflags ^= IFF_PROMISC;
		dev_set_promiscuity(dev, inc);
	}

	/* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
	   is important. Some (broken) drivers set IFF_PROMISC, when
	   IFF_ALLMULTI is requested not asking us and not reporting.
	 */
	if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
		int inc = (flags & IFF_ALLMULTI) ? +1 : -1;
		dev->gflags ^= IFF_ALLMULTI;
		dev_set_allmulti(dev, inc);
	}

	/* Exclude state transition flags, already notified */
	changes = (old_flags ^ dev->flags) & ~(IFF_UP | IFF_RUNNING);
	if (changes)
		rtmsg_ifinfo(RTM_NEWLINK, dev, changes);

	return ret;
}
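/*
 * Example (illustrative): bringing an interface up from kernel code
 * that holds the RTNL semaphore -- essentially what the SIOCSIFFLAGS
 * ioctl path below ends up doing on behalf of userspace.
 *
 *	rtnl_lock();
 *	err = dev_change_flags(dev, dev_get_flags(dev) | IFF_UP);
 *	rtnl_unlock();
 */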
int dev_set_mtu(struct net_device *dev, int new_mtu)
{
	int err;

	if (new_mtu == dev->mtu)
		return 0;

	/*	MTU must be positive.	 */
	if (new_mtu < 0)
		return -EINVAL;

	if (!netif_device_present(dev))
		return -ENODEV;

	err = 0;
	if (dev->change_mtu)
		err = dev->change_mtu(dev, new_mtu);
	else
		dev->mtu = new_mtu;
	if (!err && dev->flags & IFF_UP)
		call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
	return err;
}
int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
{
	int err;

	if (!dev->set_mac_address)
		return -EOPNOTSUPP;
	if (sa->sa_family != dev->type)
		return -EINVAL;
	if (!netif_device_present(dev))
		return -ENODEV;
	err = dev->set_mac_address(dev, sa);
	if (!err)
		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
	return err;
}
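/*
 * Example (illustrative): changing a device's hardware address under
 * the RTNL semaphore.  The six-byte address is a documentation value,
 * not anything defined in this file.
 *
 *	struct sockaddr sa;
 *
 *	sa.sa_family = dev->type;
 *	memcpy(sa.sa_data, "\x02\x00\x00\x00\x00\x01", ETH_ALEN);
 *	rtnl_lock();
 *	err = dev_set_mac_address(dev, &sa);
 *	rtnl_unlock();
 */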
/*
 *	Perform the SIOCxIFxxx calls, inside read_lock(dev_base_lock)
 */
static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
{
	struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);

	if (!dev)
		return -ENODEV;

	switch (cmd) {
	case SIOCGIFFLAGS:	/* Get interface flags */
		ifr->ifr_flags = dev_get_flags(dev);
		return 0;

	case SIOCGIFMETRIC:	/* Get the metric on the interface
				   (currently unused) */
		ifr->ifr_metric = 0;
		return 0;

	case SIOCGIFMTU:	/* Get the MTU of a device */
		ifr->ifr_mtu = dev->mtu;
		return 0;

	case SIOCGIFHWADDR:
		if (!dev->addr_len)
			memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
		else
			memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
			       min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
		ifr->ifr_hwaddr.sa_family = dev->type;
		return 0;

	case SIOCGIFSLAVE:
		return -EINVAL;

	case SIOCGIFMAP:
		ifr->ifr_map.mem_start = dev->mem_start;
		ifr->ifr_map.mem_end   = dev->mem_end;
		ifr->ifr_map.base_addr = dev->base_addr;
		ifr->ifr_map.irq       = dev->irq;
		ifr->ifr_map.dma       = dev->dma;
		ifr->ifr_map.port      = dev->if_port;
		return 0;

	case SIOCGIFINDEX:
		ifr->ifr_ifindex = dev->ifindex;
		return 0;

	case SIOCGIFTXQLEN:
		ifr->ifr_qlen = dev->tx_queue_len;
		return 0;

	default:
		/* dev_ioctl() should ensure this case
		 * is never reached
		 */
		WARN_ON(1);
		return -EINVAL;
	}
}
/*
 *	Perform the SIOCxIFxxx calls, inside rtnl_lock()
 */
static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
{
	int err;
	struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);

	if (!dev)
		return -ENODEV;

	switch (cmd) {
	case SIOCSIFFLAGS:	/* Set interface flags */
		return dev_change_flags(dev, ifr->ifr_flags);

	case SIOCSIFMETRIC:	/* Set the metric on the interface
				   (currently unused) */
		return -EOPNOTSUPP;

	case SIOCSIFMTU:	/* Set the MTU of a device */
		return dev_set_mtu(dev, ifr->ifr_mtu);

	case SIOCSIFHWADDR:
		return dev_set_mac_address(dev, &ifr->ifr_hwaddr);

	case SIOCSIFHWBROADCAST:
		if (ifr->ifr_hwaddr.sa_family != dev->type)
			return -EINVAL;
		memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
		       min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
		return 0;

	case SIOCSIFMAP:
		if (dev->set_config) {
			if (!netif_device_present(dev))
				return -ENODEV;
			return dev->set_config(dev, &ifr->ifr_map);
		}
		return -EOPNOTSUPP;

	case SIOCADDMULTI:
		if ((!dev->set_multicast_list && !dev->set_rx_mode) ||
		    ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
			return -EINVAL;
		if (!netif_device_present(dev))
			return -ENODEV;
		return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data,
				  dev->addr_len, 1);

	case SIOCDELMULTI:
		if ((!dev->set_multicast_list && !dev->set_rx_mode) ||
		    ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
			return -EINVAL;
		if (!netif_device_present(dev))
			return -ENODEV;
		return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data,
				     dev->addr_len, 1);

	case SIOCSIFTXQLEN:
		if (ifr->ifr_qlen < 0)
			return -EINVAL;
		dev->tx_queue_len = ifr->ifr_qlen;
		return 0;

	case SIOCSIFNAME:
		ifr->ifr_newname[IFNAMSIZ-1] = '\0';
		return dev_change_name(dev, ifr->ifr_newname);

	/*
	 *	Unknown or private ioctl
	 */

	default:
		if ((cmd >= SIOCDEVPRIVATE &&
		    cmd <= SIOCDEVPRIVATE + 15) ||
		    cmd == SIOCBONDENSLAVE ||
		    cmd == SIOCBONDRELEASE ||
		    cmd == SIOCBONDSETHWADDR ||
		    cmd == SIOCBONDSLAVEINFOQUERY ||
		    cmd == SIOCBONDINFOQUERY ||
		    cmd == SIOCBONDCHANGEACTIVE ||
		    cmd == SIOCGMIIPHY ||
		    cmd == SIOCGMIIREG ||
		    cmd == SIOCSMIIREG ||
		    cmd == SIOCBRADDIF ||
		    cmd == SIOCBRDELIF ||
		    cmd == SIOCWANDEV) {
			err = -EOPNOTSUPP;
			if (dev->do_ioctl) {
				if (netif_device_present(dev))
					err = dev->do_ioctl(dev, ifr, cmd);
				else
					err = -ENODEV;
			}
		} else
			err = -EINVAL;
	}
	return err;
}
/*
 *	This function handles all "interface"-type I/O control requests. The actual
 *	'doing' part of this is dev_ifsioc above.
 */

/**
 *	dev_ioctl	-	network device ioctl
 *	@net: the applicable net namespace
 *	@cmd: command to issue
 *	@arg: pointer to a struct ifreq in user space
 *
 *	Issue ioctl functions to devices. This is normally called by the
 *	user space syscall interfaces but can sometimes be useful for
 *	other purposes. The return value is the return from the syscall if
 *	positive or a negative errno code on error.
 */

int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
{
	struct ifreq ifr;
	int ret;
	char *colon;

	/* One special case: SIOCGIFCONF takes ifconf argument
	   and requires shared lock, because it sleeps writing
	   to user space.
	 */

	if (cmd == SIOCGIFCONF) {
		rtnl_lock();
		ret = dev_ifconf(net, (char __user *) arg);
		rtnl_unlock();
		return ret;
	}
	if (cmd == SIOCGIFNAME)
		return dev_ifname(net, (struct ifreq __user *)arg);

	if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
		return -EFAULT;

	ifr.ifr_name[IFNAMSIZ-1] = 0;

	colon = strchr(ifr.ifr_name, ':');
	if (colon)
		*colon = 0;

	/*
	 *	See which interface the caller is talking about.
	 */

	switch (cmd) {
	/*
	 *	These ioctl calls:
	 *	- can be done by all.
	 *	- atomic and do not require locking.
	 *	- return a value
	 */
	case SIOCGIFFLAGS:
	case SIOCGIFMETRIC:
	case SIOCGIFMTU:
	case SIOCGIFHWADDR:
	case SIOCGIFSLAVE:
	case SIOCGIFMAP:
	case SIOCGIFINDEX:
	case SIOCGIFTXQLEN:
		dev_load(net, ifr.ifr_name);
		read_lock(&dev_base_lock);
		ret = dev_ifsioc_locked(net, &ifr, cmd);
		read_unlock(&dev_base_lock);
		if (!ret) {
			if (colon)
				*colon = ':';
			if (copy_to_user(arg, &ifr,
					 sizeof(struct ifreq)))
				ret = -EFAULT;
		}
		return ret;

	case SIOCETHTOOL:
		dev_load(net, ifr.ifr_name);
		rtnl_lock();
		ret = dev_ethtool(net, &ifr);
		rtnl_unlock();
		if (!ret) {
			if (colon)
				*colon = ':';
			if (copy_to_user(arg, &ifr,
					 sizeof(struct ifreq)))
				ret = -EFAULT;
		}
		return ret;

	/*
	 *	These ioctl calls:
	 *	- require superuser power.
	 *	- require strict serialization.
	 *	- return a value
	 */
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSIFNAME:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		dev_load(net, ifr.ifr_name);
		rtnl_lock();
		ret = dev_ifsioc(net, &ifr, cmd);
		rtnl_unlock();
		if (!ret) {
			if (colon)
				*colon = ':';
			if (copy_to_user(arg, &ifr,
					 sizeof(struct ifreq)))
				ret = -EFAULT;
		}
		return ret;

	/*
	 *	These ioctl calls:
	 *	- require superuser power.
	 *	- require strict serialization.
	 *	- do not return a value
	 */
	case SIOCSIFFLAGS:
	case SIOCSIFMETRIC:
	case SIOCSIFMTU:
	case SIOCSIFMAP:
	case SIOCSIFHWADDR:
	case SIOCSIFSLAVE:
	case SIOCADDMULTI:
	case SIOCDELMULTI:
	case SIOCSIFHWBROADCAST:
	case SIOCSIFTXQLEN:
	case SIOCSMIIREG:
	case SIOCBONDENSLAVE:
	case SIOCBONDRELEASE:
	case SIOCBONDSETHWADDR:
	case SIOCBONDCHANGEACTIVE:
	case SIOCBRADDIF:
	case SIOCBRDELIF:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		/* fall through */
	case SIOCBONDSLAVEINFOQUERY:
	case SIOCBONDINFOQUERY:
		dev_load(net, ifr.ifr_name);
		rtnl_lock();
		ret = dev_ifsioc(net, &ifr, cmd);
		rtnl_unlock();
		return ret;

	case SIOCGIFMEM:
		/* Get the per device memory space. We can add this but
		 * currently do not support it */
	case SIOCSIFMEM:
		/* Set the per device memory buffer space.
		 * Not applicable in our case */
	case SIOCSIFLINK:
		return -EINVAL;

	/*
	 *	Unknown or private ioctl.
	 */
	default:
		if (cmd == SIOCWANDEV ||
		    (cmd >= SIOCDEVPRIVATE &&
		     cmd <= SIOCDEVPRIVATE + 15)) {
			dev_load(net, ifr.ifr_name);
			rtnl_lock();
			ret = dev_ifsioc(net, &ifr, cmd);
			rtnl_unlock();
			if (!ret && copy_to_user(arg, &ifr,
						 sizeof(struct ifreq)))
				ret = -EFAULT;
			return ret;
		}
		/* Take care of Wireless Extensions */
		if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
			return wext_handle_ioctl(net, &ifr, cmd, arg);
		return -EINVAL;
	}
}
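/*
 * Example (userspace side, for illustration): the typical caller of
 * this entry point is an application issuing an interface ioctl on any
 * socket, e.g. fetching an ifindex.
 *
 *	struct ifreq ifr;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	if (ioctl(fd, SIOCGIFINDEX, &ifr) == 0)
 *		printf("ifindex=%d\n", ifr.ifr_ifindex);
 */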
/**
 *	dev_new_index	-	allocate an ifindex
 *	@net: the applicable net namespace
 *
 *	Returns a suitable unique value for a new device interface
 *	number.  The caller must hold the rtnl semaphore or the
 *	dev_base_lock to be sure it remains unique.
 */
static int dev_new_index(struct net *net)
{
	static int ifindex;
	for (;;) {
		if (++ifindex <= 0)
			ifindex = 1;
		if (!__dev_get_by_index(net, ifindex))
			return ifindex;
	}
}

/* Delayed registration/unregistration */
static DEFINE_SPINLOCK(net_todo_list_lock);
static LIST_HEAD(net_todo_list);

static void net_set_todo(struct net_device *dev)
{
	spin_lock(&net_todo_list_lock);
	list_add_tail(&dev->todo_list, &net_todo_list);
	spin_unlock(&net_todo_list_lock);
}
static void rollback_registered(struct net_device *dev)
{
	BUG_ON(dev_boot_phase);
	ASSERT_RTNL();

	/* Some devices call without registering for initialization unwind. */
	if (dev->reg_state == NETREG_UNINITIALIZED) {
		printk(KERN_DEBUG "unregister_netdevice: device %s/%p never "
				  "was registered\n", dev->name, dev);

		WARN_ON(1);
		return;
	}

	BUG_ON(dev->reg_state != NETREG_REGISTERED);

	/* If device is running, close it first. */
	dev_close(dev);

	/* And unlink it from device chain. */
	unlist_netdevice(dev);

	dev->reg_state = NETREG_UNREGISTERING;

	synchronize_net();

	/* Shutdown queueing discipline. */
	dev_shutdown(dev);

	/* Notify protocols, that we are about to destroy
	   this device. They should clean all the things.
	*/
	call_netdevice_notifiers(NETDEV_UNREGISTER, dev);

	/*
	 *	Flush the unicast and multicast chains
	 */
	dev_addr_discard(dev);

	if (dev->uninit)
		dev->uninit(dev);

	/* Notifier chain MUST detach us from master device. */
	BUG_TRAP(!dev->master);

	/* Remove entries from kobject tree */
	netdev_unregister_kobject(dev);

	synchronize_net();

	dev_put(dev);
}
static void __netdev_init_queue_locks_one(struct net_device *dev,
					  struct netdev_queue *dev_queue,
					  void *_unused)
{
	spin_lock_init(&dev_queue->_xmit_lock);
	netdev_set_lockdep_class(&dev_queue->_xmit_lock, dev->type);
	dev_queue->xmit_lock_owner = -1;
}

static void netdev_init_queue_locks(struct net_device *dev)
{
	netdev_for_each_tx_queue(dev, __netdev_init_queue_locks_one, NULL);
	__netdev_init_queue_locks_one(dev, &dev->rx_queue, NULL);
}
/**
 *	register_netdevice	- register a network device
 *	@dev: device to register
 *
 *	Take a completed network device structure and add it to the kernel
 *	interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
 *	chain. 0 is returned on success. A negative errno code is returned
 *	on a failure to set up the device, or if the name is a duplicate.
 *
 *	Callers must hold the rtnl semaphore. You may want
 *	register_netdev() instead of this.
 *
 *	BUGS:
 *	The locking appears insufficient to guarantee two parallel registers
 *	will not get the same name.
 */

int register_netdevice(struct net_device *dev)
{
	struct hlist_head *head;
	struct hlist_node *p;
	int ret;
	struct net *net;

	BUG_ON(dev_boot_phase);
	ASSERT_RTNL();

	might_sleep();

	/* When net_device's are persistent, this will be fatal. */
	BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
	BUG_ON(!dev_net(dev));
	net = dev_net(dev);

	spin_lock_init(&dev->addr_list_lock);
	netdev_init_queue_locks(dev);

	dev->iflink = -1;

	/* Init, if this function is available */
	if (dev->init) {
		ret = dev->init(dev);
		if (ret) {
			if (ret > 0)
				ret = -EIO;
			goto out;
		}
	}

	if (!dev_valid_name(dev->name)) {
		ret = -EINVAL;
		goto err_uninit;
	}

	dev->ifindex = dev_new_index(net);
	if (dev->iflink == -1)
		dev->iflink = dev->ifindex;

	/* Check for existence of name */
	head = dev_name_hash(net, dev->name);
	hlist_for_each(p, head) {
		struct net_device *d
			= hlist_entry(p, struct net_device, name_hlist);
		if (!strncmp(d->name, dev->name, IFNAMSIZ)) {
			ret = -EEXIST;
			goto err_uninit;
		}
	}

	/* Fix illegal checksum combinations */
	if ((dev->features & NETIF_F_HW_CSUM) &&
	    (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
		printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n",
		       dev->name);
		dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
	}

	if ((dev->features & NETIF_F_NO_CSUM) &&
	    (dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
		printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n",
		       dev->name);
		dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
	}

	/* Fix illegal SG+CSUM combinations. */
	if ((dev->features & NETIF_F_SG) &&
	    !(dev->features & NETIF_F_ALL_CSUM)) {
		printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no checksum feature.\n",
		       dev->name);
		dev->features &= ~NETIF_F_SG;
	}

	/* TSO requires that SG is present as well. */
	if ((dev->features & NETIF_F_TSO) &&
	    !(dev->features & NETIF_F_SG)) {
		printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no SG feature.\n",
		       dev->name);
		dev->features &= ~NETIF_F_TSO;
	}
	if (dev->features & NETIF_F_UFO) {
		if (!(dev->features & NETIF_F_HW_CSUM)) {
			printk(KERN_ERR "%s: Dropping NETIF_F_UFO since no "
					"NETIF_F_HW_CSUM feature.\n",
					dev->name);
			dev->features &= ~NETIF_F_UFO;
		}
		if (!(dev->features & NETIF_F_SG)) {
			printk(KERN_ERR "%s: Dropping NETIF_F_UFO since no "
					"NETIF_F_SG feature.\n",
					dev->name);
			dev->features &= ~NETIF_F_UFO;
		}
	}

	netdev_initialize_kobject(dev);
	ret = netdev_register_kobject(dev);
	if (ret)
		goto err_uninit;
	dev->reg_state = NETREG_REGISTERED;

	/*
	 *	Default initial state at registry is that the
	 *	device is present.
	 */

	set_bit(__LINK_STATE_PRESENT, &dev->state);

	dev_init_scheduler(dev);
	dev_hold(dev);
	list_netdevice(dev);

	/* Notify protocols, that a new device appeared. */
	ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
	ret = notifier_to_errno(ret);
	if (ret) {
		rollback_registered(dev);
		dev->reg_state = NETREG_UNREGISTERED;
	}

out:
	return ret;

err_uninit:
	if (dev->uninit)
		dev->uninit(dev);
	goto out;
}
/**
 *	register_netdev	- register a network device
 *	@dev: device to register
 *
 *	Take a completed network device structure and add it to the kernel
 *	interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
 *	chain. 0 is returned on success. A negative errno code is returned
 *	on a failure to set up the device, or if the name is a duplicate.
 *
 *	This is a wrapper around register_netdevice that takes the rtnl semaphore
 *	and expands the device name if you passed a format string to
 *	alloc_netdev.
 */
int register_netdev(struct net_device *dev)
{
	int err;

	rtnl_lock();

	/*
	 * If the name is a format string the caller wants us to do a
	 * name allocation.
	 */
	if (strchr(dev->name, '%')) {
		err = dev_alloc_name(dev, dev->name);
		if (err < 0)
			goto out;
	}

	err = register_netdevice(dev);
out:
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(register_netdev);
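/*
 * Example (illustrative driver probe sketch): the usual pairing of
 * alloc_netdev() and register_netdev().  "my_priv" and "mydrv_setup"
 * are placeholders for a driver's own private struct and setup
 * callback; error handling is abbreviated.
 *
 *	struct net_device *dev;
 *	int err;
 *
 *	dev = alloc_netdev(sizeof(struct my_priv), "myeth%d", mydrv_setup);
 *	if (!dev)
 *		return -ENOMEM;
 *	err = register_netdev(dev);	-- expands "myeth%d" to "myeth0", ...
 *	if (err)
 *		free_netdev(dev);
 */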
/**
 * netdev_wait_allrefs - wait until all references are gone.
 *
 * This is called when unregistering network devices.
 *
 * Any protocol or device that holds a reference should register
 * for netdevice notification, and cleanup and put back the
 * reference if they receive an UNREGISTER event.
 * We can get stuck here if buggy protocols don't correctly
 * call dev_put.
 */
static void netdev_wait_allrefs(struct net_device *dev)
{
	unsigned long rebroadcast_time, warning_time;

	rebroadcast_time = warning_time = jiffies;
	while (atomic_read(&dev->refcnt) != 0) {
		if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
			rtnl_lock();

			/* Rebroadcast unregister notification */
			call_netdevice_notifiers(NETDEV_UNREGISTER, dev);

			if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
				     &dev->state)) {
				/* We must not have linkwatch events
				 * pending on unregister. If this
				 * happens, we simply run the queue
				 * unscheduled, resulting in a noop
				 * for this device.
				 */
				linkwatch_run_queue();
			}

			__rtnl_unlock();

			rebroadcast_time = jiffies;
		}

		msleep(250);

		if (time_after(jiffies, warning_time + 10 * HZ)) {
			printk(KERN_EMERG "unregister_netdevice: "
			       "waiting for %s to become free. Usage "
			       "count = %d\n",
			       dev->name, atomic_read(&dev->refcnt));
			warning_time = jiffies;
		}
	}
}
/* The sequence is:
 *
 *	rtnl_lock();
 *	...
 *	register_netdevice(x1);
 *	register_netdevice(x2);
 *	...
 *	unregister_netdevice(y1);
 *	unregister_netdevice(y2);
 *	...
 *	rtnl_unlock();
 *	free_netdev(y1);
 *	free_netdev(y2);
 *
 * We are invoked by rtnl_unlock() after it drops the semaphore.
 * This allows us to deal with problems:
 * 1) We can delete sysfs objects which invoke hotplug
 *    without deadlocking with linkwatch via keventd.
 * 2) Since we run with the RTNL semaphore not held, we can sleep
 *    safely in order to wait for the netdev refcnt to drop to zero.
 */
static DEFINE_MUTEX(net_todo_run_mutex);
void netdev_run_todo(void)
{
	struct list_head list;

	/* Need to guard against multiple cpu's getting out of order. */
	mutex_lock(&net_todo_run_mutex);

	/* Not safe to do outside the semaphore.  We must not return
	 * until all unregister events invoked by the local processor
	 * have been completed (either by this todo run, or one on
	 * another cpu).
	 */
	if (list_empty(&net_todo_list))
		goto out;

	/* Snapshot list, allow later requests */
	spin_lock(&net_todo_list_lock);
	list_replace_init(&net_todo_list, &list);
	spin_unlock(&net_todo_list_lock);

	while (!list_empty(&list)) {
		struct net_device *dev
			= list_entry(list.next, struct net_device, todo_list);
		list_del(&dev->todo_list);

		if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
			printk(KERN_ERR "network todo '%s' but state %d\n",
			       dev->name, dev->reg_state);
			dump_stack();
			continue;
		}

		dev->reg_state = NETREG_UNREGISTERED;

		netdev_wait_allrefs(dev);

		/* paranoia */
		BUG_ON(atomic_read(&dev->refcnt));
		BUG_TRAP(!dev->ip_ptr);
		BUG_TRAP(!dev->ip6_ptr);
		BUG_TRAP(!dev->dn_ptr);

		if (dev->destructor)
			dev->destructor(dev);

		/* Free network device */
		kobject_put(&dev->dev.kobj);
	}

out:
	mutex_unlock(&net_todo_run_mutex);
}
static struct net_device_stats *internal_stats(struct net_device *dev)
{
	return &dev->stats;
}

static void netdev_init_one_queue(struct net_device *dev,
				  struct netdev_queue *queue,
				  void *_unused)
{
	queue->dev = dev;
}

static void netdev_init_queues(struct net_device *dev)
{
	netdev_init_one_queue(dev, &dev->rx_queue, NULL);
	netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
}
/**
 *	alloc_netdev_mq - allocate network device
 *	@sizeof_priv:	size of private data to allocate space for
 *	@name:		device name format string
 *	@setup:		callback to initialize device
 *	@queue_count:	the number of subqueues to allocate
 *
 *	Allocates a struct net_device with private data area for driver use
 *	and performs basic initialization.  Also allocates subqueue structs
 *	for each queue on the device at the end of the netdevice.
 */
struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
		void (*setup)(struct net_device *), unsigned int queue_count)
{
	struct netdev_queue *tx;
	struct net_device *dev;
	int alloc_size;
	void *p;

	BUG_ON(strlen(name) >= sizeof(dev->name));

	alloc_size = sizeof(struct net_device);
	if (sizeof_priv) {
		/* ensure 32-byte alignment of private area */
		alloc_size = (alloc_size + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST;
		alloc_size += sizeof_priv;
	}
	/* ensure 32-byte alignment of whole construct */
	alloc_size += NETDEV_ALIGN_CONST;

	p = kzalloc(alloc_size, GFP_KERNEL);
	if (!p) {
		printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
		return NULL;
	}

	tx = kcalloc(queue_count, sizeof(struct netdev_queue), GFP_KERNEL);
	if (!tx) {
		printk(KERN_ERR "alloc_netdev: Unable to allocate "
		       "tx qdiscs.\n");
		kfree(p);
		return NULL;
	}

	dev = (struct net_device *)
		(((long)p + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST);
	dev->padded = (char *)dev - (char *)p;
	dev_net_set(dev, &init_net);

	dev->_tx = tx;
	dev->num_tx_queues = queue_count;
	dev->real_num_tx_queues = queue_count;

	if (sizeof_priv) {
		dev->priv = ((char *)dev +
			     ((sizeof(struct net_device) + NETDEV_ALIGN_CONST)
			      & ~NETDEV_ALIGN_CONST));
	}

	dev->gso_max_size = GSO_MAX_SIZE;

	netdev_init_queues(dev);

	dev->get_stats = internal_stats;
	netpoll_netdev_init(dev);
	setup(dev);
	strcpy(dev->name, name);
	return dev;
}
EXPORT_SYMBOL(alloc_netdev_mq);
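/*
 * Example (illustrative): a multiqueue-capable driver allocating one
 * netdevice with eight TX subqueues.  "my_priv" and "mydrv_setup" are
 * placeholder names; error handling is elided.
 *
 *	dev = alloc_netdev_mq(sizeof(struct my_priv), "myeth%d",
 *			      mydrv_setup, 8);
 *	if (!dev)
 *		return -ENOMEM;
 */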
/**
 *	free_netdev - free network device
 *	@dev: device
 *
 *	This function does the last stage of destroying an allocated device
 *	interface. The reference to the device object is released.
 *	If this is the last reference then it will be freed.
 */
void free_netdev(struct net_device *dev)
{
	release_net(dev_net(dev));

	kfree(dev->_tx);

	/*  Compatibility with error handling in drivers */
	if (dev->reg_state == NETREG_UNINITIALIZED) {
		kfree((char *)dev - dev->padded);
		return;
	}

	BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
	dev->reg_state = NETREG_RELEASED;

	/* will free via device release */
	put_device(&dev->dev);
}
/* Synchronize with packet receive processing. */
void synchronize_net(void)
{
	might_sleep();
	synchronize_rcu();
}
/**
 *	unregister_netdevice - remove device from the kernel
 *	@dev: device
 *
 *	This function shuts down a device interface and removes it
 *	from the kernel tables.
 *
 *	Callers must hold the rtnl semaphore.  You may want
 *	unregister_netdev() instead of this.
 */

void unregister_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	rollback_registered(dev);
	/* Finish processing unregister after unlock */
	net_set_todo(dev);
}

/**
 *	unregister_netdev - remove device from the kernel
 *	@dev: device
 *
 *	This function shuts down a device interface and removes it
 *	from the kernel tables.
 *
 *	This is just a wrapper for unregister_netdevice that takes
 *	the rtnl semaphore.  In general you want to use this and not
 *	unregister_netdevice.
 */
void unregister_netdev(struct net_device *dev)
{
	rtnl_lock();
	unregister_netdevice(dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(unregister_netdev);
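/*
 * Example (illustrative driver remove sketch): the counterpart to the
 * registration example above -- unregister first, then free once the
 * todo processing has dropped the last reference.  "my_dev" is a
 * placeholder name.
 *
 *	static void mydrv_remove(struct my_dev *priv)
 *	{
 *		unregister_netdev(priv->netdev);
 *		free_netdev(priv->netdev);
 *	}
 */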
/**
 *	dev_change_net_namespace - move device to a different network namespace
 *	@dev: device
 *	@net: network namespace
 *	@pat: If not NULL name pattern to try if the current device name
 *	      is already taken in the destination network namespace.
 *
 *	This function shuts down a device interface and moves it
 *	to a new network namespace. On success 0 is returned, on
 *	a failure a negative errno code is returned.
 *
 *	Callers must hold the rtnl semaphore.
 */

int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
{
	char buf[IFNAMSIZ];
	const char *destname;
	int err;

	ASSERT_RTNL();

	/* Don't allow namespace local devices to be moved. */
	err = -EINVAL;
	if (dev->features & NETIF_F_NETNS_LOCAL)
		goto out;

	/* Ensure the device has been registered */
	err = -EINVAL;
	if (dev->reg_state != NETREG_REGISTERED)
		goto out;

	/* Get out if there is nothing to do */
	err = 0;
	if (net_eq(dev_net(dev), net))
		goto out;

	/* Pick the destination device name, and ensure
	 * we can use it in the destination network namespace.
	 */
	err = -EEXIST;
	destname = dev->name;
	if (__dev_get_by_name(net, destname)) {
		/* We get here if we can't use the current device name */
		if (!pat)
			goto out;
		if (!dev_valid_name(pat))
			goto out;
		if (strchr(pat, '%')) {
			if (__dev_alloc_name(net, pat, buf) < 0)
				goto out;
			destname = buf;
		} else
			destname = pat;
		if (__dev_get_by_name(net, destname))
			goto out;
	}

	/*
	 * And now a mini version of register_netdevice unregister_netdevice.
	 */

	/* If device is running close it first. */
	dev_close(dev);

	/* And unlink it from device chain */
	err = -ENODEV;
	unlist_netdevice(dev);

	synchronize_net();

	/* Shutdown queueing discipline. */
	dev_shutdown(dev);

	/* Notify protocols, that we are about to destroy
	   this device. They should clean all the things.
	*/
	call_netdevice_notifiers(NETDEV_UNREGISTER, dev);

	/*
	 *	Flush the unicast and multicast chains
	 */
	dev_addr_discard(dev);

	/* Actually switch the network namespace */
	dev_net_set(dev, net);

	/* Assign the new device name */
	if (destname != dev->name)
		strcpy(dev->name, destname);

	/* If there is an ifindex conflict assign a new one */
	if (__dev_get_by_index(net, dev->ifindex)) {
		int iflink = (dev->iflink == dev->ifindex);
		dev->ifindex = dev_new_index(net);
		if (iflink)
			dev->iflink = dev->ifindex;
	}

	/* Fixup kobjects */
	netdev_unregister_kobject(dev);
	err = netdev_register_kobject(dev);
	WARN_ON(err);

	/* Add the device back in the hashes */
	list_netdevice(dev);

	/* Notify protocols, that a new device appeared. */
	call_netdevice_notifiers(NETDEV_REGISTER, dev);

	synchronize_net();
	err = 0;
out:
	return err;
}
static int dev_cpu_callback(struct notifier_block *nfb,
			    unsigned long action,
			    void *ocpu)
{
	struct sk_buff **list_skb;
	struct Qdisc **list_net;
	struct sk_buff *skb;
	unsigned int cpu, oldcpu = (unsigned long)ocpu;
	struct softnet_data *sd, *oldsd;

	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
		return NOTIFY_OK;

	local_irq_disable();
	cpu = smp_processor_id();
	sd = &per_cpu(softnet_data, cpu);
	oldsd = &per_cpu(softnet_data, oldcpu);

	/* Find end of our completion_queue. */
	list_skb = &sd->completion_queue;
	while (*list_skb)
		list_skb = &(*list_skb)->next;
	/* Append completion queue from offline CPU. */
	*list_skb = oldsd->completion_queue;
	oldsd->completion_queue = NULL;

	/* Find end of our output_queue. */
	list_net = &sd->output_queue;
	while (*list_net)
		list_net = &(*list_net)->next_sched;
	/* Append output queue from offline CPU. */
	*list_net = oldsd->output_queue;
	oldsd->output_queue = NULL;

	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_enable();

	/* Process offline CPU's input_pkt_queue */
	while ((skb = __skb_dequeue(&oldsd->input_pkt_queue)))
		netif_rx(skb);

	return NOTIFY_OK;
}
#ifdef CONFIG_NET_DMA
/**
 * net_dma_rebalance - try to maintain one DMA channel per CPU
 * @net_dma: DMA client and associated data (lock, channels, channel_mask)
 *
 * This is called when the number of channels allocated to the net_dma client
 * changes.  The net_dma client tries to have one DMA channel per CPU.
 */

static void net_dma_rebalance(struct net_dma *net_dma)
{
	unsigned int cpu, i, n, chan_idx;
	struct dma_chan *chan;

	if (cpus_empty(net_dma->channel_mask)) {
		for_each_online_cpu(cpu)
			rcu_assign_pointer(per_cpu(softnet_data, cpu).net_dma, NULL);
		return;
	}

	i = 0;
	cpu = first_cpu(cpu_online_map);

	for_each_cpu_mask(chan_idx, net_dma->channel_mask) {
		chan = net_dma->channels[chan_idx];

		/* Give each channel cpus/channels CPUs, distributing the
		 * remainder over the first channels.
		 */
		n = ((num_online_cpus() / cpus_weight(net_dma->channel_mask))
		   + (i < (num_online_cpus() %
			cpus_weight(net_dma->channel_mask)) ? 1 : 0));

		while (n) {
			per_cpu(softnet_data, cpu).net_dma = chan;
			cpu = next_cpu(cpu, cpu_online_map);
			n--;
		}
		i++;
	}
}
/**
 * netdev_dma_event - event callback for the net_dma_client
 * @client: should always be net_dma_client
 * @chan: DMA channel for the event
 * @state: DMA state to be handled
 */
static enum dma_state_client
netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
	enum dma_state state)
{
	int i, found = 0, pos = -1;
	struct net_dma *net_dma =
		container_of(client, struct net_dma, client);
	enum dma_state_client ack = DMA_DUP; /* default: take no action */

	spin_lock(&net_dma->lock);
	switch (state) {
	case DMA_RESOURCE_AVAILABLE:
		for (i = 0; i < nr_cpu_ids; i++)
			if (net_dma->channels[i] == chan) {
				found = 1;
				break;
			} else if (net_dma->channels[i] == NULL && pos < 0)
				pos = i;

		if (!found && pos >= 0) {
			ack = DMA_ACK;
			net_dma->channels[pos] = chan;
			cpu_set(pos, net_dma->channel_mask);
			net_dma_rebalance(net_dma);
		}
		break;
	case DMA_RESOURCE_REMOVED:
		for (i = 0; i < nr_cpu_ids; i++)
			if (net_dma->channels[i] == chan) {
				found = 1;
				pos = i;
				break;
			}

		if (found) {
			ack = DMA_ACK;
			cpu_clear(pos, net_dma->channel_mask);
			net_dma->channels[i] = NULL;
			net_dma_rebalance(net_dma);
		}
		break;
	default:
		break;
	}
	spin_unlock(&net_dma->lock);

	return ack;
}
/**
 * netdev_dma_register - register the networking subsystem as a DMA client
 */
static int __init netdev_dma_register(void)
{
	net_dma.channels = kzalloc(nr_cpu_ids * sizeof(*net_dma.channels),
				   GFP_KERNEL);
	if (unlikely(!net_dma.channels)) {
		printk(KERN_NOTICE
		       "netdev_dma: no memory for net_dma.channels\n");
		return -ENOMEM;
	}
	spin_lock_init(&net_dma.lock);
	dma_cap_set(DMA_MEMCPY, net_dma.client.cap_mask);
	dma_async_client_register(&net_dma.client);
	dma_async_client_chan_request(&net_dma.client);
	return 0;
}

#else
static int __init netdev_dma_register(void) { return -ENODEV; }
#endif /* CONFIG_NET_DMA */
/**
 *	netdev_compute_features - compute conjunction of two feature sets
 *	@all: first feature set
 *	@one: second feature set
 *
 *	Computes a new feature set after adding a device with feature set
 *	@one to the master device with current feature set @all.  Returns
 *	the new feature set.
 */
int netdev_compute_features(unsigned long all, unsigned long one)
{
	/* if device needs checksumming, downgrade to hw checksumming */
	if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
		all ^= NETIF_F_NO_CSUM | NETIF_F_HW_CSUM;

	/* if device can't do all checksum, downgrade to ipv4/ipv6 */
	if (all & NETIF_F_HW_CSUM && !(one & NETIF_F_HW_CSUM))
		all ^= NETIF_F_HW_CSUM
		       | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	if (one & NETIF_F_GSO)
		one |= NETIF_F_GSO_SOFTWARE;
	one |= NETIF_F_GSO;

	/* If even one device supports robust GSO, enable it for all. */
	if (one & NETIF_F_GSO_ROBUST)
		all |= NETIF_F_GSO_ROBUST;

	all &= one | NETIF_F_LLTX;

	if (!(all & NETIF_F_ALL_CSUM))
		all &= ~NETIF_F_GSO;
	if (!(all & NETIF_F_SG))
		all &= ~NETIF_F_GSO_MASK;

	return all;
}
EXPORT_SYMBOL(netdev_compute_features);
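/*
 * Example (illustrative): how a master device such as a bond could
 * fold in each slave's feature set.  "bond" and the slave iteration
 * are schematic placeholders, not this file's code.
 *
 *	unsigned long features = bond->dev->features;
 *
 *	for_each_slave(bond, slave)
 *		features = netdev_compute_features(features,
 *						   slave->dev->features);
 *	bond->dev->features = features;
 */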
static struct hlist_head *netdev_create_hash(void)
{
	int i;
	struct hlist_head *hash;

	hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
	if (hash != NULL)
		for (i = 0; i < NETDEV_HASHENTRIES; i++)
			INIT_HLIST_HEAD(&hash[i]);

	return hash;
}

/* Initialize per network namespace state */
static int __net_init netdev_init(struct net *net)
{
	INIT_LIST_HEAD(&net->dev_base_head);

	net->dev_name_head = netdev_create_hash();
	if (net->dev_name_head == NULL)
		goto err_name;

	net->dev_index_head = netdev_create_hash();
	if (net->dev_index_head == NULL)
		goto err_idx;

	return 0;

err_idx:
	kfree(net->dev_name_head);
err_name:
	return -ENOMEM;
}
char *netdev_drivername(struct net_device *dev, char *buffer, int len)
{
	struct device_driver *driver;
	struct device *parent;

	if (len <= 0 || !buffer)
		return buffer;
	buffer[0] = 0;

	parent = dev->dev.parent;
	if (!parent)
		return buffer;

	driver = parent->driver;
	if (driver && driver->name)
		strlcpy(buffer, driver->name, len);
	return buffer;
}
static void __net_exit netdev_exit(struct net *net)
{
	kfree(net->dev_name_head);
	kfree(net->dev_index_head);
}

static struct pernet_operations __net_initdata netdev_net_ops = {
	.init = netdev_init,
	.exit = netdev_exit,
};
static void __net_exit default_device_exit(struct net *net)
{
	struct net_device *dev, *next;
	/*
	 * Push all migratable network devices back to the
	 * initial network namespace
	 */
	rtnl_lock();
	for_each_netdev_safe(net, dev, next) {
		int err;
		char fb_name[IFNAMSIZ];

		/* Ignore unmovable devices (i.e. loopback) */
		if (dev->features & NETIF_F_NETNS_LOCAL)
			continue;

		/* Push remaining network devices to init_net */
		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
		err = dev_change_net_namespace(dev, &init_net, fb_name);
		if (err) {
			printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n",
				__func__, dev->name, err);
			BUG();
		}
	}
	rtnl_unlock();
}

static struct pernet_operations __net_initdata default_device_ops = {
	.exit = default_device_exit,
};
/*
 *	Initialize the DEV module. At boot time this walks the device list and
 *	unhooks any devices that fail to initialise (normally hardware not
 *	present) and leaves us with a valid list of present and active devices.
 *
 */

/*
 *	This is called single threaded during boot, so no need
 *	to take the rtnl semaphore.
 */
static int __init net_dev_init(void)
{
	int i, rc = -ENOMEM;

	BUG_ON(!dev_boot_phase);

	if (dev_proc_init())
		goto out;

	if (netdev_kobject_init())
		goto out;

	INIT_LIST_HEAD(&ptype_all);
	for (i = 0; i < PTYPE_HASH_SIZE; i++)
		INIT_LIST_HEAD(&ptype_base[i]);

	if (register_pernet_subsys(&netdev_net_ops))
		goto out;

	if (register_pernet_device(&default_device_ops))
		goto out;

	/*
	 *	Initialise the packet receive queues.
	 */

	for_each_possible_cpu(i) {
		struct softnet_data *queue;

		queue = &per_cpu(softnet_data, i);
		skb_queue_head_init(&queue->input_pkt_queue);
		queue->completion_queue = NULL;
		INIT_LIST_HEAD(&queue->poll_list);

		queue->backlog.poll = process_backlog;
		queue->backlog.weight = weight_p;
	}

	netdev_dma_register();

	dev_boot_phase = 0;

	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
	open_softirq(NET_RX_SOFTIRQ, net_rx_action);

	hotcpu_notifier(dev_cpu_callback, 0);
	dst_init();
	dev_mcast_init();
	rc = 0;
out:
	return rc;
}

subsys_initcall(net_dev_init);
EXPORT_SYMBOL(__dev_get_by_index);
EXPORT_SYMBOL(__dev_get_by_name);
EXPORT_SYMBOL(__dev_remove_pack);
EXPORT_SYMBOL(dev_valid_name);
EXPORT_SYMBOL(dev_add_pack);
EXPORT_SYMBOL(dev_alloc_name);
EXPORT_SYMBOL(dev_close);
EXPORT_SYMBOL(dev_get_by_flags);
EXPORT_SYMBOL(dev_get_by_index);
EXPORT_SYMBOL(dev_get_by_name);
EXPORT_SYMBOL(dev_open);
EXPORT_SYMBOL(dev_queue_xmit);
EXPORT_SYMBOL(dev_remove_pack);
EXPORT_SYMBOL(dev_set_allmulti);
EXPORT_SYMBOL(dev_set_promiscuity);
EXPORT_SYMBOL(dev_change_flags);
EXPORT_SYMBOL(dev_set_mtu);
EXPORT_SYMBOL(dev_set_mac_address);
EXPORT_SYMBOL(free_netdev);
EXPORT_SYMBOL(netdev_boot_setup_check);
EXPORT_SYMBOL(netdev_set_master);
EXPORT_SYMBOL(netdev_state_change);
EXPORT_SYMBOL(netif_receive_skb);
EXPORT_SYMBOL(netif_rx);
EXPORT_SYMBOL(register_gifconf);
EXPORT_SYMBOL(register_netdevice);
EXPORT_SYMBOL(register_netdevice_notifier);
EXPORT_SYMBOL(skb_checksum_help);
EXPORT_SYMBOL(synchronize_net);
EXPORT_SYMBOL(unregister_netdevice);
EXPORT_SYMBOL(unregister_netdevice_notifier);
EXPORT_SYMBOL(net_enable_timestamp);
EXPORT_SYMBOL(net_disable_timestamp);
EXPORT_SYMBOL(dev_get_flags);

#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
EXPORT_SYMBOL(br_handle_frame_hook);
EXPORT_SYMBOL(br_fdb_get_hook);
EXPORT_SYMBOL(br_fdb_put_hook);
#endif

#ifdef CONFIG_KMOD
EXPORT_SYMBOL(dev_load);
#endif

EXPORT_PER_CPU_SYMBOL(softnet_data);