/*
 *	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call per packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					minimum space.
 *		Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *		Michael Chastain:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *		Paul Rusty Russell:	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <linux/if_bridge.h>
#include <linux/if_macvlan.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/wext.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>

#include "net-sysfs.h"
/*
 *	The list of packet types we will receive (as opposed to discard)
 *	and the routines to invoke.
 *
 *	Why 16? Because with 16 the only overlap we get on a hash of the
 *	low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *	NOTE:  That is no longer true with the addition of VLAN tags.  Not
 *	       sure which should go first, but I bet it won't make much
 *	       difference if we are running VLANs.  The good news is that
 *	       this protocol won't be in the list unless compiled in, so
 *	       the average user (w/out VLANs) will not be adversely affected.
 */

#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)
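/*
 * Example: how a protocol number maps to a bucket.  ETH_P_ARP is 0x0806,
 * so its low nibble is 6 and its handlers live in ptype_base[6], while
 * ETH_P_ALL handlers bypass the hash and sit on ptype_all.  A minimal
 * sketch of the computation (hypothetical helper, not used in this file):
 */
#if 0
static struct list_head *ptype_bucket(__be16 protocol)
{
        return &ptype_base[ntohs(protocol) & PTYPE_HASH_MASK];
}
#endif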
static DEFINE_SPINLOCK(ptype_lock);
static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
static struct list_head ptype_all __read_mostly;	/* Taps */
#ifdef CONFIG_NET_DMA
struct net_dma {
        struct dma_client client;
        spinlock_t lock;
        cpumask_t channel_mask;
        struct dma_chan **channels;
};

static enum dma_state_client
netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
                 enum dma_state state);

static struct net_dma net_dma = {
        .client = {
                .event_callback = netdev_dma_event,
        },
};
#endif
/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading.
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);

EXPORT_SYMBOL(dev_base_lock);

#define NETDEV_HASHBITS	8
#define NETDEV_HASHENTRIES (1 << NETDEV_HASHBITS)
static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
        unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
        return &net->dev_name_head[hash & ((1 << NETDEV_HASHBITS) - 1)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
        return &net->dev_index_head[ifindex & ((1 << NETDEV_HASHBITS) - 1)];
}

/* Device list insertion */
static int list_netdevice(struct net_device *dev)
{
        struct net *net = dev_net(dev);

        ASSERT_RTNL();

        write_lock_bh(&dev_base_lock);
        list_add_tail(&dev->dev_list, &net->dev_base_head);
        hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
        hlist_add_head(&dev->index_hlist, dev_index_hash(net, dev->ifindex));
        write_unlock_bh(&dev_base_lock);
        return 0;
}

/* Device list removal */
static void unlist_netdevice(struct net_device *dev)
{
        ASSERT_RTNL();

        /* Unlink dev from the device chain */
        write_lock_bh(&dev_base_lock);
        list_del(&dev->dev_list);
        hlist_del(&dev->name_hlist);
        hlist_del(&dev->index_hlist);
        write_unlock_bh(&dev_base_lock);
}
/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU(struct softnet_data, softnet_data);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
 * register_netdevice() inits dev->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
	 ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_VOID,
	 ARPHRD_NONE};

static const char *netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
	 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_VOID",
	 "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
                if (netdev_lock_type[i] == dev_type)
                        return i;
        /* the last key is used by default */
        return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_lockdep_class(spinlock_t *lock,
                                            unsigned short dev_type)
{
        int i;

        i = netdev_lock_pos(dev_type);
        lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
                                   netdev_lock_name[i]);
}
#else
static inline void netdev_set_lockdep_class(spinlock_t *lock,
                                            unsigned short dev_type)
{
}
#endif
/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if a protocol handler that mangles packets is
 *	first on the list, it cannot sense that the packet is cloned and
 *	should be copied-on-write; it will change it in place and subsequent
 *	readers will get a broken packet.
 */
/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep and therefore cannot guarantee that all
 *	CPUs that are in the middle of receiving packets will see the new
 *	packet type (they will on the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
        int hash;

        spin_lock_bh(&ptype_lock);
        if (pt->type == htons(ETH_P_ALL))
                list_add_rcu(&pt->list, &ptype_all);
        else {
                hash = ntohs(pt->type) & PTYPE_HASH_MASK;
                list_add_rcu(&pt->list, &ptype_base[hash]);
        }
        spin_unlock_bh(&ptype_lock);
}
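/*
 * Example use of dev_add_pack() from a hypothetical protocol module
 * (my_proto_rcv and my_packet_type are illustrative names only):
 */
#if 0
static int my_proto_rcv(struct sk_buff *skb, struct net_device *dev,
                        struct packet_type *pt, struct net_device *orig_dev)
{
        /* ... process skb, then free it or hand it onward ... */
        kfree_skb(skb);
        return NET_RX_SUCCESS;
}

static struct packet_type my_packet_type = {
        .type = __constant_htons(ETH_P_IP),     /* or a private ethertype */
        .func = my_proto_rcv,
};

/* module init:  dev_add_pack(&my_packet_type);    */
/* module exit:  dev_remove_pack(&my_packet_type); */
#endif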
/**
 *	__dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
        struct list_head *head;
        struct packet_type *pt1;

        spin_lock_bh(&ptype_lock);

        if (pt->type == htons(ETH_P_ALL))
                head = &ptype_all;
        else
                head = &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];

        list_for_each_entry(pt1, head, list) {
                if (pt == pt1) {
                        list_del_rcu(&pt->list);
                        goto out;
                }
        }

        printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
out:
        spin_unlock_bh(&ptype_lock);
}
/**
 *	dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
        __dev_remove_pack(pt);

        synchronize_net();
}
/******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];
/**
 *	netdev_boot_setup_add - add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine to
 *	all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
        struct netdev_boot_setup *s;
        int i;

        s = dev_boot_setup;
        for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
                if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
                        memset(s[i].name, 0, sizeof(s[i].name));
                        strlcpy(s[i].name, name, IFNAMSIZ);
                        memcpy(&s[i].map, map, sizeof(s[i].map));
                        break;
                }
        }

        return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}
/**
 *	netdev_boot_setup_check - check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
        struct netdev_boot_setup *s = dev_boot_setup;
        int i;

        for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
                if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
                    !strcmp(dev->name, s[i].name)) {
                        dev->irq = s[i].map.irq;
                        dev->base_addr = s[i].map.base_addr;
                        dev->mem_start = s[i].map.mem_start;
                        dev->mem_end = s[i].map.mem_end;
                        return 1;
                }
        }
        return 0;
}
/**
 *	netdev_boot_base - get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
        const struct netdev_boot_setup *s = dev_boot_setup;
        char name[IFNAMSIZ];
        int i;

        sprintf(name, "%s%d", prefix, unit);

        /*
         * If device already registered then return base of 1
         * to indicate not to probe for this interface
         */
        if (__dev_get_by_name(&init_net, name))
                return 1;

        for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
                if (!strcmp(name, s[i].name))
                        return s[i].map.base_addr;
        return 0;
}
/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
        int ints[5];
        struct ifmap map;

        str = get_options(str, ARRAY_SIZE(ints), ints);
        if (!str || !*str)
                return 0;

        /* Save settings */
        memset(&map, 0, sizeof(map));
        if (ints[0] > 0)
                map.irq = ints[1];
        if (ints[0] > 1)
                map.base_addr = ints[2];
        if (ints[0] > 2)
                map.mem_start = ints[3];
        if (ints[0] > 3)
                map.mem_end = ints[4];

        /* Add new entry to the list */
        return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);
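/*
 * Example: booting with "netdev=5,0x300,0,0,eth0" makes get_options()
 * return ints[] = { 4, 5, 0x300, 0, 0 } with "eth0" left in str, so an
 * entry is recorded mapping eth0 to IRQ 5 and I/O base 0x300; a driver
 * later picks it up through netdev_boot_setup_check().
 */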
/*******************************************************************************

			    Device Interface Subroutines

*******************************************************************************/

/**
 *	__dev_get_by_name - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
        struct hlist_node *p;

        hlist_for_each(p, dev_name_hash(net, name)) {
                struct net_device *dev
                        = hlist_entry(p, struct net_device, name_hlist);
                if (!strncmp(dev->name, name, IFNAMSIZ))
                        return dev;
        }
        return NULL;
}
/**
 *	dev_get_by_name - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
        struct net_device *dev;

        read_lock(&dev_base_lock);
        dev = __dev_get_by_name(net, name);
        if (dev)
                dev_hold(dev);
        read_unlock(&dev_base_lock);
        return dev;
}
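/*
 * Example use of the reference-counted lookup (hypothetical caller;
 * usable from any context that may take dev_base_lock):
 */
#if 0
static int example_lookup(struct net *net)
{
        struct net_device *dev = dev_get_by_name(net, "eth0");

        if (!dev)
                return -ENODEV;
        /* ... use dev; the held reference keeps it from going away ... */
        dev_put(dev);   /* drop the reference taken by dev_get_by_name() */
        return 0;
}
#endif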
/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
        struct hlist_node *p;

        hlist_for_each(p, dev_index_hash(net, ifindex)) {
                struct net_device *dev
                        = hlist_entry(p, struct net_device, index_hlist);
                if (dev->ifindex == ifindex)
                        return dev;
        }
        return NULL;
}
/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
        struct net_device *dev;

        read_lock(&dev_base_lock);
        dev = __dev_get_by_index(net, ifindex);
        if (dev)
                dev_hold(dev);
        read_unlock(&dev_base_lock);
        return dev;
}
/**
 *	dev_getbyhwaddr - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device. The caller must hold the
 *	rtnl semaphore. The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking.
 *
 *	BUGS:
 *	If the API was consistent this would be __dev_get_by_hwaddr
 */

struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *ha)
{
        struct net_device *dev;

        ASSERT_RTNL();

        for_each_netdev(net, dev)
                if (dev->type == type &&
                    !memcmp(dev->dev_addr, ha, dev->addr_len))
                        return dev;

        return NULL;
}

EXPORT_SYMBOL(dev_getbyhwaddr);

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
        struct net_device *dev;

        ASSERT_RTNL();
        for_each_netdev(net, dev)
                if (dev->type == type)
                        return dev;

        return NULL;
}

EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
        struct net_device *dev;

        rtnl_lock();
        dev = __dev_getfirstbyhwtype(net, type);
        if (dev)
                dev_hold(dev);
        rtnl_unlock();
        return dev;
}

EXPORT_SYMBOL(dev_getfirstbyhwtype);
/**
 *	dev_get_by_flags - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_flags(struct net *net, unsigned short if_flags, unsigned short mask)
{
        struct net_device *dev, *ret;

        ret = NULL;
        read_lock(&dev_base_lock);
        for_each_netdev(net, dev) {
                if (((dev->flags ^ if_flags) & mask) == 0) {
                        dev_hold(dev);
                        ret = dev;
                        break;
                }
        }
        read_unlock(&dev_base_lock);
        return ret;
}
/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
int dev_valid_name(const char *name)
{
        if (*name == '\0')
                return 0;
        if (strlen(name) >= IFNAMSIZ)
                return 0;
        if (!strcmp(name, ".") || !strcmp(name, ".."))
                return 0;

        while (*name) {
                if (*name == '/' || isspace(*name))
                        return 0;
                name++;
        }
        return 1;
}
/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
        int i = 0;
        const char *p;
        const int max_netdevices = 8*PAGE_SIZE;
        unsigned long *inuse;
        struct net_device *d;

        p = strnchr(name, IFNAMSIZ-1, '%');
        if (p) {
                /*
                 * Verify the string as this thing may have come from
                 * the user.  There must be either one "%d" and no other "%"
                 * characters.
                 */
                if (p[1] != 'd' || strchr(p + 2, '%'))
                        return -EINVAL;

                /* Use one page as a bit array of possible slots */
                inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
                if (!inuse)
                        return -ENOMEM;

                for_each_netdev(net, d) {
                        if (!sscanf(d->name, name, &i))
                                continue;
                        if (i < 0 || i >= max_netdevices)
                                continue;

                        /* avoid cases where sscanf is not exact inverse of printf */
                        snprintf(buf, IFNAMSIZ, name, i);
                        if (!strncmp(buf, d->name, IFNAMSIZ))
                                set_bit(i, inuse);
                }

                i = find_first_zero_bit(inuse, max_netdevices);
                free_page((unsigned long) inuse);
        }

        snprintf(buf, IFNAMSIZ, name, i);
        if (!__dev_get_by_name(net, buf))
                return i;

        /* It is possible to run out of possible slots
         * when the name is long and there isn't enough space left
         * for the digits, or if all bits are used.
         */
        return -ENFILE;
}
/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
        char buf[IFNAMSIZ];
        struct net *net;
        int ret;

        BUG_ON(!dev_net(dev));
        net = dev_net(dev);
        ret = __dev_alloc_name(net, name, buf);
        if (ret >= 0)
                strlcpy(dev->name, buf, IFNAMSIZ);
        return ret;
}
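/*
 * Example: with eth0 and eth2 already registered, dev_alloc_name(dev,
 * "eth%d") builds the in-use bitmap {0, 2}, picks the first clear bit,
 * writes "eth1" into dev->name, and returns 1 (the unit number).
 */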
/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device, can pass format strings "eth%d"
 *	for wildcarding.
 */
int dev_change_name(struct net_device *dev, char *newname)
{
        char oldname[IFNAMSIZ];
        int err = 0;
        int ret;
        struct net *net;

        ASSERT_RTNL();
        BUG_ON(!dev_net(dev));

        net = dev_net(dev);
        if (dev->flags & IFF_UP)
                return -EBUSY;

        if (!dev_valid_name(newname))
                return -EINVAL;

        if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
                return 0;

        memcpy(oldname, dev->name, IFNAMSIZ);

        if (strchr(newname, '%')) {
                err = dev_alloc_name(dev, newname);
                if (err < 0)
                        return err;
                strcpy(newname, dev->name);
        }
        else if (__dev_get_by_name(net, newname))
                return -EEXIST;
        else
                strlcpy(dev->name, newname, IFNAMSIZ);

rollback:
        err = device_rename(&dev->dev, dev->name);
        if (err) {
                memcpy(dev->name, oldname, IFNAMSIZ);
                return err;
        }

        write_lock_bh(&dev_base_lock);
        hlist_del(&dev->name_hlist);
        hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
        write_unlock_bh(&dev_base_lock);

        ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
        ret = notifier_to_errno(ret);

        if (ret) {
                if (err) {
                        printk(KERN_ERR
                               "%s: name change rollback failed: %d.\n",
                               dev->name, ret);
                } else {
                        err = ret;
                        memcpy(dev->name, oldname, IFNAMSIZ);
                        goto rollback;
                }
        }

        return err;
}
/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
        call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);
/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
        if (dev->flags & IFF_UP) {
                call_netdevice_notifiers(NETDEV_CHANGE, dev);
                rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
        }
}
/**
 *	dev_load - load a network module
 *	@net: the applicable net namespace
 *	@name: name of interface
 *
 *	If a network interface is not present and the process has suitable
 *	privileges this function loads the module. If module loading is not
 *	available in this kernel then it becomes a nop.
 */

void dev_load(struct net *net, const char *name)
{
        struct net_device *dev;

        read_lock(&dev_base_lock);
        dev = __dev_get_by_name(net, name);
        read_unlock(&dev_base_lock);

        if (!dev && capable(CAP_SYS_MODULE))
                request_module("%s", name);
}
/**
 *	dev_open - prepare an interface for use.
 *	@dev: device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
        int ret = 0;

        ASSERT_RTNL();

        /*
         *	Is it already up?
         */

        if (dev->flags & IFF_UP)
                return 0;

        /*
         *	Is it even present?
         */
        if (!netif_device_present(dev))
                return -ENODEV;

        /*
         *	Call device private open method
         */
        set_bit(__LINK_STATE_START, &dev->state);

        if (dev->validate_addr)
                ret = dev->validate_addr(dev);

        if (!ret && dev->open)
                ret = dev->open(dev);

        /*
         *	If it went open OK then:
         */

        if (ret)
                clear_bit(__LINK_STATE_START, &dev->state);
        else {
                /*
                 *	Set the flags.
                 */
                dev->flags |= IFF_UP;

                /*
                 *	Initialize multicasting status
                 */
                dev_set_rx_mode(dev);

                /*
                 *	Wakeup transmit queue engine
                 */
                dev_activate(dev);

                /*
                 *	... and announce new interface.
                 */
                call_netdevice_notifiers(NETDEV_UP, dev);
        }

        return ret;
}
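/*
 * Example (hypothetical caller): dev_open() expects the RTNL semaphore
 * to be held, as register_netdevice() and friends do:
 */
#if 0
static int example_bring_up(struct net_device *dev)
{
        int err;

        rtnl_lock();
        err = dev_open(dev);    /* nop if the device is already IFF_UP */
        rtnl_unlock();
        return err;
}
#endif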
/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
int dev_close(struct net_device *dev)
{
        ASSERT_RTNL();

        might_sleep();

        if (!(dev->flags & IFF_UP))
                return 0;

        /*
         *	Tell people we are going down, so that they can
         *	prepare for its death while the device is still operating.
         */
        call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

        clear_bit(__LINK_STATE_START, &dev->state);

        /* Synchronize to scheduled poll. We cannot touch poll list,
         * it can be even on different cpu. So just clear netif_running().
         *
         * dev->stop() will invoke napi_disable() on all of its
         * napi_struct instances on this device.
         */
        smp_mb__after_clear_bit(); /* Commit netif_running(). */

        dev_deactivate(dev);

        /*
         *	Call the device specific close. This cannot fail.
         *	Only if device is UP
         *
         *	We allow it to be called even after a DETACH hot-plug
         *	event.
         */
        if (dev->stop)
                dev->stop(dev);

        /*
         *	Device is now down.
         */

        dev->flags &= ~IFF_UP;

        /*
         * Tell people we are down
         */
        call_netdevice_notifiers(NETDEV_DOWN, dev);

        return 0;
}
static int dev_boot_phase = 1;

/*
 *	Device change register/unregister. These are not inline or static
 *	as we export them to the world.
 */

/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 *	When registered all registration and up events are replayed
 *	to the new notifier to allow device to have a race-free
 *	view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
        struct net_device *dev;
        struct net_device *last;
        struct net *net;
        int err;

        rtnl_lock();
        err = raw_notifier_chain_register(&netdev_chain, nb);
        if (err)
                goto unlock;
        if (dev_boot_phase)
                goto unlock;
        for_each_net(net) {
                for_each_netdev(net, dev) {
                        err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
                        err = notifier_to_errno(err);
                        if (err)
                                goto rollback;

                        if (!(dev->flags & IFF_UP))
                                continue;

                        nb->notifier_call(nb, NETDEV_UP, dev);
                }
        }

unlock:
        rtnl_unlock();
        return err;

rollback:
        last = dev;
        for_each_net(net) {
                for_each_netdev(net, dev) {
                        if (dev == last)
                                break;

                        if (dev->flags & IFF_UP) {
                                nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
                                nb->notifier_call(nb, NETDEV_DOWN, dev);
                        }
                        nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
                }
        }

        raw_notifier_chain_unregister(&netdev_chain, nb);
        goto unlock;
}
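/*
 * Example of a notifier consumer (hypothetical; note that on this
 * chain the void *ptr argument is the struct net_device itself):
 */
#if 0
static int example_netdev_event(struct notifier_block *nb,
                                unsigned long event, void *ptr)
{
        struct net_device *dev = ptr;

        switch (event) {
        case NETDEV_UP:
                printk(KERN_INFO "%s is up\n", dev->name);
                break;
        case NETDEV_DOWN:
                printk(KERN_INFO "%s is down\n", dev->name);
                break;
        }
        return NOTIFY_DONE;
}

static struct notifier_block example_notifier = {
        .notifier_call = example_netdev_event,
};

/* register_netdevice_notifier(&example_notifier); */
#endif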
/**
 *	unregister_netdevice_notifier - unregister a network notifier block
 *	@nb: notifier
 *
 *	Unregister a notifier previously registered by
 *	register_netdevice_notifier(). The notifier is unlinked from the
 *	kernel structures and may then be reused. A negative errno code
 *	is returned on a failure.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
        int err;

        rtnl_lock();
        err = raw_notifier_chain_unregister(&netdev_chain, nb);
        rtnl_unlock();
        return err;
}
/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
        return raw_notifier_call_chain(&netdev_chain, val, dev);
}
/* When > 0 there are consumers of rx skb time stamps */
static atomic_t netstamp_needed = ATOMIC_INIT(0);

void net_enable_timestamp(void)
{
        atomic_inc(&netstamp_needed);
}

void net_disable_timestamp(void)
{
        atomic_dec(&netstamp_needed);
}

static inline void net_timestamp(struct sk_buff *skb)
{
        if (atomic_read(&netstamp_needed))
                __net_timestamp(skb);
        else
                skb->tstamp.tv64 = 0;
}
/*
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
 */

static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
        struct packet_type *ptype;

        net_timestamp(skb);

        rcu_read_lock();
        list_for_each_entry_rcu(ptype, &ptype_all, list) {
                /* Never send packets back to the socket
                 * they originated from - MvS (miquels@drinkel.ow.org)
                 */
                if ((ptype->dev == dev || !ptype->dev) &&
                    (ptype->af_packet_priv == NULL ||
                     (struct sock *)ptype->af_packet_priv != skb->sk)) {
                        struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
                        if (!skb2)
                                break;

                        /* skb->nh should be correctly
                           set by sender, so that the second statement is
                           just protection against buggy protocols.
                         */
                        skb_reset_mac_header(skb2);

                        if (skb_network_header(skb2) < skb2->data ||
                            skb2->network_header > skb2->tail) {
                                if (net_ratelimit())
                                        printk(KERN_CRIT "protocol %04x is "
                                               "buggy, dev %s\n",
                                               skb2->protocol, dev->name);
                                skb_reset_network_header(skb2);
                        }

                        skb2->transport_header = skb2->network_header;
                        skb2->pkt_type = PACKET_OUTGOING;
                        ptype->func(skb2, skb->dev, ptype, skb->dev);
                }
        }
        rcu_read_unlock();
}
void __netif_schedule(struct net_device *dev)
{
        if (!test_and_set_bit(__LINK_STATE_SCHED, &dev->state)) {
                unsigned long flags;
                struct softnet_data *sd;

                local_irq_save(flags);
                sd = &__get_cpu_var(softnet_data);
                dev->next_sched = sd->output_queue;
                sd->output_queue = dev;
                raise_softirq_irqoff(NET_TX_SOFTIRQ);
                local_irq_restore(flags);
        }
}
EXPORT_SYMBOL(__netif_schedule);

void dev_kfree_skb_irq(struct sk_buff *skb)
{
        if (atomic_dec_and_test(&skb->users)) {
                struct softnet_data *sd;
                unsigned long flags;

                local_irq_save(flags);
                sd = &__get_cpu_var(softnet_data);
                skb->next = sd->completion_queue;
                sd->completion_queue = skb;
                raise_softirq_irqoff(NET_TX_SOFTIRQ);
                local_irq_restore(flags);
        }
}
EXPORT_SYMBOL(dev_kfree_skb_irq);

void dev_kfree_skb_any(struct sk_buff *skb)
{
        if (in_irq() || irqs_disabled())
                dev_kfree_skb_irq(skb);
        else
                dev_kfree_skb(skb);
}
EXPORT_SYMBOL(dev_kfree_skb_any);
/**
 *	netif_device_detach - mark device as removed
 *	@dev: network device
 *
 *	Mark device as removed from system and therefore no longer available.
 */
void netif_device_detach(struct net_device *dev)
{
        if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
            netif_running(dev)) {
                netif_stop_queue(dev);
        }
}
EXPORT_SYMBOL(netif_device_detach);

/**
 *	netif_device_attach - mark device as attached
 *	@dev: network device
 *
 *	Mark device as attached to the system and restart it if needed.
 */
void netif_device_attach(struct net_device *dev)
{
        if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
            netif_running(dev)) {
                netif_wake_queue(dev);
                __netdev_watchdog_up(dev);
        }
}
EXPORT_SYMBOL(netif_device_attach);
static bool can_checksum_protocol(unsigned long features, __be16 protocol)
{
        return ((features & NETIF_F_GEN_CSUM) ||
                ((features & NETIF_F_IP_CSUM) &&
                 protocol == htons(ETH_P_IP)) ||
                ((features & NETIF_F_IPV6_CSUM) &&
                 protocol == htons(ETH_P_IPV6)));
}

static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
{
        if (can_checksum_protocol(dev->features, skb->protocol))
                return true;

        if (skb->protocol == htons(ETH_P_8021Q)) {
                struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
                if (can_checksum_protocol(dev->features & dev->vlan_features,
                                          veh->h_vlan_encapsulated_proto))
                        return true;
        }

        return false;
}
/*
 * Invalidate hardware checksum when packet is to be mangled, and
 * complete checksum manually on outgoing path.
 */
int skb_checksum_help(struct sk_buff *skb)
{
        __wsum csum;
        int ret = 0, offset;

        if (skb->ip_summed == CHECKSUM_COMPLETE)
                goto out_set_summed;

        if (unlikely(skb_shinfo(skb)->gso_size)) {
                /* Let GSO fix up the checksum. */
                goto out_set_summed;
        }

        offset = skb->csum_start - skb_headroom(skb);
        BUG_ON(offset >= skb_headlen(skb));
        csum = skb_checksum(skb, offset, skb->len - offset, 0);

        offset += skb->csum_offset;
        BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));

        if (skb_cloned(skb) &&
            !skb_clone_writable(skb, offset + sizeof(__sum16))) {
                ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
                if (ret)
                        goto out;
        }

        *(__sum16 *)(skb->data + offset) = csum_fold(csum);
out_set_summed:
        skb->ip_summed = CHECKSUM_NONE;
out:
        return ret;
}
/**
 *	skb_gso_segment - Perform segmentation on skb.
 *	@skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 *
 *	This function segments the given skb and returns a list of segments.
 *
 *	It may return NULL if the skb requires no segmentation.  This is
 *	only possible when GSO is used for verifying header integrity.
 */
struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
{
        struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
        struct packet_type *ptype;
        __be16 type = skb->protocol;
        int err;

        BUG_ON(skb_shinfo(skb)->frag_list);

        skb_reset_mac_header(skb);
        skb->mac_len = skb->network_header - skb->mac_header;
        __skb_pull(skb, skb->mac_len);

        if (WARN_ON(skb->ip_summed != CHECKSUM_PARTIAL)) {
                if (skb_header_cloned(skb) &&
                    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
                        return ERR_PTR(err);
        }

        rcu_read_lock();
        list_for_each_entry_rcu(ptype,
                        &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
                if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
                        if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
                                err = ptype->gso_send_check(skb);
                                segs = ERR_PTR(err);
                                if (err || skb_gso_ok(skb, features))
                                        break;
                                __skb_push(skb, (skb->data -
                                                 skb_network_header(skb)));
                        }
                        segs = ptype->gso_segment(skb, features);
                        break;
                }
        }
        rcu_read_unlock();

        __skb_push(skb, skb->data - skb_mac_header(skb));

        return segs;
}
EXPORT_SYMBOL(skb_gso_segment);
/* Take action when hardware reception checksum errors are detected. */
#ifdef CONFIG_BUG
void netdev_rx_csum_fault(struct net_device *dev)
{
        if (net_ratelimit()) {
                printk(KERN_ERR "%s: hw csum failure.\n",
                        dev ? dev->name : "<unknown>");
                dump_stack();
        }
}
EXPORT_SYMBOL(netdev_rx_csum_fault);
#endif
/* Actually, we should eliminate this check as soon as we know that:
 * 1. IOMMU is present and allows to map all the memory.
 * 2. No high memory really exists on this machine.
 */

static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_HIGHMEM
        int i;

        if (dev->features & NETIF_F_HIGHDMA)
                return 0;

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
                if (PageHighMem(skb_shinfo(skb)->frags[i].page))
                        return 1;

#endif
        return 0;
}
struct dev_gso_cb {
        void (*destructor)(struct sk_buff *skb);
};

#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)

static void dev_gso_skb_destructor(struct sk_buff *skb)
{
        struct dev_gso_cb *cb;

        do {
                struct sk_buff *nskb = skb->next;

                skb->next = nskb->next;
                nskb->next = NULL;
                kfree_skb(nskb);
        } while (skb->next);

        cb = DEV_GSO_CB(skb);
        if (cb->destructor)
                cb->destructor(skb);
}

/**
 *	dev_gso_segment - Perform emulated hardware segmentation on skb.
 *	@skb: buffer to segment
 *
 *	This function segments the given skb and stores the list of segments
 *	in skb->next.
 */
static int dev_gso_segment(struct sk_buff *skb)
{
        struct net_device *dev = skb->dev;
        struct sk_buff *segs;
        int features = dev->features & ~(illegal_highdma(dev, skb) ?
                                         NETIF_F_SG : 0);

        segs = skb_gso_segment(skb, features);

        /* Verifying header integrity only. */
        if (!segs)
                return 0;

        if (IS_ERR(segs))
                return PTR_ERR(segs);

        skb->next = segs;
        DEV_GSO_CB(skb)->destructor = skb->destructor;
        skb->destructor = dev_gso_skb_destructor;

        return 0;
}
int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        if (likely(!skb->next)) {
                if (!list_empty(&ptype_all))
                        dev_queue_xmit_nit(skb, dev);

                if (netif_needs_gso(dev, skb)) {
                        if (unlikely(dev_gso_segment(skb)))
                                goto out_kfree_skb;
                        if (skb->next)
                                goto gso;
                }

                return dev->hard_start_xmit(skb, dev);
        }

gso:
        do {
                struct sk_buff *nskb = skb->next;
                int rc;

                skb->next = nskb->next;
                nskb->next = NULL;
                rc = dev->hard_start_xmit(nskb, dev);
                if (unlikely(rc)) {
                        nskb->next = skb->next;
                        skb->next = nskb;
                        return rc;
                }
                if (unlikely((netif_queue_stopped(dev) ||
                             netif_subqueue_stopped(dev, skb)) &&
                             skb->next))
                        return NETDEV_TX_BUSY;
        } while (skb->next);

        skb->destructor = DEV_GSO_CB(skb)->destructor;

out_kfree_skb:
        kfree_skb(skb);
        return 0;
}
/**
 *	dev_queue_xmit - transmit a buffer
 *	@skb: buffer to transmit
 *
 *	Queue a buffer for transmission to a network device. The caller must
 *	have set the device and priority and built the buffer before calling
 *	this function. The function can be called from an interrupt.
 *
 *	A negative errno code is returned on a failure. A success does not
 *	guarantee the frame will be transmitted as it may be dropped due
 *	to congestion or traffic shaping.
 *
 * -----------------------------------------------------------------------------------
 *      I notice this method can also return errors from the queue disciplines,
 *      including NET_XMIT_DROP, which is a positive value.  So, errors can also
 *      be positive.
 *
 *      Regardless of the return value, the skb is consumed, so it is currently
 *      difficult to retry a send to this method.  (You can bump the ref count
 *      before sending to hold a reference for retry if you are careful.)
 *
 *      When calling this method, interrupts MUST be enabled.  This is because
 *      the BH enable code must have IRQs enabled so that it will not deadlock.
 */
int dev_queue_xmit(struct sk_buff *skb)
{
        struct net_device *dev = skb->dev;
        struct Qdisc *q;
        int rc = -ENOMEM;

        /* GSO will handle the following emulations directly. */
        if (netif_needs_gso(dev, skb))
                goto gso;

        if (skb_shinfo(skb)->frag_list &&
            !(dev->features & NETIF_F_FRAGLIST) &&
            __skb_linearize(skb))
                goto out_kfree_skb;

        /* Fragmented skb is linearized if device does not support SG,
         * or if at least one of fragments is in highmem and device
         * does not support DMA from it.
         */
        if (skb_shinfo(skb)->nr_frags &&
            (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
            __skb_linearize(skb))
                goto out_kfree_skb;

        /* If packet is not checksummed and device does not support
         * checksumming for this protocol, complete checksumming here.
         */
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                skb_set_transport_header(skb, skb->csum_start -
                                              skb_headroom(skb));
                if (!dev_can_checksum(dev, skb) && skb_checksum_help(skb))
                        goto out_kfree_skb;
        }

gso:
        spin_lock_prefetch(&dev->queue_lock);

        /* Disable soft irqs for various locks below. Also
         * stops preemption for RCU.
         */
        rcu_read_lock_bh();

        /* Updates of qdisc are serialized by queue_lock.
         * The struct Qdisc which is pointed to by qdisc is now a
         * rcu structure - it may be accessed without acquiring
         * a lock (but the structure may be stale.) The freeing of the
         * qdisc will be deferred until it's known that there are no
         * more references to it.
         *
         * If the qdisc has an enqueue function, we still need to
         * hold the queue_lock before calling it, since queue_lock
         * also serializes access to the device queue.
         */

        q = rcu_dereference(dev->qdisc);
#ifdef CONFIG_NET_CLS_ACT
        skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
#endif
        if (q->enqueue) {
                /* Grab device queue */
                spin_lock(&dev->queue_lock);
                q = dev->qdisc;
                if (q->enqueue) {
                        /* reset queue_mapping to zero */
                        skb_set_queue_mapping(skb, 0);
                        rc = q->enqueue(skb, q);
                        qdisc_run(dev);
                        spin_unlock(&dev->queue_lock);

                        rc = rc == NET_XMIT_BYPASS ? NET_XMIT_SUCCESS : rc;
                        goto out;
                }
                spin_unlock(&dev->queue_lock);
        }

        /* The device has no queue. Common case for software devices:
           loopback, all the sorts of tunnels...

           Really, it is unlikely that netif_tx_lock protection is necessary
           here.  (f.e. loopback and IP tunnels are clean ignoring statistics
           counters.)
           However, it is possible, that they rely on protection
           made by us here.

           Check this and shoot the lock. It is not prone to deadlocks.
           Either shoot the noqueue qdisc, it is even simpler 8)
         */
        if (dev->flags & IFF_UP) {
                int cpu = smp_processor_id(); /* ok because BHs are off */

                if (dev->xmit_lock_owner != cpu) {

                        HARD_TX_LOCK(dev, cpu);

                        if (!netif_queue_stopped(dev) &&
                            !netif_subqueue_stopped(dev, skb)) {
                                rc = 0;
                                if (!dev_hard_start_xmit(skb, dev)) {
                                        HARD_TX_UNLOCK(dev);
                                        goto out;
                                }
                        }
                        HARD_TX_UNLOCK(dev);
                        if (net_ratelimit())
                                printk(KERN_CRIT "Virtual device %s asks to "
                                       "queue packet!\n", dev->name);
                } else {
                        /* Recursion is detected! It is possible,
                         * unfortunately */
                        if (net_ratelimit())
                                printk(KERN_CRIT "Dead loop on virtual device "
                                       "%s, fix it urgently!\n", dev->name);
                }
        }

        rc = -ENETDOWN;
        rcu_read_unlock_bh();

out_kfree_skb:
        kfree_skb(skb);
        return rc;
out:
        rcu_read_unlock_bh();
        return rc;
}
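/*
 * Example caller (hypothetical): the skb must have skb->dev set and the
 * frame fully built; dev_queue_xmit() consumes the skb whether or not
 * it is accepted, so the caller must not touch it afterwards.
 */
#if 0
static int example_xmit(struct sk_buff *skb, struct net_device *dev)
{
        skb->dev = dev;
        skb->priority = TC_PRIO_CONTROL;        /* any valid priority */
        return dev_queue_xmit(skb);
}
#endif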
/*=======================================================================
			Receiver routines
  =======================================================================*/

int netdev_max_backlog __read_mostly = 1000;
int netdev_budget __read_mostly = 300;
int weight_p __read_mostly = 64;		/* old backlog weight */

DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
/**
 *	netif_rx	-	post buffer to the network code
 *	@skb: buffer to post
 *
 *	This function receives a packet from a device driver and queues it for
 *	the upper (protocol) levels to process.  It always succeeds. The buffer
 *	may be dropped during processing for congestion control or by the
 *	protocol layers.
 *
 *	return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP	(packet was dropped)
 *
 */

int netif_rx(struct sk_buff *skb)
{
        struct softnet_data *queue;
        unsigned long flags;

        /* if netpoll wants it, pretend we never saw it */
        if (netpoll_rx(skb))
                return NET_RX_DROP;

        if (!skb->tstamp.tv64)
                net_timestamp(skb);

        /*
         * The code is rearranged so that the path is the most
         * short when CPU is congested, but is still operating.
         */
        local_irq_save(flags);
        queue = &__get_cpu_var(softnet_data);

        __get_cpu_var(netdev_rx_stat).total++;
        if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
                if (queue->input_pkt_queue.qlen) {
enqueue:
                        __skb_queue_tail(&queue->input_pkt_queue, skb);
                        local_irq_restore(flags);
                        return NET_RX_SUCCESS;
                }

                napi_schedule(&queue->backlog);
                goto enqueue;
        }

        __get_cpu_var(netdev_rx_stat).dropped++;
        local_irq_restore(flags);

        kfree_skb(skb);
        return NET_RX_DROP;
}
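/*
 * Example driver receive path (hypothetical): an interrupt handler
 * fills an skb, classifies it, and hands it to netif_rx():
 */
#if 0
static void example_rx(struct net_device *dev, struct sk_buff *skb)
{
        skb->protocol = eth_type_trans(skb, dev);  /* also sets skb->dev */
        netif_rx(skb);                  /* queue on this CPU's backlog */
}
#endif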
int netif_rx_ni(struct sk_buff *skb)
{
        int err;

        preempt_disable();
        err = netif_rx(skb);
        if (local_softirq_pending())
                do_softirq();
        preempt_enable();

        return err;
}

EXPORT_SYMBOL(netif_rx_ni);

static inline struct net_device *skb_bond(struct sk_buff *skb)
{
        struct net_device *dev = skb->dev;

        if (dev->master) {
                if (skb_bond_should_drop(skb)) {
                        kfree_skb(skb);
                        return NULL;
                }
                skb->dev = dev->master;
        }

        return dev;
}
static void net_tx_action(struct softirq_action *h)
{
        struct softnet_data *sd = &__get_cpu_var(softnet_data);

        if (sd->completion_queue) {
                struct sk_buff *clist;

                local_irq_disable();
                clist = sd->completion_queue;
                sd->completion_queue = NULL;
                local_irq_enable();

                while (clist) {
                        struct sk_buff *skb = clist;
                        clist = clist->next;

                        BUG_TRAP(!atomic_read(&skb->users));
                        __kfree_skb(skb);
                }
        }

        if (sd->output_queue) {
                struct net_device *head;

                local_irq_disable();
                head = sd->output_queue;
                sd->output_queue = NULL;
                local_irq_enable();

                while (head) {
                        struct net_device *dev = head;
                        head = head->next_sched;

                        smp_mb__before_clear_bit();
                        clear_bit(__LINK_STATE_SCHED, &dev->state);

                        if (spin_trylock(&dev->queue_lock)) {
                                qdisc_run(dev);
                                spin_unlock(&dev->queue_lock);
                        } else {
                                netif_schedule(dev);
                        }
                }
        }
}
static inline int deliver_skb(struct sk_buff *skb,
                              struct packet_type *pt_prev,
                              struct net_device *orig_dev)
{
        atomic_inc(&skb->users);
        return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}

#if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE)
/* These hooks defined here for ATM */
struct net_bridge;
struct net_bridge_fdb_entry *(*br_fdb_get_hook)(struct net_bridge *br,
                                                unsigned char *addr);
void (*br_fdb_put_hook)(struct net_bridge_fdb_entry *ent) __read_mostly;

/*
 * If bridge module is loaded call bridging hook.
 *  returns NULL if packet was consumed.
 */
struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p,
                                        struct sk_buff *skb) __read_mostly;
static inline struct sk_buff *handle_bridge(struct sk_buff *skb,
                                            struct packet_type **pt_prev, int *ret,
                                            struct net_device *orig_dev)
{
        struct net_bridge_port *port;

        if (skb->pkt_type == PACKET_LOOPBACK ||
            (port = rcu_dereference(skb->dev->br_port)) == NULL)
                return skb;

        if (*pt_prev) {
                *ret = deliver_skb(skb, *pt_prev, orig_dev);
                *pt_prev = NULL;
        }

        return br_handle_frame_hook(port, skb);
}
#else
#define handle_bridge(skb, pt_prev, ret, orig_dev)	(skb)
#endif

#if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE)
struct sk_buff *(*macvlan_handle_frame_hook)(struct sk_buff *skb) __read_mostly;
EXPORT_SYMBOL_GPL(macvlan_handle_frame_hook);

static inline struct sk_buff *handle_macvlan(struct sk_buff *skb,
                                             struct packet_type **pt_prev,
                                             int *ret,
                                             struct net_device *orig_dev)
{
        if (skb->dev->macvlan_port == NULL)
                return skb;

        if (*pt_prev) {
                *ret = deliver_skb(skb, *pt_prev, orig_dev);
                *pt_prev = NULL;
        }
        return macvlan_handle_frame_hook(skb);
}
#else
#define handle_macvlan(skb, pt_prev, ret, orig_dev)	(skb)
#endif
#ifdef CONFIG_NET_CLS_ACT
/* TODO: Maybe we should just force sch_ingress to be compiled in
 * when CONFIG_NET_CLS_ACT is?  Otherwise we pay for some useless
 * instructions (a compare and 2 stores) when it is off but
 * CONFIG_NET_CLS_ACT is on.
 * NOTE: This doesn't stop any functionality; if you don't have
 * the ingress scheduler, you just can't add policies on ingress.
 *
 */
static int ing_filter(struct sk_buff *skb)
{
        struct Qdisc *q;
        struct net_device *dev = skb->dev;
        int result = TC_ACT_OK;
        u32 ttl = G_TC_RTTL(skb->tc_verd);

        if (MAX_RED_LOOP < ttl++) {
                printk(KERN_WARNING
                       "Redir loop detected Dropping packet (%d->%d)\n",
                       skb->iif, dev->ifindex);
                return TC_ACT_SHOT;
        }

        skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
        skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);

        spin_lock(&dev->ingress_lock);
        if ((q = dev->qdisc_ingress) != NULL)
                result = q->enqueue(skb, q);
        spin_unlock(&dev->ingress_lock);

        return result;
}

static inline struct sk_buff *handle_ing(struct sk_buff *skb,
                                         struct packet_type **pt_prev,
                                         int *ret, struct net_device *orig_dev)
{
        if (!skb->dev->qdisc_ingress)
                goto out;

        if (*pt_prev) {
                *ret = deliver_skb(skb, *pt_prev, orig_dev);
                *pt_prev = NULL;
        } else {
                /* Huh? Why does turning on AF_PACKET affect this? */
                skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
        }

        switch (ing_filter(skb)) {
        case TC_ACT_SHOT:
        case TC_ACT_STOLEN:
                kfree_skb(skb);
                return NULL;
        }

out:
        skb->tc_verd = 0;
        return skb;
}
#endif
/**
 *	netif_receive_skb - process receive buffer from network
 *	@skb: buffer to process
 *
 *	netif_receive_skb() is the main receive data processing function.
 *	It always succeeds. The buffer may be dropped during processing
 *	for congestion control or by the protocol layers.
 *
 *	This function may only be called from softirq context and interrupts
 *	should be enabled.
 *
 *	Return values (usually ignored):
 *	NET_RX_SUCCESS: no congestion
 *	NET_RX_DROP: packet was dropped
 */
int netif_receive_skb(struct sk_buff *skb)
{
        struct packet_type *ptype, *pt_prev;
        struct net_device *orig_dev;
        int ret = NET_RX_DROP;
        __be16 type;

        /* if we've gotten here through NAPI, check netpoll */
        if (netpoll_receive_skb(skb))
                return NET_RX_DROP;

        if (!skb->tstamp.tv64)
                net_timestamp(skb);

        if (!skb->iif)
                skb->iif = skb->dev->ifindex;

        orig_dev = skb_bond(skb);

        if (!orig_dev)
                return NET_RX_DROP;

        __get_cpu_var(netdev_rx_stat).total++;

        skb_reset_network_header(skb);
        skb_reset_transport_header(skb);
        skb->mac_len = skb->network_header - skb->mac_header;

        pt_prev = NULL;

        rcu_read_lock();

        /* Don't receive packets in an exiting network namespace */
        if (!net_alive(dev_net(skb->dev)))
                goto out;

#ifdef CONFIG_NET_CLS_ACT
        if (skb->tc_verd & TC_NCLS) {
                skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
                goto ncls;
        }
#endif

        list_for_each_entry_rcu(ptype, &ptype_all, list) {
                if (!ptype->dev || ptype->dev == skb->dev) {
                        if (pt_prev)
                                ret = deliver_skb(skb, pt_prev, orig_dev);
                        pt_prev = ptype;
                }
        }

#ifdef CONFIG_NET_CLS_ACT
        skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
        if (!skb)
                goto out;
ncls:
#endif

        skb = handle_bridge(skb, &pt_prev, &ret, orig_dev);
        if (!skb)
                goto out;
        skb = handle_macvlan(skb, &pt_prev, &ret, orig_dev);
        if (!skb)
                goto out;

        type = skb->protocol;
        list_for_each_entry_rcu(ptype,
                        &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
                if (ptype->type == type &&
                    (!ptype->dev || ptype->dev == skb->dev)) {
                        if (pt_prev)
                                ret = deliver_skb(skb, pt_prev, orig_dev);
                        pt_prev = ptype;
                }
        }

        if (pt_prev) {
                ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
        } else {
                kfree_skb(skb);
                /* Jamal, now you will not be able to escape explaining
                 * to me how you were going to use this. :-)
                 */
                ret = NET_RX_DROP;
        }

out:
        rcu_read_unlock();
        return ret;
}
static int process_backlog(struct napi_struct *napi, int quota)
{
        int work = 0;
        struct softnet_data *queue = &__get_cpu_var(softnet_data);
        unsigned long start_time = jiffies;

        napi->weight = weight_p;
        do {
                struct sk_buff *skb;
                struct net_device *dev;

                local_irq_disable();
                skb = __skb_dequeue(&queue->input_pkt_queue);
                if (!skb) {
                        __napi_complete(napi);
                        local_irq_enable();
                        break;
                }

                local_irq_enable();

                dev = skb->dev;

                netif_receive_skb(skb);

                dev_put(dev);
        } while (++work < quota && jiffies == start_time);

        return work;
}
/**
 * __napi_schedule - schedule for receive
 * @n: entry to schedule
 *
 * The entry's receive function will be scheduled to run
 */
void __napi_schedule(struct napi_struct *n)
{
        unsigned long flags;

        local_irq_save(flags);
        list_add_tail(&n->poll_list, &__get_cpu_var(softnet_data).poll_list);
        __raise_softirq_irqoff(NET_RX_SOFTIRQ);
        local_irq_restore(flags);
}
EXPORT_SYMBOL(__napi_schedule);
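/*
 * Example NAPI consumer (hypothetical driver; example_priv and the
 * example_* helpers are invented names): the interrupt handler schedules
 * the napi_struct, and ->poll() processes at most "budget" packets,
 * completing only when the ring runs dry:
 */
#if 0
static int example_poll(struct napi_struct *napi, int budget)
{
        struct example_priv *priv =
                container_of(napi, struct example_priv, napi);
        int work = 0;

        while (work < budget && example_rx_ring_nonempty(priv))
                work += example_rx_one(priv);

        if (work < budget) {
                netif_rx_complete(priv->dev, napi); /* clears NAPI_STATE_SCHED */
                example_enable_rx_irq(priv);
        }
        return work;
}
#endif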
static void net_rx_action(struct softirq_action *h)
{
        struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
        unsigned long start_time = jiffies;
        int budget = netdev_budget;
        void *have;

        local_irq_disable();

        while (!list_empty(list)) {
                struct napi_struct *n;
                int work, weight;

                /* If softirq window is exhausted then punt.
                 *
                 * Note that this is a slight policy change from the
                 * previous NAPI code, which would allow up to 2
                 * jiffies to pass before breaking out.  The test
                 * used to be "jiffies - start_time > 1".
                 */
                if (unlikely(budget <= 0 || jiffies != start_time))
                        goto softnet_break;

                local_irq_enable();

                /* Even though interrupts have been re-enabled, this
                 * access is safe because interrupts can only add new
                 * entries to the tail of this list, and only ->poll()
                 * calls can remove this head entry from the list.
                 */
                n = list_entry(list->next, struct napi_struct, poll_list);

                have = netpoll_poll_lock(n);

                weight = n->weight;

                /* This NAPI_STATE_SCHED test is for avoiding a race
                 * with netpoll's poll_napi().  Only the entity which
                 * obtains the lock and sees NAPI_STATE_SCHED set will
                 * actually make the ->poll() call.  Therefore we avoid
                 * accidentally calling ->poll() when NAPI is not scheduled.
                 */
                work = 0;
                if (test_bit(NAPI_STATE_SCHED, &n->state))
                        work = n->poll(n, weight);

                WARN_ON_ONCE(work > weight);

                budget -= work;

                local_irq_disable();

                /* Drivers must not modify the NAPI state if they
                 * consume the entire weight.  In such cases this code
                 * still "owns" the NAPI instance and therefore can
                 * move the instance around on the list at-will.
                 */
                if (unlikely(work == weight)) {
                        if (unlikely(napi_disable_pending(n)))
                                __napi_complete(n);
                        else
                                list_move_tail(&n->poll_list, list);
                }

                netpoll_poll_unlock(have);
        }
out:
        local_irq_enable();

#ifdef CONFIG_NET_DMA
        /*
         * There may not be any more sk_buffs coming right now, so push
         * any pending DMA copies to hardware
         */
        if (!cpus_empty(net_dma.channel_mask)) {
                int chan_idx;
                for_each_cpu_mask(chan_idx, net_dma.channel_mask) {
                        struct dma_chan *chan = net_dma.channels[chan_idx];
                        if (chan)
                                dma_async_memcpy_issue_pending(chan);
                }
        }
#endif

        return;

softnet_break:
        __get_cpu_var(netdev_rx_stat).time_squeeze++;
        __raise_softirq_irqoff(NET_RX_SOFTIRQ);
        goto out;
}
static gifconf_func_t *gifconf_list[NPROTO];

/**
 *	register_gifconf - register a SIOCGIF handler
 *	@family: Address family
 *	@gifconf: Function handler
 *
 *	Register protocol dependent address dumping routines. The handler
 *	that is passed must not be freed or reused until it has been replaced
 *	by another handler.
 */
int register_gifconf(unsigned int family, gifconf_func_t *gifconf)
{
        if (family >= NPROTO)
                return -EINVAL;
        gifconf_list[family] = gifconf;
        return 0;
}
/*
 *	Map an interface index to its name (SIOCGIFNAME)
 */

/*
 *	We need this ioctl for efficient implementation of the
 *	if_indextoname() function required by the IPv6 API.  Without
 *	it, we would have to search all the interfaces to find a
 *	match.
 */

static int dev_ifname(struct net *net, struct ifreq __user *arg)
{
        struct net_device *dev;
        struct ifreq ifr;

        /*
         *	Fetch the caller's info block.
         */

        if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
                return -EFAULT;

        read_lock(&dev_base_lock);
        dev = __dev_get_by_index(net, ifr.ifr_ifindex);
        if (!dev) {
                read_unlock(&dev_base_lock);
                return -ENODEV;
        }

        strcpy(ifr.ifr_name, dev->name);
        read_unlock(&dev_base_lock);

        if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
                return -EFAULT;
        return 0;
}
/*
 *	Perform a SIOCGIFCONF call. This structure will change
 *	size eventually, and there is nothing I can do about it.
 *	Thus we will need a 'compatibility mode'.
 */

static int dev_ifconf(struct net *net, char __user *arg)
{
        struct ifconf ifc;
        struct net_device *dev;
        char __user *pos;
        int len;
        int total;
        int i;

        /*
         *	Fetch the caller's info block.
         */

        if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
                return -EFAULT;

        pos = ifc.ifc_buf;
        len = ifc.ifc_len;

        /*
         *	Loop over the interfaces, and write an info block for each.
         */

        total = 0;
        for_each_netdev(net, dev) {
                for (i = 0; i < NPROTO; i++) {
                        if (gifconf_list[i]) {
                                int done;
                                if (!pos)
                                        done = gifconf_list[i](dev, NULL, 0);
                                else
                                        done = gifconf_list[i](dev, pos + total,
                                                               len - total);
                                if (done < 0)
                                        return -EFAULT;
                                total += done;
                        }
                }
        }

        /*
         *	All done.  Write the updated control block back to the caller.
         */
        ifc.ifc_len = total;

        /*
         *	Both BSD and Solaris return 0 here, so we do too.
         */
        return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
}
#ifdef CONFIG_PROC_FS
/*
 *	This is invoked by the /proc filesystem handler to display a device
 *	in detail.
 */
void *dev_seq_start(struct seq_file *seq, loff_t *pos)
        __acquires(dev_base_lock)
{
        struct net *net = seq_file_net(seq);
        loff_t off;
        struct net_device *dev;

        read_lock(&dev_base_lock);
        if (!*pos)
                return SEQ_START_TOKEN;

        off = 1;
        for_each_netdev(net, dev)
                if (off++ == *pos)
                        return dev;

        return NULL;
}

void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        struct net *net = seq_file_net(seq);
        ++*pos;
        return v == SEQ_START_TOKEN ?
                first_net_device(net) : next_net_device((struct net_device *)v);
}

void dev_seq_stop(struct seq_file *seq, void *v)
        __releases(dev_base_lock)
{
        read_unlock(&dev_base_lock);
}
2433 static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
2435 struct net_device_stats *stats = dev->get_stats(dev);
2437 seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
2438 "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
2439 dev->name, stats->rx_bytes, stats->rx_packets,
2441 stats->rx_dropped + stats->rx_missed_errors,
2442 stats->rx_fifo_errors,
2443 stats->rx_length_errors + stats->rx_over_errors +
2444 stats->rx_crc_errors + stats->rx_frame_errors,
2445 stats->rx_compressed, stats->multicast,
2446 stats->tx_bytes, stats->tx_packets,
2447 stats->tx_errors, stats->tx_dropped,
2448 stats->tx_fifo_errors, stats->collisions,
2449 stats->tx_carrier_errors +
2450 stats->tx_aborted_errors +
2451 stats->tx_window_errors +
2452 stats->tx_heartbeat_errors,
2453 stats->tx_compressed);
2457 * Called from the PROCfs module. This now uses the new arbitrary-sized
2458 * /proc/net interface to create /proc/net/dev
2460 static int dev_seq_show(struct seq_file *seq, void *v)
2462 if (v == SEQ_START_TOKEN)
2463 seq_puts(seq, "Inter-| Receive "
2465 " face |bytes packets errs drop fifo frame "
2466 "compressed multicast|bytes packets errs "
2467 "drop fifo colls carrier compressed\n");
2469 dev_seq_printf_stats(seq, v);
2473 static struct netif_rx_stats *softnet_get_online(loff_t *pos)
2475 struct netif_rx_stats *rc = NULL;
2477 while (*pos < nr_cpu_ids)
2478 if (cpu_online(*pos)) {
2479 rc = &per_cpu(netdev_rx_stat, *pos);
2486 static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
2488 return softnet_get_online(pos);
2491 static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2494 return softnet_get_online(pos);
2497 static void softnet_seq_stop(struct seq_file *seq, void *v)
2501 static int softnet_seq_show(struct seq_file *seq, void *v)
2503 struct netif_rx_stats *s = v;
2505 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
2506 s->total, s->dropped, s->time_squeeze, 0,
2507 0, 0, 0, 0, /* was fastroute */
2512 static const struct seq_operations dev_seq_ops = {
2513 .start = dev_seq_start,
2514 .next = dev_seq_next,
2515 .stop = dev_seq_stop,
2516 .show = dev_seq_show,
2519 static int dev_seq_open(struct inode *inode, struct file *file)
2521 return seq_open_net(inode, file, &dev_seq_ops,
2522 sizeof(struct seq_net_private));
2525 static const struct file_operations dev_seq_fops = {
2526 .owner = THIS_MODULE,
2527 .open = dev_seq_open,
2529 .llseek = seq_lseek,
2530 .release = seq_release_net,
2533 static const struct seq_operations softnet_seq_ops = {
2534 .start = softnet_seq_start,
2535 .next = softnet_seq_next,
2536 .stop = softnet_seq_stop,
2537 .show = softnet_seq_show,
2540 static int softnet_seq_open(struct inode *inode, struct file *file)
2542 return seq_open(file, &softnet_seq_ops);
2545 static const struct file_operations softnet_seq_fops = {
2546 .owner = THIS_MODULE,
2547 .open = softnet_seq_open,
2549 .llseek = seq_lseek,
2550 .release = seq_release,
2553 static void *ptype_get_idx(loff_t pos)
2555 struct packet_type *pt = NULL;
2559 list_for_each_entry_rcu(pt, &ptype_all, list) {
2565 for (t = 0; t < PTYPE_HASH_SIZE; t++) {
2566 list_for_each_entry_rcu(pt, &ptype_base[t], list) {
2575 static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
2579 return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
2582 static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2584 struct packet_type *pt;
2585 struct list_head *nxt;
2589 if (v == SEQ_START_TOKEN)
2590 return ptype_get_idx(0);
2593 nxt = pt->list.next;
2594 if (pt->type == htons(ETH_P_ALL)) {
2595 if (nxt != &ptype_all)
2598 nxt = ptype_base[0].next;
2600 hash = ntohs(pt->type) & PTYPE_HASH_MASK;
2602 while (nxt == &ptype_base[hash]) {
2603 if (++hash >= PTYPE_HASH_SIZE)
2605 nxt = ptype_base[hash].next;
2608 return list_entry(nxt, struct packet_type, list);
2611 static void ptype_seq_stop(struct seq_file *seq, void *v)
2617 static void ptype_seq_decode(struct seq_file *seq, void *sym)
2619 #ifdef CONFIG_KALLSYMS
2620 unsigned long offset = 0, symsize;
2621 const char *symname;
2625 symname = kallsyms_lookup((unsigned long)sym, &symsize, &offset,
2632 modname = delim = "";
2633 seq_printf(seq, "%s%s%s%s+0x%lx", delim, modname, delim,
2639 seq_printf(seq, "[%p]", sym);
2642 static int ptype_seq_show(struct seq_file *seq, void *v)
2644 struct packet_type *pt = v;
2646 if (v == SEQ_START_TOKEN)
2647 seq_puts(seq, "Type Device Function\n");
2648 else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
2649 if (pt->type == htons(ETH_P_ALL))
2650 seq_puts(seq, "ALL ");
2652 seq_printf(seq, "%04x", ntohs(pt->type));
2654 seq_printf(seq, " %-8s ",
2655 pt->dev ? pt->dev->name : "");
2656 ptype_seq_decode(seq, pt->func);
2657 seq_putc(seq, '\n');
2663 static const struct seq_operations ptype_seq_ops = {
2664 .start = ptype_seq_start,
2665 .next = ptype_seq_next,
2666 .stop = ptype_seq_stop,
2667 .show = ptype_seq_show,
2670 static int ptype_seq_open(struct inode *inode, struct file *file)
2672 return seq_open_net(inode, file, &ptype_seq_ops,
2673 sizeof(struct seq_net_private));
2676 static const struct file_operations ptype_seq_fops = {
2677 .owner = THIS_MODULE,
2678 .open = ptype_seq_open,
2680 .llseek = seq_lseek,
2681 .release = seq_release_net,
2685 static int __net_init dev_proc_net_init(struct net *net)
2689 if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
2691 if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
2693 if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
2696 if (wext_proc_init(net))
2702 proc_net_remove(net, "ptype");
2704 proc_net_remove(net, "softnet_stat");
2706 proc_net_remove(net, "dev");
2710 static void __net_exit dev_proc_net_exit(struct net *net)
2712 wext_proc_exit(net);
2714 proc_net_remove(net, "ptype");
2715 proc_net_remove(net, "softnet_stat");
2716 proc_net_remove(net, "dev");
2719 static struct pernet_operations __net_initdata dev_proc_ops = {
2720 .init = dev_proc_net_init,
2721 .exit = dev_proc_net_exit,
2724 static int __init dev_proc_init(void)
2726 return register_pernet_subsys(&dev_proc_ops);
2729 #define dev_proc_init() 0
2730 #endif /* CONFIG_PROC_FS */
2734 * netdev_set_master - set up master/slave pair
2735 * @slave: slave device
2736 * @master: new master device
2738 * Changes the master device of the slave. Pass %NULL to break the
2739 * bonding. The caller must hold the RTNL semaphore. On a failure
2740 * a negative errno code is returned. On success the reference counts
2741 * are adjusted, %RTM_NEWLINK is sent to the routing socket and the
2742 * function returns zero.
2744 int netdev_set_master(struct net_device *slave, struct net_device *master)
2746 struct net_device *old = slave->master;
2756 slave->master = master;
2764 slave->flags |= IFF_SLAVE;
2766 slave->flags &= ~IFF_SLAVE;
2768 rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
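/*
 * Sketch (hypothetical caller -- "example_enslave" is illustrative):
 * a bonding-style master enslaving a device under RTNL, the context
 * the kernel-doc above requires.
 */
static int example_enslave(struct net_device *master,
			   struct net_device *slave)
{
	int err;

	ASSERT_RTNL();
	err = netdev_set_master(slave, master);
	if (err)
		return err;
	/* ... driver-specific setup of the slave would follow ... */
	return 0;
}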
2772 static void __dev_set_promiscuity(struct net_device *dev, int inc)
2774 unsigned short old_flags = dev->flags;
2778 if ((dev->promiscuity += inc) == 0)
2779 dev->flags &= ~IFF_PROMISC;
2781 dev->flags |= IFF_PROMISC;
2782 if (dev->flags != old_flags) {
2783 printk(KERN_INFO "device %s %s promiscuous mode\n",
2784 dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
2787 audit_log(current->audit_context, GFP_ATOMIC,
2788 AUDIT_ANOM_PROMISCUOUS,
2789 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
2790 dev->name, (dev->flags & IFF_PROMISC),
2791 (old_flags & IFF_PROMISC),
2792 audit_get_loginuid(current),
2793 current->uid, current->gid,
2794 audit_get_sessionid(current));
2796 if (dev->change_rx_flags)
2797 dev->change_rx_flags(dev, IFF_PROMISC);
2802 * dev_set_promiscuity - update promiscuity count on a device
2806 * Add or remove promiscuity from a device. While the count in the device
2807 * remains above zero the interface remains promiscuous. Once it hits zero
2808 * the device reverts back to normal filtering operation. A negative inc
2809 * value is used to drop promiscuity on the device.
2811 void dev_set_promiscuity(struct net_device *dev, int inc)
2813 unsigned short old_flags = dev->flags;
2815 __dev_set_promiscuity(dev, inc);
2816 if (dev->flags != old_flags)
2817 dev_set_rx_mode(dev);
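/*
 * Sketch: a capture-style user pinning promiscuous mode.  The counter
 * above means concurrent users stack safely; each +1 here must be
 * balanced by a -1 on the way out.
 */
static void example_capture_start(struct net_device *dev)
{
	rtnl_lock();
	dev_set_promiscuity(dev, 1);	/* enter promiscuous mode */
	rtnl_unlock();
}

static void example_capture_stop(struct net_device *dev)
{
	rtnl_lock();
	dev_set_promiscuity(dev, -1);	/* drop our reference */
	rtnl_unlock();
}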
2821 * dev_set_allmulti - update allmulti count on a device
2825 * Add or remove reception of all multicast frames to a device. While the
2826 * count in the device remains above zero the interface remains listening
2827 * to all multicast frames. Once it hits zero the device reverts to normal
2828 * filtering operation. A negative @inc value is used to drop the counter
2829 * when releasing a resource needing all multicasts.
2832 void dev_set_allmulti(struct net_device *dev, int inc)
2834 unsigned short old_flags = dev->flags;
2838 dev->flags |= IFF_ALLMULTI;
2839 if ((dev->allmulti += inc) == 0)
2840 dev->flags &= ~IFF_ALLMULTI;
2841 if (dev->flags ^ old_flags) {
2842 if (dev->change_rx_flags)
2843 dev->change_rx_flags(dev, IFF_ALLMULTI);
2844 dev_set_rx_mode(dev);
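/*
 * Sketch: a user that must see every multicast frame (a routing
 * daemon's kernel helper, say) pins all-multicast the same way.
 */
static void example_need_allmulti(struct net_device *dev, int on)
{
	rtnl_lock();
	dev_set_allmulti(dev, on ? 1 : -1);
	rtnl_unlock();
}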
2849 * Upload unicast and multicast address lists to device and
2850 * configure RX filtering. When the device doesn't support unicast
2851 * filtering it is put in promiscuous mode while unicast addresses are present.
2854 void __dev_set_rx_mode(struct net_device *dev)
2856 /* dev_open will call this function so the list will stay sane. */
2857 if (!(dev->flags&IFF_UP))
2860 if (!netif_device_present(dev))
2863 if (dev->set_rx_mode)
2864 dev->set_rx_mode(dev);
2866 /* Unicast address changes may only happen under the rtnl,
2867 * therefore calling __dev_set_promiscuity here is safe.
2869 if (dev->uc_count > 0 && !dev->uc_promisc) {
2870 __dev_set_promiscuity(dev, 1);
2871 dev->uc_promisc = 1;
2872 } else if (dev->uc_count == 0 && dev->uc_promisc) {
2873 __dev_set_promiscuity(dev, -1);
2874 dev->uc_promisc = 0;
2877 if (dev->set_multicast_list)
2878 dev->set_multicast_list(dev);
2882 void dev_set_rx_mode(struct net_device *dev)
2884 netif_tx_lock_bh(dev);
2885 __dev_set_rx_mode(dev);
2886 netif_tx_unlock_bh(dev);
2889 int __dev_addr_delete(struct dev_addr_list **list, int *count,
2890 void *addr, int alen, int glbl)
2892 struct dev_addr_list *da;
2894 for (; (da = *list) != NULL; list = &da->next) {
2895 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
2896 alen == da->da_addrlen) {
2898 int old_glbl = da->da_gusers;
2915 int __dev_addr_add(struct dev_addr_list **list, int *count,
2916 void *addr, int alen, int glbl)
2918 struct dev_addr_list *da;
2920 for (da = *list; da != NULL; da = da->next) {
2921 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
2922 da->da_addrlen == alen) {
2924 int old_glbl = da->da_gusers;
2934 da = kzalloc(sizeof(*da), GFP_ATOMIC);
2937 memcpy(da->da_addr, addr, alen);
2938 da->da_addrlen = alen;
2940 da->da_gusers = glbl ? 1 : 0;
2948 * dev_unicast_delete - Release secondary unicast address.
2950 * @addr: address to delete
2951 * @alen: length of @addr
2953 * Release reference to a secondary unicast address and remove it
2954 * from the device if the reference count drops to zero.
2956 * The caller must hold the rtnl_mutex.
2958 int dev_unicast_delete(struct net_device *dev, void *addr, int alen)
2964 netif_tx_lock_bh(dev);
2965 err = __dev_addr_delete(&dev->uc_list, &dev->uc_count, addr, alen, 0);
2967 __dev_set_rx_mode(dev);
2968 netif_tx_unlock_bh(dev);
2971 EXPORT_SYMBOL(dev_unicast_delete);
2974 * dev_unicast_add - add a secondary unicast address
2976 * @addr: address to add
2977 * @alen: length of @addr
2979 * Add a secondary unicast address to the device or increase
2980 * the reference count if it already exists.
2982 * The caller must hold the rtnl_mutex.
2984 int dev_unicast_add(struct net_device *dev, void *addr, int alen)
2990 netif_tx_lock_bh(dev);
2991 err = __dev_addr_add(&dev->uc_list, &dev->uc_count, addr, alen, 0);
2993 __dev_set_rx_mode(dev);
2994 netif_tx_unlock_bh(dev);
2997 EXPORT_SYMBOL(dev_unicast_add);
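/*
 * Sketch (hypothetical caller): claiming an extra unicast address for
 * a virtual interface, with the symmetric release.  rtnl is held by
 * the caller, per the kernel-doc above.
 */
static int example_take_address(struct net_device *dev, u8 *mac)
{
	ASSERT_RTNL();
	return dev_unicast_add(dev, mac, ETH_ALEN);
}

static void example_drop_address(struct net_device *dev, u8 *mac)
{
	ASSERT_RTNL();
	dev_unicast_delete(dev, mac, ETH_ALEN);
}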
2999 int __dev_addr_sync(struct dev_addr_list **to, int *to_count,
3000 struct dev_addr_list **from, int *from_count)
3002 struct dev_addr_list *da, *next;
3006 while (da != NULL) {
3008 if (!da->da_synced) {
3009 err = __dev_addr_add(to, to_count,
3010 da->da_addr, da->da_addrlen, 0);
3015 } else if (da->da_users == 1) {
3016 __dev_addr_delete(to, to_count,
3017 da->da_addr, da->da_addrlen, 0);
3018 __dev_addr_delete(from, from_count,
3019 da->da_addr, da->da_addrlen, 0);
3026 void __dev_addr_unsync(struct dev_addr_list **to, int *to_count,
3027 struct dev_addr_list **from, int *from_count)
3029 struct dev_addr_list *da, *next;
3032 while (da != NULL) {
3034 if (da->da_synced) {
3035 __dev_addr_delete(to, to_count,
3036 da->da_addr, da->da_addrlen, 0);
3038 __dev_addr_delete(from, from_count,
3039 da->da_addr, da->da_addrlen, 0);
3046 * dev_unicast_sync - Synchronize device's unicast list to another device
3047 * @to: destination device
3048 * @from: source device
3050 * Add newly added addresses to the destination device and release
3051 * addresses that have no users left. The source device must be
3052 * locked by netif_tx_lock_bh.
3054 * This function is intended to be called from the dev->set_rx_mode
3055 * function of layered software devices.
3057 int dev_unicast_sync(struct net_device *to, struct net_device *from)
3061 netif_tx_lock_bh(to);
3062 err = __dev_addr_sync(&to->uc_list, &to->uc_count,
3063 &from->uc_list, &from->uc_count);
3065 __dev_set_rx_mode(to);
3066 netif_tx_unlock_bh(to);
3069 EXPORT_SYMBOL(dev_unicast_sync);
3072 * dev_unicast_unsync - Remove synchronized addresses from the destination device
3073 * @to: destination device
3074 * @from: source device
3076 * Remove all addresses that were added to the destination device by
3077 * dev_unicast_sync(). This function is intended to be called from the
3078 * dev->stop function of layered software devices.
3080 void dev_unicast_unsync(struct net_device *to, struct net_device *from)
3082 netif_tx_lock_bh(from);
3083 netif_tx_lock_bh(to);
3085 __dev_addr_unsync(&to->uc_list, &to->uc_count,
3086 &from->uc_list, &from->uc_count);
3087 __dev_set_rx_mode(to);
3089 netif_tx_unlock_bh(to);
3090 netif_tx_unlock_bh(from);
3092 EXPORT_SYMBOL(dev_unicast_unsync);
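/*
 * Sketch: the call sites the kernel-docs above have in mind, as a
 * layered (vlan/macvlan style) device would wire them up.  "lowerdev"
 * stands in for however the driver locates its underlying device.
 */
static void example_upper_set_rx_mode(struct net_device *dev)
{
	struct net_device *lowerdev = dev->master;	/* illustrative */

	dev_unicast_sync(lowerdev, dev);
}

static int example_upper_stop(struct net_device *dev)
{
	struct net_device *lowerdev = dev->master;	/* illustrative */

	dev_unicast_unsync(lowerdev, dev);
	return 0;
}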
3094 static void __dev_addr_discard(struct dev_addr_list **list)
3096 struct dev_addr_list *tmp;
3098 while (*list != NULL) {
3101 if (tmp->da_users > tmp->da_gusers)
3102 printk(KERN_ERR "__dev_addr_discard: address leakage! "
3103 "da_users=%d\n", tmp->da_users);
3108 static void dev_addr_discard(struct net_device *dev)
3110 netif_tx_lock_bh(dev);
3112 __dev_addr_discard(&dev->uc_list);
3115 __dev_addr_discard(&dev->mc_list);
3118 netif_tx_unlock_bh(dev);
3121 unsigned dev_get_flags(const struct net_device *dev)
3125 flags = (dev->flags & ~(IFF_PROMISC |
3130 (dev->gflags & (IFF_PROMISC |
3133 if (netif_running(dev)) {
3134 if (netif_oper_up(dev))
3135 flags |= IFF_RUNNING;
3136 if (netif_carrier_ok(dev))
3137 flags |= IFF_LOWER_UP;
3138 if (netif_dormant(dev))
3139 flags |= IFF_DORMANT;
3145 int dev_change_flags(struct net_device *dev, unsigned flags)
3148 int old_flags = dev->flags;
3153 * Set the flags on our device.
3156 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
3157 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
3159 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
3163 * Load in the correct multicast list now the flags have changed.
3166 if (dev->change_rx_flags && (old_flags ^ flags) & IFF_MULTICAST)
3167 dev->change_rx_flags(dev, IFF_MULTICAST);
3169 dev_set_rx_mode(dev);
3172 * Have we downed the interface? We handle IFF_UP ourselves
3173 * according to user attempts to set it, rather than blindly setting it.
3178 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
3179 ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev);
3182 dev_set_rx_mode(dev);
3185 if (dev->flags & IFF_UP &&
3186 ((old_flags ^ dev->flags) &~ (IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
3188 call_netdevice_notifiers(NETDEV_CHANGE, dev);
3190 if ((flags ^ dev->gflags) & IFF_PROMISC) {
3191 int inc = (flags & IFF_PROMISC) ? +1 : -1;
3192 dev->gflags ^= IFF_PROMISC;
3193 dev_set_promiscuity(dev, inc);
3196 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
3197 is important. Some (broken) drivers set IFF_PROMISC when
3198 IFF_ALLMULTI is requested, without asking us and without reporting it.
3200 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
3201 int inc = (flags & IFF_ALLMULTI) ? +1 : -1;
3202 dev->gflags ^= IFF_ALLMULTI;
3203 dev_set_allmulti(dev, inc);
3206 /* Exclude state transition flags, already notified */
3207 changes = (old_flags ^ dev->flags) & ~(IFF_UP | IFF_RUNNING);
3209 rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
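/*
 * Sketch: flipping an interface administratively up from kernel code;
 * this is the same path the SIOCSIFFLAGS handler below ends up in.
 */
static int example_bring_up(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_change_flags(dev, dev->flags | IFF_UP);
	rtnl_unlock();
	return err;
}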
3214 int dev_set_mtu(struct net_device *dev, int new_mtu)
3218 if (new_mtu == dev->mtu)
3221 /* MTU must be positive. */
3225 if (!netif_device_present(dev))
3229 if (dev->change_mtu)
3230 err = dev->change_mtu(dev, new_mtu);
3233 if (!err && dev->flags & IFF_UP)
3234 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
3238 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
3242 if (!dev->set_mac_address)
3244 if (sa->sa_family != dev->type)
3246 if (!netif_device_present(dev))
3248 err = dev->set_mac_address(dev, sa);
3250 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
3255 * Perform the SIOCxIFxxx calls, inside read_lock(dev_base_lock)
3257 static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
3260 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
3266 case SIOCGIFFLAGS: /* Get interface flags */
3267 ifr->ifr_flags = dev_get_flags(dev);
3270 case SIOCGIFMETRIC: /* Get the metric on the interface
3271 (currently unused) */
3272 ifr->ifr_metric = 0;
3275 case SIOCGIFMTU: /* Get the MTU of a device */
3276 ifr->ifr_mtu = dev->mtu;
3281 memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
3283 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
3284 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
3285 ifr->ifr_hwaddr.sa_family = dev->type;
3293 ifr->ifr_map.mem_start = dev->mem_start;
3294 ifr->ifr_map.mem_end = dev->mem_end;
3295 ifr->ifr_map.base_addr = dev->base_addr;
3296 ifr->ifr_map.irq = dev->irq;
3297 ifr->ifr_map.dma = dev->dma;
3298 ifr->ifr_map.port = dev->if_port;
3302 ifr->ifr_ifindex = dev->ifindex;
3306 ifr->ifr_qlen = dev->tx_queue_len;
3310 /* dev_ioctl() should ensure this case is never reached */
3322 * Perform the SIOCxIFxxx calls, inside rtnl_lock()
3324 static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
3327 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
3333 case SIOCSIFFLAGS: /* Set interface flags */
3334 return dev_change_flags(dev, ifr->ifr_flags);
3336 case SIOCSIFMETRIC: /* Set the metric on the interface
3337 (currently unused) */
3340 case SIOCSIFMTU: /* Set the MTU of a device */
3341 return dev_set_mtu(dev, ifr->ifr_mtu);
3344 return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
3346 case SIOCSIFHWBROADCAST:
3347 if (ifr->ifr_hwaddr.sa_family != dev->type)
3349 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
3350 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
3351 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
3355 if (dev->set_config) {
3356 if (!netif_device_present(dev))
3358 return dev->set_config(dev, &ifr->ifr_map);
3363 if ((!dev->set_multicast_list && !dev->set_rx_mode) ||
3364 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
3366 if (!netif_device_present(dev))
3368 return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data,
3372 if ((!dev->set_multicast_list && !dev->set_rx_mode) ||
3373 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
3375 if (!netif_device_present(dev))
3377 return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data,
3381 if (ifr->ifr_qlen < 0)
3383 dev->tx_queue_len = ifr->ifr_qlen;
3387 ifr->ifr_newname[IFNAMSIZ-1] = '\0';
3388 return dev_change_name(dev, ifr->ifr_newname);
3391 * Unknown or private ioctl
3395 if ((cmd >= SIOCDEVPRIVATE &&
3396 cmd <= SIOCDEVPRIVATE + 15) ||
3397 cmd == SIOCBONDENSLAVE ||
3398 cmd == SIOCBONDRELEASE ||
3399 cmd == SIOCBONDSETHWADDR ||
3400 cmd == SIOCBONDSLAVEINFOQUERY ||
3401 cmd == SIOCBONDINFOQUERY ||
3402 cmd == SIOCBONDCHANGEACTIVE ||
3403 cmd == SIOCGMIIPHY ||
3404 cmd == SIOCGMIIREG ||
3405 cmd == SIOCSMIIREG ||
3406 cmd == SIOCBRADDIF ||
3407 cmd == SIOCBRDELIF ||
3408 cmd == SIOCWANDEV) {
3410 if (dev->do_ioctl) {
3411 if (netif_device_present(dev))
3412 err = dev->do_ioctl(dev, ifr,
3425 * This function handles all "interface"-type I/O control requests. The actual
3426 * 'doing' part of this is dev_ifsioc above.
3430 * dev_ioctl - network device ioctl
3431 * @net: the applicable net namespace
3432 * @cmd: command to issue
3433 * @arg: pointer to a struct ifreq in user space
3435 * Issue ioctl functions to devices. This is normally called by the
3436 * user space syscall interfaces but can sometimes be useful for
3437 * other purposes. The return value is the return from the syscall if
3438 * positive or a negative errno code on error.
3441 int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
3447 /* One special case: SIOCGIFCONF takes ifconf argument
3448 and requires shared lock, because it sleeps writing to user space. */
3452 if (cmd == SIOCGIFCONF) {
3454 ret = dev_ifconf(net, (char __user *) arg);
3458 if (cmd == SIOCGIFNAME)
3459 return dev_ifname(net, (struct ifreq __user *)arg);
3461 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
3464 ifr.ifr_name[IFNAMSIZ-1] = 0;
3466 colon = strchr(ifr.ifr_name, ':');
3471 * See which interface the caller is talking about.
3476 * These ioctl calls:
3477 * - can be done by all.
3478 * - atomic and do not require locking.
3489 dev_load(net, ifr.ifr_name);
3490 read_lock(&dev_base_lock);
3491 ret = dev_ifsioc_locked(net, &ifr, cmd);
3492 read_unlock(&dev_base_lock);
3496 if (copy_to_user(arg, &ifr,
3497 sizeof(struct ifreq)))
3503 dev_load(net, ifr.ifr_name);
3505 ret = dev_ethtool(net, &ifr);
3510 if (copy_to_user(arg, &ifr,
3511 sizeof(struct ifreq)))
3517 * These ioctl calls:
3518 * - require superuser power.
3519 * - require strict serialization.
3525 if (!capable(CAP_NET_ADMIN))
3527 dev_load(net, ifr.ifr_name);
3529 ret = dev_ifsioc(net, &ifr, cmd);
3534 if (copy_to_user(arg, &ifr,
3535 sizeof(struct ifreq)))
3541 * These ioctl calls:
3542 * - require superuser power.
3543 * - require strict serialization.
3544 * - do not return a value
3554 case SIOCSIFHWBROADCAST:
3557 case SIOCBONDENSLAVE:
3558 case SIOCBONDRELEASE:
3559 case SIOCBONDSETHWADDR:
3560 case SIOCBONDCHANGEACTIVE:
3563 if (!capable(CAP_NET_ADMIN))
3566 case SIOCBONDSLAVEINFOQUERY:
3567 case SIOCBONDINFOQUERY:
3568 dev_load(net, ifr.ifr_name);
3570 ret = dev_ifsioc(net, &ifr, cmd);
3575 /* Get the per device memory space. We can add this but
3576 * currently do not support it */
3578 /* Set the per device memory buffer space.
3579 * Not applicable in our case */
3584 * Unknown or private ioctl.
3587 if (cmd == SIOCWANDEV ||
3588 (cmd >= SIOCDEVPRIVATE &&
3589 cmd <= SIOCDEVPRIVATE + 15)) {
3590 dev_load(net, ifr.ifr_name);
3592 ret = dev_ifsioc(net, &ifr, cmd);
3594 if (!ret && copy_to_user(arg, &ifr,
3595 sizeof(struct ifreq)))
3599 /* Take care of Wireless Extensions */
3600 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
3601 return wext_handle_ioctl(net, &ifr, cmd, arg);
3608 * dev_new_index - allocate an ifindex
3609 * @net: the applicable net namespace
3611 * Returns a suitable unique value for a new device interface
3612 * number. The caller must hold the rtnl semaphore or the
3613 * dev_base_lock to be sure it remains unique.
3615 static int dev_new_index(struct net *net)
3621 if (!__dev_get_by_index(net, ifindex))
3626 /* Delayed registration/unregistration */
3627 static DEFINE_SPINLOCK(net_todo_list_lock);
3628 static LIST_HEAD(net_todo_list);
3630 static void net_set_todo(struct net_device *dev)
3632 spin_lock(&net_todo_list_lock);
3633 list_add_tail(&dev->todo_list, &net_todo_list);
3634 spin_unlock(&net_todo_list_lock);
3637 static void rollback_registered(struct net_device *dev)
3639 BUG_ON(dev_boot_phase);
3642 /* Some devices call this without ever having registered, to unwind a failed initialization. */
3643 if (dev->reg_state == NETREG_UNINITIALIZED) {
3644 printk(KERN_DEBUG "unregister_netdevice: device %s/%p never "
3645 "was registered\n", dev->name, dev);
3651 BUG_ON(dev->reg_state != NETREG_REGISTERED);
3653 /* If device is running, close it first. */
3656 /* And unlink it from device chain. */
3657 unlist_netdevice(dev);
3659 dev->reg_state = NETREG_UNREGISTERING;
3663 /* Shutdown queueing discipline. */
3667 /* Notify protocols, that we are about to destroy
3668 this device. They should clean all the things.
3670 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
3673 * Flush the unicast and multicast chains
3675 dev_addr_discard(dev);
3680 /* Notifier chain MUST detach us from master device. */
3681 BUG_TRAP(!dev->master);
3683 /* Remove entries from kobject tree */
3684 netdev_unregister_kobject(dev);
3692 * register_netdevice - register a network device
3693 * @dev: device to register
3695 * Take a completed network device structure and add it to the kernel
3696 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
3697 * chain. 0 is returned on success. A negative errno code is returned
3698 * on a failure to set up the device, or if the name is a duplicate.
3700 * Callers must hold the rtnl semaphore. You may want
3701 * register_netdev() instead of this.
3704 * The locking appears insufficient to guarantee two parallel registers
3705 * will not get the same name.
3708 int register_netdevice(struct net_device *dev)
3710 struct hlist_head *head;
3711 struct hlist_node *p;
3715 BUG_ON(dev_boot_phase);
3720 /* When net_devices are persistent, this will be fatal. */
3721 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
3722 BUG_ON(!dev_net(dev));
3725 spin_lock_init(&dev->queue_lock);
3726 spin_lock_init(&dev->_xmit_lock);
3727 netdev_set_lockdep_class(&dev->_xmit_lock, dev->type);
3728 dev->xmit_lock_owner = -1;
3729 spin_lock_init(&dev->ingress_lock);
3733 /* Init, if this function is available */
3735 ret = dev->init(dev);
3743 if (!dev_valid_name(dev->name)) {
3748 dev->ifindex = dev_new_index(net);
3749 if (dev->iflink == -1)
3750 dev->iflink = dev->ifindex;
3752 /* Check for existence of name */
3753 head = dev_name_hash(net, dev->name);
3754 hlist_for_each(p, head) {
3755 struct net_device *d
3756 = hlist_entry(p, struct net_device, name_hlist);
3757 if (!strncmp(d->name, dev->name, IFNAMSIZ)) {
3763 /* Fix illegal checksum combinations */
3764 if ((dev->features & NETIF_F_HW_CSUM) &&
3765 (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
3766 printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n",
3768 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
3771 if ((dev->features & NETIF_F_NO_CSUM) &&
3772 (dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
3773 printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n",
3775 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
3779 /* Fix illegal SG+CSUM combinations. */
3780 if ((dev->features & NETIF_F_SG) &&
3781 !(dev->features & NETIF_F_ALL_CSUM)) {
3782 printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no checksum feature.\n",
3784 dev->features &= ~NETIF_F_SG;
3787 /* TSO requires that SG is present as well. */
3788 if ((dev->features & NETIF_F_TSO) &&
3789 !(dev->features & NETIF_F_SG)) {
3790 printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no SG feature.\n",
3792 dev->features &= ~NETIF_F_TSO;
3794 if (dev->features & NETIF_F_UFO) {
3795 if (!(dev->features & NETIF_F_HW_CSUM)) {
3796 printk(KERN_ERR "%s: Dropping NETIF_F_UFO since no "
3797 "NETIF_F_HW_CSUM feature.\n",
3799 dev->features &= ~NETIF_F_UFO;
3801 if (!(dev->features & NETIF_F_SG)) {
3802 printk(KERN_ERR "%s: Dropping NETIF_F_UFO since no "
3803 "NETIF_F_SG feature.\n",
3805 dev->features &= ~NETIF_F_UFO;
3809 netdev_initialize_kobject(dev);
3810 ret = netdev_register_kobject(dev);
3813 dev->reg_state = NETREG_REGISTERED;
3816 * Default initial state at registration is that the
3817 * device is present.
3820 set_bit(__LINK_STATE_PRESENT, &dev->state);
3822 dev_init_scheduler(dev);
3824 list_netdevice(dev);
3826 /* Notify protocols, that a new device appeared. */
3827 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
3828 ret = notifier_to_errno(ret);
3830 rollback_registered(dev);
3831 dev->reg_state = NETREG_UNREGISTERED;
3844 * register_netdev - register a network device
3845 * @dev: device to register
3847 * Take a completed network device structure and add it to the kernel
3848 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
3849 * chain. 0 is returned on success. A negative errno code is returned
3850 * on a failure to set up the device, or if the name is a duplicate.
3852 * This is a wrapper around register_netdevice that takes the rtnl semaphore
3853 * and expands the device name if you passed a format string to alloc_netdev().
3856 int register_netdev(struct net_device *dev)
3863 * If the name is a format string the caller wants us to do a name allocation.
3866 if (strchr(dev->name, '%')) {
3867 err = dev_alloc_name(dev, dev->name);
3872 err = register_netdevice(dev);
3877 EXPORT_SYMBOL(register_netdev);
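/*
 * Sketch: the canonical driver lifecycle around register_netdev().
 * ether_setup() plus a "%d" format name lets the core pick "exampleN";
 * the example_* names are illustrative, not a driver in this tree.
 */
static struct net_device *example_dev;

static int __init example_probe(void)
{
	int err;

	example_dev = alloc_netdev(0, "example%d", ether_setup);
	if (!example_dev)
		return -ENOMEM;
	err = register_netdev(example_dev);	/* takes rtnl, expands %d */
	if (err)
		free_netdev(example_dev);	/* never registered: plain free */
	return err;
}

static void __exit example_remove(void)
{
	unregister_netdev(example_dev);		/* takes rtnl for us */
	free_netdev(example_dev);		/* drops the last reference */
}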
3880 * netdev_wait_allrefs - wait until all references are gone.
3882 * This is called when unregistering network devices.
3884 * Any protocol or device that holds a reference should register
3885 * for netdevice notification, and cleanup and put back the
3886 * reference if they receive an UNREGISTER event.
3887 * We can get stuck here if buggy protocols don't correctly call dev_put.
3890 static void netdev_wait_allrefs(struct net_device *dev)
3892 unsigned long rebroadcast_time, warning_time;
3894 rebroadcast_time = warning_time = jiffies;
3895 while (atomic_read(&dev->refcnt) != 0) {
3896 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
3899 /* Rebroadcast unregister notification */
3900 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
3902 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
3904 /* We must not have linkwatch events
3905 * pending on unregister. If this
3906 * happens, we simply run the queue
3907 * unscheduled, resulting in a noop for this device. */
3910 linkwatch_run_queue();
3915 rebroadcast_time = jiffies;
3920 if (time_after(jiffies, warning_time + 10 * HZ)) {
3921 printk(KERN_EMERG "unregister_netdevice: "
3922 "waiting for %s to become free. Usage "
3924 dev->name, atomic_read(&dev->refcnt));
3925 warning_time = jiffies;
3934 * register_netdevice(x1);
3935 * register_netdevice(x2);
3937 * unregister_netdevice(y1);
3938 * unregister_netdevice(y2);
3944 * We are invoked by rtnl_unlock() after it drops the semaphore.
3945 * This allows us to deal with problems:
3946 * 1) We can delete sysfs objects which invoke hotplug
3947 * without deadlocking with linkwatch via keventd.
3948 * 2) Since we run with the RTNL semaphore not held, we can sleep
3949 * safely in order to wait for the netdev refcnt to drop to zero.
3951 static DEFINE_MUTEX(net_todo_run_mutex);
3952 void netdev_run_todo(void)
3954 struct list_head list;
3956 /* Need to guard against multiple CPUs getting out of order. */
3957 mutex_lock(&net_todo_run_mutex);
3959 /* Not safe to do outside the semaphore. We must not return
3960 * until all unregister events invoked by the local processor
3961 * have been completed (either by this todo run, or one on
3964 if (list_empty(&net_todo_list))
3967 /* Snapshot list, allow later requests */
3968 spin_lock(&net_todo_list_lock);
3969 list_replace_init(&net_todo_list, &list);
3970 spin_unlock(&net_todo_list_lock);
3972 while (!list_empty(&list)) {
3973 struct net_device *dev
3974 = list_entry(list.next, struct net_device, todo_list);
3975 list_del(&dev->todo_list);
3977 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
3978 printk(KERN_ERR "network todo '%s' but state %d\n",
3979 dev->name, dev->reg_state);
3984 dev->reg_state = NETREG_UNREGISTERED;
3986 netdev_wait_allrefs(dev);
3989 BUG_ON(atomic_read(&dev->refcnt));
3990 BUG_TRAP(!dev->ip_ptr);
3991 BUG_TRAP(!dev->ip6_ptr);
3992 BUG_TRAP(!dev->dn_ptr);
3994 if (dev->destructor)
3995 dev->destructor(dev);
3997 /* Free network device */
3998 kobject_put(&dev->dev.kobj);
4002 mutex_unlock(&net_todo_run_mutex);
4005 static struct net_device_stats *internal_stats(struct net_device *dev)
4011 * alloc_netdev_mq - allocate network device
4012 * @sizeof_priv: size of private data to allocate space for
4013 * @name: device name format string
4014 * @setup: callback to initialize device
4015 * @queue_count: the number of subqueues to allocate
4017 * Allocates a struct net_device with private data area for driver use
4018 * and performs basic initialization. Also allocates subqueue structs
4019 * for each queue on the device at the end of the netdevice.
4021 struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
4022 void (*setup)(struct net_device *), unsigned int queue_count)
4025 struct net_device *dev;
4028 BUG_ON(strlen(name) >= sizeof(dev->name));
4030 alloc_size = sizeof(struct net_device) +
4031 sizeof(struct net_device_subqueue) * (queue_count - 1);
4033 /* ensure 32-byte alignment of private area */
4034 alloc_size = (alloc_size + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST;
4035 alloc_size += sizeof_priv;
4037 /* ensure 32-byte alignment of whole construct */
4038 alloc_size += NETDEV_ALIGN_CONST;
4040 p = kzalloc(alloc_size, GFP_KERNEL);
4042 printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
4046 dev = (struct net_device *)
4047 (((long)p + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST);
4048 dev->padded = (char *)dev - (char *)p;
4049 dev_net_set(dev, &init_net);
4052 dev->priv = ((char *)dev +
4053 ((sizeof(struct net_device) +
4054 (sizeof(struct net_device_subqueue) *
4055 (queue_count - 1)) + NETDEV_ALIGN_CONST)
4056 & ~NETDEV_ALIGN_CONST));
4059 dev->egress_subqueue_count = queue_count;
4060 dev->gso_max_size = GSO_MAX_SIZE;
4062 dev->get_stats = internal_stats;
4063 netpoll_netdev_init(dev);
4065 strcpy(dev->name, name);
4068 EXPORT_SYMBOL(alloc_netdev_mq);
4071 * free_netdev - free network device
4074 * This function does the last stage of destroying an allocated device
4075 * interface. The reference to the device object is released.
4076 * If this is the last reference then it will be freed.
4078 void free_netdev(struct net_device *dev)
4080 release_net(dev_net(dev));
4082 /* Compatibility with error handling in drivers */
4083 if (dev->reg_state == NETREG_UNINITIALIZED) {
4084 kfree((char *)dev - dev->padded);
4088 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
4089 dev->reg_state = NETREG_RELEASED;
4091 /* will free via device release */
4092 put_device(&dev->dev);
4095 /* Synchronize with packet receive processing. */
4096 void synchronize_net(void)
4103 * unregister_netdevice - remove device from the kernel
4106 * This function shuts down a device interface and removes it
4107 * from the kernel tables.
4109 * Callers must hold the rtnl semaphore. You may want
4110 * unregister_netdev() instead of this.
4113 void unregister_netdevice(struct net_device *dev)
4117 rollback_registered(dev);
4118 /* Finish processing unregister after unlock */
4123 * unregister_netdev - remove device from the kernel
4126 * This function shuts down a device interface and removes it
4127 * from the kernel tables.
4129 * This is just a wrapper for unregister_netdevice that takes
4130 * the rtnl semaphore. In general you want to use this and not
4131 * unregister_netdevice.
4133 void unregister_netdev(struct net_device *dev)
4136 unregister_netdevice(dev);
4140 EXPORT_SYMBOL(unregister_netdev);
4143 * dev_change_net_namespace - move device to a different network namespace
4145 * @net: network namespace
4146 * @pat: If not NULL name pattern to try if the current device name
4147 * is already taken in the destination network namespace.
4149 * This function shuts down a device interface and moves it
4150 * to a new network namespace. On success 0 is returned, on
4151 * a failure a negative errno code is returned.
4153 * Callers must hold the rtnl semaphore.
4156 int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
4159 const char *destname;
4164 /* Don't allow namespace local devices to be moved. */
4166 if (dev->features & NETIF_F_NETNS_LOCAL)
4169 /* Ensure the device has been registered */
4171 if (dev->reg_state != NETREG_REGISTERED)
4174 /* Get out if there is nothing to do */
4176 if (net_eq(dev_net(dev), net))
4179 /* Pick the destination device name, and ensure
4180 * we can use it in the destination network namespace.
4183 destname = dev->name;
4184 if (__dev_get_by_name(net, destname)) {
4185 /* We get here if we can't use the current device name */
4188 if (!dev_valid_name(pat))
4190 if (strchr(pat, '%')) {
4191 if (__dev_alloc_name(net, pat, buf) < 0)
4196 if (__dev_get_by_name(net, destname))
4201 * And now a mini version of register_netdevice and unregister_netdevice.
4204 /* If device is running close it first. */
4207 /* And unlink it from device chain */
4209 unlist_netdevice(dev);
4213 /* Shutdown queueing discipline. */
4216 /* Notify protocols, that we are about to destroy
4217 this device. They should clean all the things.
4219 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
4222 * Flush the unicast and multicast chains
4224 dev_addr_discard(dev);
4226 /* Actually switch the network namespace */
4227 dev_net_set(dev, net);
4229 /* Assign the new device name */
4230 if (destname != dev->name)
4231 strcpy(dev->name, destname);
4233 /* If there is an ifindex conflict assign a new one */
4234 if (__dev_get_by_index(net, dev->ifindex)) {
4235 int iflink = (dev->iflink == dev->ifindex);
4236 dev->ifindex = dev_new_index(net);
4238 dev->iflink = dev->ifindex;
4241 /* Fixup kobjects */
4242 netdev_unregister_kobject(dev);
4243 err = netdev_register_kobject(dev);
4246 /* Add the device back in the hashes */
4247 list_netdevice(dev);
4249 /* Notify protocols, that a new device appeared. */
4250 call_netdevice_notifiers(NETDEV_REGISTER, dev);
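/*
 * Sketch: what a caller of the above looks like; "net" is a live
 * namespace reference the caller already holds, and "eth%d" is the
 * fallback pattern if the current name is taken on the other side.
 */
static int example_move(struct net_device *dev, struct net *net)
{
	int err;

	rtnl_lock();
	err = dev_change_net_namespace(dev, net, "eth%d");
	rtnl_unlock();
	return err;
}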
4258 static int dev_cpu_callback(struct notifier_block *nfb,
4259 unsigned long action,
4262 struct sk_buff **list_skb;
4263 struct net_device **list_net;
4264 struct sk_buff *skb;
4265 unsigned int cpu, oldcpu = (unsigned long)ocpu;
4266 struct softnet_data *sd, *oldsd;
4268 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
4271 local_irq_disable();
4272 cpu = smp_processor_id();
4273 sd = &per_cpu(softnet_data, cpu);
4274 oldsd = &per_cpu(softnet_data, oldcpu);
4276 /* Find end of our completion_queue. */
4277 list_skb = &sd->completion_queue;
4279 list_skb = &(*list_skb)->next;
4280 /* Append completion queue from offline CPU. */
4281 *list_skb = oldsd->completion_queue;
4282 oldsd->completion_queue = NULL;
4284 /* Find end of our output_queue. */
4285 list_net = &sd->output_queue;
4287 list_net = &(*list_net)->next_sched;
4288 /* Append output queue from offline CPU. */
4289 *list_net = oldsd->output_queue;
4290 oldsd->output_queue = NULL;
4292 raise_softirq_irqoff(NET_TX_SOFTIRQ);
4295 /* Process offline CPU's input_pkt_queue */
4296 while ((skb = __skb_dequeue(&oldsd->input_pkt_queue)))
4302 #ifdef CONFIG_NET_DMA
4304 * net_dma_rebalance - try to maintain one DMA channel per CPU
4305 * @net_dma: DMA client and associated data (lock, channels, channel_mask)
4307 * This is called when the number of channels allocated to the net_dma client
4308 * changes. The net_dma client tries to have one DMA channel per CPU.
4311 static void net_dma_rebalance(struct net_dma *net_dma)
4313 unsigned int cpu, i, n, chan_idx;
4314 struct dma_chan *chan;
4316 if (cpus_empty(net_dma->channel_mask)) {
4317 for_each_online_cpu(cpu)
4318 rcu_assign_pointer(per_cpu(softnet_data, cpu).net_dma, NULL);
4323 cpu = first_cpu(cpu_online_map);
4325 for_each_cpu_mask(chan_idx, net_dma->channel_mask) {
4326 chan = net_dma->channels[chan_idx];
4328 n = ((num_online_cpus() / cpus_weight(net_dma->channel_mask))
4329 + (i < (num_online_cpus() %
4330 cpus_weight(net_dma->channel_mask)) ? 1 : 0));
4333 per_cpu(softnet_data, cpu).net_dma = chan;
4334 cpu = next_cpu(cpu, cpu_online_map);
4342 * netdev_dma_event - event callback for the net_dma_client
4343 * @client: should always be net_dma_client
4344 * @chan: DMA channel for the event
4345 * @state: DMA state to be handled
4347 static enum dma_state_client
4348 netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
4349 enum dma_state state)
4351 int i, found = 0, pos = -1;
4352 struct net_dma *net_dma =
4353 container_of(client, struct net_dma, client);
4354 enum dma_state_client ack = DMA_DUP; /* default: take no action */
4356 spin_lock(&net_dma->lock);
4358 case DMA_RESOURCE_AVAILABLE:
4359 for (i = 0; i < nr_cpu_ids; i++)
4360 if (net_dma->channels[i] == chan) {
4363 } else if (net_dma->channels[i] == NULL && pos < 0)
4366 if (!found && pos >= 0) {
4368 net_dma->channels[pos] = chan;
4369 cpu_set(pos, net_dma->channel_mask);
4370 net_dma_rebalance(net_dma);
4373 case DMA_RESOURCE_REMOVED:
4374 for (i = 0; i < nr_cpu_ids; i++)
4375 if (net_dma->channels[i] == chan) {
4383 cpu_clear(pos, net_dma->channel_mask);
4384 net_dma->channels[i] = NULL;
4385 net_dma_rebalance(net_dma);
4391 spin_unlock(&net_dma->lock);
4397 * netdev_dma_register - register the networking subsystem as a DMA client
4399 static int __init netdev_dma_register(void)
4401 net_dma.channels = kzalloc(nr_cpu_ids * sizeof(struct dma_chan *),
4403 if (unlikely(!net_dma.channels)) {
4405 "netdev_dma: no memory for net_dma.channels\n");
4408 spin_lock_init(&net_dma.lock);
4409 dma_cap_set(DMA_MEMCPY, net_dma.client.cap_mask);
4410 dma_async_client_register(&net_dma.client);
4411 dma_async_client_chan_request(&net_dma.client);
4416 static int __init netdev_dma_register(void) { return -ENODEV; }
4417 #endif /* CONFIG_NET_DMA */
4420 * netdev_compute_features - compute conjunction of two feature sets
4421 * @all: first feature set
4422 * @one: second feature set
4424 * Computes a new feature set after adding a device with feature set
4425 * @one to the master device with current feature set @all. Returns
4426 * the new feature set.
4428 int netdev_compute_features(unsigned long all, unsigned long one)
4430 /* if device needs checksumming, downgrade to hw checksumming */
4431 if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
4432 all ^= NETIF_F_NO_CSUM | NETIF_F_HW_CSUM;
4434 /* if device can't do all checksum, downgrade to ipv4/ipv6 */
4435 if (all & NETIF_F_HW_CSUM && !(one & NETIF_F_HW_CSUM))
4436 all ^= NETIF_F_HW_CSUM
4437 | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
4439 if (one & NETIF_F_GSO)
4440 one |= NETIF_F_GSO_SOFTWARE;
4443 /* If even one device supports robust GSO, enable it for all. */
4444 if (one & NETIF_F_GSO_ROBUST)
4445 all |= NETIF_F_GSO_ROBUST;
4447 all &= one | NETIF_F_LLTX;
4449 if (!(all & NETIF_F_ALL_CSUM))
4451 if (!(all & NETIF_F_SG))
4452 all &= ~NETIF_F_GSO_MASK;
4456 EXPORT_SYMBOL(netdev_compute_features);
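/*
 * Sketch: a bonding-style master folding a new slave's capabilities
 * into its own advertised set, one slave at a time.
 */
static void example_fold_features(struct net_device *master,
				  struct net_device *slave)
{
	master->features = netdev_compute_features(master->features,
						   slave->features);
}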
4458 static struct hlist_head *netdev_create_hash(void)
4461 struct hlist_head *hash;
4463 hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
4465 for (i = 0; i < NETDEV_HASHENTRIES; i++)
4466 INIT_HLIST_HEAD(&hash[i]);
4471 /* Initialize per network namespace state */
4472 static int __net_init netdev_init(struct net *net)
4474 INIT_LIST_HEAD(&net->dev_base_head);
4476 net->dev_name_head = netdev_create_hash();
4477 if (net->dev_name_head == NULL)
4480 net->dev_index_head = netdev_create_hash();
4481 if (net->dev_index_head == NULL)
4487 kfree(net->dev_name_head);
4492 static void __net_exit netdev_exit(struct net *net)
4494 kfree(net->dev_name_head);
4495 kfree(net->dev_index_head);
4498 static struct pernet_operations __net_initdata netdev_net_ops = {
4499 .init = netdev_init,
4500 .exit = netdev_exit,
4503 static void __net_exit default_device_exit(struct net *net)
4505 struct net_device *dev, *next;
4507 * Push all migratable network devices back to the
4508 * initial network namespace
4511 for_each_netdev_safe(net, dev, next) {
4513 char fb_name[IFNAMSIZ];
4515 /* Ignore unmovable devices (e.g. loopback) */
4516 if (dev->features & NETIF_F_NETNS_LOCAL)
4519 /* Push remaining network devices to init_net */
4520 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
4521 err = dev_change_net_namespace(dev, &init_net, fb_name);
4523 printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n",
4524 __func__, dev->name, err);
4531 static struct pernet_operations __net_initdata default_device_ops = {
4532 .exit = default_device_exit,
4536 * Initialize the DEV module. At boot time this walks the device list and
4537 * unhooks any devices that fail to initialise (normally hardware not
4538 * present) and leaves us with a valid list of present and active devices.
4543 * This is called single threaded during boot, so no need
4544 * to take the rtnl semaphore.
4546 static int __init net_dev_init(void)
4548 int i, rc = -ENOMEM;
4550 BUG_ON(!dev_boot_phase);
4552 if (dev_proc_init())
4555 if (netdev_kobject_init())
4558 INIT_LIST_HEAD(&ptype_all);
4559 for (i = 0; i < PTYPE_HASH_SIZE; i++)
4560 INIT_LIST_HEAD(&ptype_base[i]);
4562 if (register_pernet_subsys(&netdev_net_ops))
4565 if (register_pernet_device(&default_device_ops))
4569 * Initialise the packet receive queues.
4572 for_each_possible_cpu(i) {
4573 struct softnet_data *queue;
4575 queue = &per_cpu(softnet_data, i);
4576 skb_queue_head_init(&queue->input_pkt_queue);
4577 queue->completion_queue = NULL;
4578 INIT_LIST_HEAD(&queue->poll_list);
4580 queue->backlog.poll = process_backlog;
4581 queue->backlog.weight = weight_p;
4584 netdev_dma_register();
4588 open_softirq(NET_TX_SOFTIRQ, net_tx_action);
4589 open_softirq(NET_RX_SOFTIRQ, net_rx_action);
4591 hotcpu_notifier(dev_cpu_callback, 0);
4599 subsys_initcall(net_dev_init);
4601 EXPORT_SYMBOL(__dev_get_by_index);
4602 EXPORT_SYMBOL(__dev_get_by_name);
4603 EXPORT_SYMBOL(__dev_remove_pack);
4604 EXPORT_SYMBOL(dev_valid_name);
4605 EXPORT_SYMBOL(dev_add_pack);
4606 EXPORT_SYMBOL(dev_alloc_name);
4607 EXPORT_SYMBOL(dev_close);
4608 EXPORT_SYMBOL(dev_get_by_flags);
4609 EXPORT_SYMBOL(dev_get_by_index);
4610 EXPORT_SYMBOL(dev_get_by_name);
4611 EXPORT_SYMBOL(dev_open);
4612 EXPORT_SYMBOL(dev_queue_xmit);
4613 EXPORT_SYMBOL(dev_remove_pack);
4614 EXPORT_SYMBOL(dev_set_allmulti);
4615 EXPORT_SYMBOL(dev_set_promiscuity);
4616 EXPORT_SYMBOL(dev_change_flags);
4617 EXPORT_SYMBOL(dev_set_mtu);
4618 EXPORT_SYMBOL(dev_set_mac_address);
4619 EXPORT_SYMBOL(free_netdev);
4620 EXPORT_SYMBOL(netdev_boot_setup_check);
4621 EXPORT_SYMBOL(netdev_set_master);
4622 EXPORT_SYMBOL(netdev_state_change);
4623 EXPORT_SYMBOL(netif_receive_skb);
4624 EXPORT_SYMBOL(netif_rx);
4625 EXPORT_SYMBOL(register_gifconf);
4626 EXPORT_SYMBOL(register_netdevice);
4627 EXPORT_SYMBOL(register_netdevice_notifier);
4628 EXPORT_SYMBOL(skb_checksum_help);
4629 EXPORT_SYMBOL(synchronize_net);
4630 EXPORT_SYMBOL(unregister_netdevice);
4631 EXPORT_SYMBOL(unregister_netdevice_notifier);
4632 EXPORT_SYMBOL(net_enable_timestamp);
4633 EXPORT_SYMBOL(net_disable_timestamp);
4634 EXPORT_SYMBOL(dev_get_flags);
4636 #if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
4637 EXPORT_SYMBOL(br_handle_frame_hook);
4638 EXPORT_SYMBOL(br_fdb_get_hook);
4639 EXPORT_SYMBOL(br_fdb_put_hook);
4643 EXPORT_SYMBOL(dev_load);
4646 EXPORT_PER_CPU_SYMBOL(softnet_data);