/*
 *	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *	    Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *	    Michael Chastain	:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *	Paul Rusty Russell	:	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <linux/if_bridge.h>
#include <linux/if_macvlan.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/wext.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>

#include "net-sysfs.h"
/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/*
 *	The list of packet types we will receive (as opposed to discard)
 *	and the routines to invoke.
 *
 *	Why 16. Because with 16 the only overlap we get on a hash of the
 *	low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *	NOTE:  That is no longer true with the addition of VLAN tags.  Not
 *	       sure which should go first, but I bet it won't make much
 *	       difference if we are running VLANs.  The good news is that
 *	       this protocol won't be in the list unless compiled in, so
 *	       the average user (w/out VLANs) will not be adversely affected.
 */

#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)

static DEFINE_SPINLOCK(ptype_lock);
static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
static struct list_head ptype_all __read_mostly;	/* Taps */
#ifdef CONFIG_NET_DMA
struct net_dma {
	struct dma_client client;
	spinlock_t lock;
	cpumask_t channel_mask;
	struct dma_chan **channels;
};

static enum dma_state_client
netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
	enum dma_state state);

static struct net_dma net_dma = {
	.client = {
		.event_callback = netdev_dma_event,
	},
};
#endif
/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading.
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);

EXPORT_SYMBOL(dev_base_lock);
#define NETDEV_HASHBITS	8
#define NETDEV_HASHENTRIES (1 << NETDEV_HASHBITS)

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
	return &net->dev_name_head[hash & ((1 << NETDEV_HASHBITS) - 1)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & ((1 << NETDEV_HASHBITS) - 1)];
}
/* Device list insertion */
static int list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail(&dev->dev_list, &net->dev_base_head);
	hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head(&dev->index_hlist, dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);
	return 0;
}

/* Device list removal */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del(&dev->dev_list);
	hlist_del(&dev->name_hlist);
	hlist_del(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);
}
static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU(struct softnet_data, softnet_data);
#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
	 ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET,
	 ARPHRD_PHONET_PIPE, ARPHRD_VOID, ARPHRD_NONE};

static const char *netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
	 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET",
	 "_xmit_PHONET_PIPE", "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif
/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if a protocol handler that mangles packets
 *	were first on the list, it could not sense that the packet
 *	is cloned and should be copied-on-write; it would
 *	change it and subsequent readers would get a broken packet.
 */
/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep and therefore cannot guarantee that all
 *	CPUs that are in the middle of receiving packets will see the new
 *	packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	int hash;

	spin_lock_bh(&ptype_lock);
	if (pt->type == htons(ETH_P_ALL))
		list_add_rcu(&pt->list, &ptype_all);
	else {
		hash = ntohs(pt->type) & PTYPE_HASH_MASK;
		list_add_rcu(&pt->list, &ptype_base[hash]);
	}
	spin_unlock_bh(&ptype_lock);
}

/**
 *	__dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head;
	struct packet_type *pt1;

	spin_lock_bh(&ptype_lock);

	if (pt->type == htons(ETH_P_ALL))
		head = &ptype_all;
	else
		head = &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
out:
	spin_unlock_bh(&ptype_lock);
}

/**
 *	dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
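
/*
 * Illustrative sketch (not part of the original file): a minimal tap that
 * registers via dev_add_pack() for every protocol (ETH_P_ALL), counts the
 * frames it sees, and unregisters with dev_remove_pack().  The handler name
 * and counter are hypothetical; the packet_type fields and the add/remove
 * calls are the ones defined above.
 */
#if 0
static unsigned long example_tap_count;	/* hypothetical counter */

static int example_tap_rcv(struct sk_buff *skb, struct net_device *dev,
			   struct packet_type *pt, struct net_device *orig_dev)
{
	example_tap_count++;
	kfree_skb(skb);		/* each tap gets its own clone; drop it */
	return 0;
}

static struct packet_type example_tap __read_mostly = {
	.type = __constant_htons(ETH_P_ALL),
	.func = example_tap_rcv,	/* .dev == NULL: all devices */
};

static int __init example_tap_init(void)
{
	dev_add_pack(&example_tap);
	return 0;
}

static void __exit example_tap_exit(void)
{
	dev_remove_pack(&example_tap);	/* sleeps until no CPU sees it */
}
#endif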
/******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add	- add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine to
 *	all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *	netdev_boot_setup_check	- check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq	= s[i].map.irq;
			dev->base_addr	= s[i].map.base_addr;
			dev->mem_start	= s[i].map.mem_start;
			dev->mem_end	= s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}

/**
 *	netdev_boot_base	- get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);
/*******************************************************************************

			    Device Interface Subroutines

*******************************************************************************/

/**
 *	__dev_get_by_name	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct hlist_node *p;

	hlist_for_each(p, dev_name_hash(net, name)) {
		struct net_device *dev
			= hlist_entry(p, struct net_device, name_hlist);
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;
	}
	return NULL;
}

/**
 *	dev_get_by_name		- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_name(net, name);
	if (dev)
		dev_hold(dev);
	read_unlock(&dev_base_lock);
	return dev;
}
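
/*
 * Illustrative sketch (not in the original file): looking up "lo" in the
 * initial namespace with dev_get_by_name() and balancing the reference it
 * takes with dev_put().  The function name and printk are hypothetical.
 */
#if 0
static void example_show_loopback_mtu(void)
{
	struct net_device *dev = dev_get_by_name(&init_net, "lo");

	if (!dev)
		return;
	printk(KERN_INFO "%s: mtu %d\n", dev->name, dev->mtu);
	dev_put(dev);	/* release the reference dev_get_by_name() took */
}
#endif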
/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct hlist_node *p;

	hlist_for_each(p, dev_index_hash(net, ifindex)) {
		struct net_device *dev
			= hlist_entry(p, struct net_device, index_hlist);
		if (dev->ifindex == ifindex)
			return dev;
	}
	return NULL;
}

/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_index(net, ifindex);
	if (dev)
		dev_hold(dev);
	read_unlock(&dev_base_lock);
	return dev;
}

/**
 *	dev_getbyhwaddr - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device. The caller must hold the
 *	rtnl semaphore. The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking.
 *
 *	BUGS:
 *	If the API was consistent this would be __dev_get_by_hwaddr
 */

struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *ha)
{
	struct net_device *dev;

	ASSERT_RTNL();

	for_each_netdev(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}

EXPORT_SYMBOL(dev_getbyhwaddr);

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}

EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	rtnl_lock();
	dev = __dev_getfirstbyhwtype(net, type);
	if (dev)
		dev_hold(dev);
	rtnl_unlock();
	return dev;
}

EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	dev_get_by_flags - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_flags(struct net *net, unsigned short if_flags, unsigned short mask)
{
	struct net_device *dev, *ret;

	ret = NULL;
	read_lock(&dev_base_lock);
	for_each_netdev(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	}
	read_unlock(&dev_base_lock);
	return ret;
}

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
int dev_valid_name(const char *name)
{
	if (*name == '\0')
		return 0;
	if (strlen(name) >= IFNAMSIZ)
		return 0;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return 0;

	while (*name) {
		if (*name == '/' || isspace(*name))
			return 0;
		name++;
	}
	return 1;
}
/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/*  avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}
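
/*
 * Illustrative sketch (not in the original file): how a driver typically
 * picks a unique name before registration.  "foo%d" is a made-up prefix;
 * dev_alloc_name() fills dev->name with the first free slot ("foo0",
 * "foo1", ...) or returns a negative errno.
 */
#if 0
static int example_name_device(struct net_device *dev)
{
	int unit = dev_alloc_name(dev, "foo%d");	/* hypothetical */

	if (unit < 0)
		return unit;	/* -EINVAL or -ENFILE */
	return 0;
}
#endif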
/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device, can pass format strings "eth%d".
 *	for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (!dev_valid_name(newname))
		return -EINVAL;

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
		return 0;

	memcpy(oldname, dev->name, IFNAMSIZ);

	if (strchr(newname, '%')) {
		err = dev_alloc_name(dev, newname);
		if (err < 0)
			return err;
	}
	else if (__dev_get_by_name(net, newname))
		return -EEXIST;
	else
		strlcpy(dev->name, newname, IFNAMSIZ);

rollback:
	/* For now only devices in the initial network namespace
	 * are in sysfs.
	 */
	if (net == &init_net) {
		ret = device_rename(&dev->dev, dev->name);
		if (ret) {
			memcpy(dev->name, oldname, IFNAMSIZ);
			return ret;
		}
	}

	write_lock_bh(&dev_base_lock);
	hlist_del(&dev->name_hlist);
	hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		if (err) {
			printk(KERN_ERR
			       "%s: name change rollback failed: %d.\n",
			       dev->name, ret);
		} else {
			err = ret;
			memcpy(dev->name, oldname, IFNAMSIZ);
			goto rollback;
		}
	}

	return err;
}

/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from info
 *
 *	Set ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	ASSERT_RTNL();

	if (len >= IFALIASZ)
		return -EINVAL;

	if (!len) {
		if (dev->ifalias) {
			kfree(dev->ifalias);
			dev->ifalias = NULL;
		}
		return 0;
	}

	dev->ifalias = krealloc(dev->ifalias, len+1, GFP_KERNEL);
	if (!dev->ifalias)
		return -ENOMEM;

	strlcpy(dev->ifalias, alias, len+1);
	return 0;
}
/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		call_netdevice_notifiers(NETDEV_CHANGE, dev);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
	}
}

void netdev_bonding_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, dev);
}
EXPORT_SYMBOL(netdev_bonding_change);

/**
 *	dev_load	- load a network module
 *	@net: the applicable net namespace
 *	@name: name of interface
 *
 *	If a network interface is not present and the process has suitable
 *	privileges this function loads the module. If module loading is not
 *	available in this kernel then it becomes a nop.
 */

void dev_load(struct net *net, const char *name)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_name(net, name);
	read_unlock(&dev_base_lock);

	if (!dev && capable(CAP_SYS_MODULE))
		request_module("%s", name);
}
/**
 *	dev_open	- prepare an interface for use.
 *	@dev:	device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret = 0;

	ASSERT_RTNL();

	/*
	 *	Is it already up?
	 */
	if (dev->flags & IFF_UP)
		return 0;

	/*
	 *	Is it even present?
	 */
	if (!netif_device_present(dev))
		return -ENODEV;

	/*
	 *	Call device private open method
	 */
	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	/*
	 *	If it went open OK then:
	 */
	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		/*
		 *	Set the flags.
		 */
		dev->flags |= IFF_UP;

		/*
		 *	Initialize multicasting status
		 */
		dev_set_rx_mode(dev);

		/*
		 *	Wakeup transmit queue engine
		 */
		dev_activate(dev);

		/*
		 *	... and announce new interface.
		 */
		call_netdevice_notifiers(NETDEV_UP, dev);
	}

	return ret;
}
/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
int dev_close(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	ASSERT_RTNL();

	might_sleep();

	if (!(dev->flags & IFF_UP))
		return 0;

	/*
	 *	Tell people we are going down, so that they can
	 *	prepare for it while the device is still operating.
	 */
	call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

	clear_bit(__LINK_STATE_START, &dev->state);

	/* Synchronize to scheduled poll. We cannot touch poll list,
	 * it can be even on different cpu. So just clear netif_running().
	 *
	 * dev->stop() will invoke napi_disable() on all of its
	 * napi_struct instances on this device.
	 */
	smp_mb__after_clear_bit(); /* Commit netif_running(). */

	dev_deactivate(dev);

	/*
	 *	Call the device specific close. This cannot fail.
	 *	Only if device is UP
	 *
	 *	We allow it to be called even after a DETACH hot-plug
	 *	event.
	 */
	if (ops->ndo_stop)
		ops->ndo_stop(dev);

	/*
	 *	Device is now down.
	 */
	dev->flags &= ~IFF_UP;

	/*
	 *	Tell people we are down
	 */
	call_netdevice_notifiers(NETDEV_DOWN, dev);

	return 0;
}
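
/*
 * Illustrative sketch (not in the original file): dev_open()/dev_close()
 * must run under the RTNL semaphore, which is what the ASSERT_RTNL()
 * calls above enforce.  A minimal administrative up/down toggle would look
 * like this; the function name is hypothetical.
 */
#if 0
static int example_toggle_device(struct net_device *dev, int up)
{
	int err;

	rtnl_lock();
	err = up ? dev_open(dev) : dev_close(dev);
	rtnl_unlock();
	return err;
}
#endif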
/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	if (dev->ethtool_ops && dev->ethtool_ops->get_flags &&
	    dev->ethtool_ops->set_flags) {
		u32 flags = dev->ethtool_ops->get_flags(dev);
		if (flags & ETH_FLAG_LRO) {
			flags &= ~ETH_FLAG_LRO;
			dev->ethtool_ops->set_flags(dev, flags);
		}
	}
	WARN_ON(dev->features & NETIF_F_LRO);
}
EXPORT_SYMBOL(dev_disable_lro);
static int dev_boot_phase = 1;

/*
 *	Device change register/unregister. These are not inline or static
 *	as we export them to the world.
 */

/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 *	When registered, all registration and up events are replayed
 *	to the new notifier to allow the device to have a race-free
 *	view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			nb->notifier_call(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				break;

			if (dev->flags & IFF_UP) {
				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
				nb->notifier_call(nb, NETDEV_DOWN, dev);
			}
			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
		}
	}

	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}

/**
 *	unregister_netdevice_notifier - unregister a network notifier block
 *	@nb: notifier
 *
 *	Unregister a notifier previously registered by
 *	register_netdevice_notifier(). The notifier is unlinked from the
 *	kernel structures and may then be reused. A negative errno code
 *	is returned on a failure.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	rtnl_unlock();
	return err;
}

/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	return raw_notifier_call_chain(&netdev_chain, val, dev);
}
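
/*
 * Illustrative sketch (not in the original file): a notifier block that
 * logs NETDEV_UP/NETDEV_DOWN events.  Per the kernel-doc above,
 * registration replays REGISTER/UP events for devices that already exist.
 * The names are hypothetical.
 */
#if 0
static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	if (event == NETDEV_UP)
		printk(KERN_INFO "%s is up\n", dev->name);
	else if (event == NETDEV_DOWN)
		printk(KERN_INFO "%s is down\n", dev->name);
	return NOTIFY_DONE;
}

static struct notifier_block example_netdev_notifier = {
	.notifier_call = example_netdev_event,
};

/* register_netdevice_notifier(&example_netdev_notifier); */
#endif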
/* When > 0 there are consumers of rx skb time stamps */
static atomic_t netstamp_needed = ATOMIC_INIT(0);

void net_enable_timestamp(void)
{
	atomic_inc(&netstamp_needed);
}

void net_disable_timestamp(void)
{
	atomic_dec(&netstamp_needed);
}

static inline void net_timestamp(struct sk_buff *skb)
{
	if (atomic_read(&netstamp_needed))
		__net_timestamp(skb);
	else
		skb->tstamp.tv64 = 0;
}

/*
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
 */

static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;

	net_timestamp(skb);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if ((ptype->dev == dev || !ptype->dev) &&
		    (ptype->af_packet_priv == NULL ||
		     (struct sock *)ptype->af_packet_priv != skb->sk)) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
			if (!skb2)
				break;

			/* skb->nh should be correctly
			   set by sender, so that the second statement is
			   just protection against buggy protocols.
			 */
			skb_reset_mac_header(skb2);

			if (skb_network_header(skb2) < skb2->data ||
			    skb2->network_header > skb2->tail) {
				if (net_ratelimit())
					printk(KERN_CRIT "protocol %04x is "
					       "buggy, dev %s\n",
					       skb2->protocol, dev->name);
				skb_reset_network_header(skb2);
			}

			skb2->transport_header = skb2->network_header;
			skb2->pkt_type = PACKET_OUTGOING;
			ptype->func(skb2, skb->dev, ptype, skb->dev);
		}
	}
	rcu_read_unlock();
}


static inline void __netif_reschedule(struct Qdisc *q)
{
	struct softnet_data *sd;
	unsigned long flags;

	local_irq_save(flags);
	sd = &__get_cpu_var(softnet_data);
	q->next_sched = sd->output_queue;
	sd->output_queue = q;
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}

void __netif_schedule(struct Qdisc *q)
{
	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
		__netif_reschedule(q);
}
EXPORT_SYMBOL(__netif_schedule);

void dev_kfree_skb_irq(struct sk_buff *skb)
{
	if (atomic_dec_and_test(&skb->users)) {
		struct softnet_data *sd;
		unsigned long flags;

		local_irq_save(flags);
		sd = &__get_cpu_var(softnet_data);
		skb->next = sd->completion_queue;
		sd->completion_queue = skb;
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(dev_kfree_skb_irq);

void dev_kfree_skb_any(struct sk_buff *skb)
{
	if (in_irq() || irqs_disabled())
		dev_kfree_skb_irq(skb);
	else
		dev_kfree_skb(skb);
}
EXPORT_SYMBOL(dev_kfree_skb_any);
/**
 *	netif_device_detach - mark device as removed
 *	@dev: network device
 *
 *	Mark device as removed from system and therefore no longer available.
 */
void netif_device_detach(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_stop_queue(dev);
	}
}
EXPORT_SYMBOL(netif_device_detach);

/**
 *	netif_device_attach - mark device as attached
 *	@dev: network device
 *
 *	Mark device as attached to the system and restart the queues if needed.
 */
void netif_device_attach(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_wake_queue(dev);
		__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_device_attach);
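
/*
 * Illustrative sketch (not in the original file): the usual caller of the
 * two helpers above is a driver's suspend/resume path.  The function names
 * and hardware steps are hypothetical placeholders.
 */
#if 0
static int example_suspend(struct net_device *dev)
{
	netif_device_detach(dev);	/* stop queueing before hw sleeps */
	/* ... put the hardware to sleep ... */
	return 0;
}

static int example_resume(struct net_device *dev)
{
	/* ... wake the hardware ... */
	netif_device_attach(dev);	/* restart queues and watchdog */
	return 0;
}
#endif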
static bool can_checksum_protocol(unsigned long features, __be16 protocol)
{
	return ((features & NETIF_F_GEN_CSUM) ||
		((features & NETIF_F_IP_CSUM) &&
		 protocol == htons(ETH_P_IP)) ||
		((features & NETIF_F_IPV6_CSUM) &&
		 protocol == htons(ETH_P_IPV6)));
}

static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
{
	if (can_checksum_protocol(dev->features, skb->protocol))
		return true;

	if (skb->protocol == htons(ETH_P_8021Q)) {
		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
		if (can_checksum_protocol(dev->features & dev->vlan_features,
					  veh->h_vlan_encapsulated_proto))
			return true;
	}

	return false;
}

/*
 * Invalidate hardware checksum when packet is to be mangled, and
 * complete checksum manually on outgoing path.
 */
int skb_checksum_help(struct sk_buff *skb)
{
	__wsum csum;
	int ret = 0, offset;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		goto out_set_summed;

	if (unlikely(skb_shinfo(skb)->gso_size)) {
		/* Let GSO fix up the checksum. */
		goto out_set_summed;
	}

	offset = skb->csum_start - skb_headroom(skb);
	BUG_ON(offset >= skb_headlen(skb));
	csum = skb_checksum(skb, offset, skb->len - offset, 0);

	offset += skb->csum_offset;
	BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));

	if (skb_cloned(skb) &&
	    !skb_clone_writable(skb, offset + sizeof(__sum16))) {
		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (ret)
			goto out;
	}

	*(__sum16 *)(skb->data + offset) = csum_fold(csum);
out_set_summed:
	skb->ip_summed = CHECKSUM_NONE;
out:
	return ret;
}
/**
 *	skb_gso_segment - Perform segmentation on skb.
 *	@skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 *
 *	This function segments the given skb and returns a list of segments.
 *
 *	It may return NULL if the skb requires no segmentation.  This is
 *	only possible when GSO is used for verifying header integrity.
 */
struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
{
	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
	struct packet_type *ptype;
	__be16 type = skb->protocol;
	int err;

	skb_reset_mac_header(skb);
	skb->mac_len = skb->network_header - skb->mac_header;
	__skb_pull(skb, skb->mac_len);

	if (WARN_ON(skb->ip_summed != CHECKSUM_PARTIAL)) {
		if (skb_header_cloned(skb) &&
		    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
			return ERR_PTR(err);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype,
			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
		if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
			if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
				err = ptype->gso_send_check(skb);
				segs = ERR_PTR(err);
				if (err || skb_gso_ok(skb, features))
					break;
				__skb_push(skb, (skb->data -
						 skb_network_header(skb)));
			}
			segs = ptype->gso_segment(skb, features);
			break;
		}
	}
	rcu_read_unlock();

	__skb_push(skb, skb->data - skb_mac_header(skb));

	return segs;
}

EXPORT_SYMBOL(skb_gso_segment);
/* Take action when hardware reception checksum errors are detected. */
#ifdef CONFIG_BUG
void netdev_rx_csum_fault(struct net_device *dev)
{
	if (net_ratelimit()) {
		printk(KERN_ERR "%s: hw csum failure.\n",
			dev ? dev->name : "<unknown>");
		dump_stack();
	}
}
EXPORT_SYMBOL(netdev_rx_csum_fault);
#endif

/* Actually, we should eliminate this check as soon as we know, that:
 * 1. IOMMU is present and allows to map all the memory.
 * 2. No high memory really exists on this machine.
 */

static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_HIGHMEM
	int i;

	if (dev->features & NETIF_F_HIGHDMA)
		return 0;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		if (PageHighMem(skb_shinfo(skb)->frags[i].page))
			return 1;

#endif
	return 0;
}
struct dev_gso_cb {
	void (*destructor)(struct sk_buff *skb);
};

#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)

static void dev_gso_skb_destructor(struct sk_buff *skb)
{
	struct dev_gso_cb *cb;

	do {
		struct sk_buff *nskb = skb->next;

		skb->next = nskb->next;
		nskb->next = NULL;
		kfree_skb(nskb);
	} while (skb->next);

	cb = DEV_GSO_CB(skb);
	if (cb->destructor)
		cb->destructor(skb);
}

/**
 *	dev_gso_segment - Perform emulated hardware segmentation on skb.
 *	@skb: buffer to segment
 *
 *	This function segments the given skb and stores the list of segments
 *	in skb->next.
 */
static int dev_gso_segment(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct sk_buff *segs;
	int features = dev->features & ~(illegal_highdma(dev, skb) ?
					 NETIF_F_SG : 0);

	segs = skb_gso_segment(skb, features);

	/* Verifying header integrity only. */
	if (!segs)
		return 0;

	if (IS_ERR(segs))
		return PTR_ERR(segs);

	skb->next = segs;
	DEV_GSO_CB(skb)->destructor = skb->destructor;
	skb->destructor = dev_gso_skb_destructor;

	return 0;
}
int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
			struct netdev_queue *txq)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	prefetch(&dev->netdev_ops->ndo_start_xmit);
	if (likely(!skb->next)) {
		if (!list_empty(&ptype_all))
			dev_queue_xmit_nit(skb, dev);

		if (netif_needs_gso(dev, skb)) {
			if (unlikely(dev_gso_segment(skb)))
				goto out_kfree_skb;
			if (skb->next)
				goto gso;
		}

		return ops->ndo_start_xmit(skb, dev);
	}

gso:
	do {
		struct sk_buff *nskb = skb->next;
		int rc;

		skb->next = nskb->next;
		nskb->next = NULL;
		rc = ops->ndo_start_xmit(nskb, dev);
		if (unlikely(rc)) {
			nskb->next = skb->next;
			skb->next = nskb;
			return rc;
		}
		if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
			return NETDEV_TX_BUSY;
	} while (skb->next);

	skb->destructor = DEV_GSO_CB(skb)->destructor;

out_kfree_skb:
	kfree_skb(skb);
	return 0;
}
static u32 simple_tx_hashrnd;
static int simple_tx_hashrnd_initialized = 0;

static u16 simple_tx_hash(struct net_device *dev, struct sk_buff *skb)
{
	u32 addr1, addr2, ports;
	u32 hash, ihl;
	u8 ip_proto = 0;

	if (unlikely(!simple_tx_hashrnd_initialized)) {
		get_random_bytes(&simple_tx_hashrnd, 4);
		simple_tx_hashrnd_initialized = 1;
	}

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		if (!(ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)))
			ip_proto = ip_hdr(skb)->protocol;
		addr1 = ip_hdr(skb)->saddr;
		addr2 = ip_hdr(skb)->daddr;
		ihl = ip_hdr(skb)->ihl;
		break;
	case htons(ETH_P_IPV6):
		ip_proto = ipv6_hdr(skb)->nexthdr;
		addr1 = ipv6_hdr(skb)->saddr.s6_addr32[3];
		addr2 = ipv6_hdr(skb)->daddr.s6_addr32[3];
		ihl = (40 >> 2);
		break;
	default:
		return 0;
	}

	switch (ip_proto) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
	case IPPROTO_DCCP:
	case IPPROTO_ESP:
	case IPPROTO_AH:
	case IPPROTO_SCTP:
	case IPPROTO_UDPLITE:
		ports = *((u32 *) (skb_network_header(skb) + (ihl * 4)));
		break;

	default:
		ports = 0;
		break;
	}

	hash = jhash_3words(addr1, addr2, ports, simple_tx_hashrnd);

	return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
}

static struct netdev_queue *dev_pick_tx(struct net_device *dev,
					struct sk_buff *skb)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	u16 queue_index = 0;

	if (ops->ndo_select_queue)
		queue_index = ops->ndo_select_queue(dev, skb);
	else if (dev->real_num_tx_queues > 1)
		queue_index = simple_tx_hash(dev, skb);

	skb_set_queue_mapping(skb, queue_index);
	return netdev_get_tx_queue(dev, queue_index);
}
/**
 *	dev_queue_xmit - transmit a buffer
 *	@skb: buffer to transmit
 *
 *	Queue a buffer for transmission to a network device. The caller must
 *	have set the device and priority and built the buffer before calling
 *	this function. The function can be called from an interrupt.
 *
 *	A negative errno code is returned on a failure. A success does not
 *	guarantee the frame will be transmitted as it may be dropped due
 *	to congestion or traffic shaping.
 *
 * -----------------------------------------------------------------------------------
 *      I notice this method can also return errors from the queue disciplines,
 *      including NET_XMIT_DROP, which is a positive value.  So, errors can also
 *      be positive.
 *
 *      Regardless of the return value, the skb is consumed, so it is currently
 *      difficult to retry a send to this method.  (You can bump the ref count
 *      before sending to hold a reference for retry if you are careful.)
 *
 *      When calling this method, interrupts MUST be enabled.  This is because
 *      the BH enable code must have IRQs enabled so that it will not deadlock.
 */
int dev_queue_xmit(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct netdev_queue *txq;
	struct Qdisc *q;
	int rc = -ENOMEM;

	/* GSO will handle the following emulations directly. */
	if (netif_needs_gso(dev, skb))
		goto gso;

	if (skb_shinfo(skb)->frag_list &&
	    !(dev->features & NETIF_F_FRAGLIST) &&
	    __skb_linearize(skb))
		goto out_kfree_skb;

	/* Fragmented skb is linearized if device does not support SG,
	 * or if at least one of fragments is in highmem and device
	 * does not support DMA from it.
	 */
	if (skb_shinfo(skb)->nr_frags &&
	    (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
	    __skb_linearize(skb))
		goto out_kfree_skb;

	/* If packet is not checksummed and device does not support
	 * checksumming for this protocol, complete checksumming here.
	 */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		skb_set_transport_header(skb, skb->csum_start -
					      skb_headroom(skb));
		if (!dev_can_checksum(dev, skb) && skb_checksum_help(skb))
			goto out_kfree_skb;
	}

gso:
	/* Disable soft irqs for various locks below. Also
	 * stops preemption for RCU.
	 */
	rcu_read_lock_bh();

	txq = dev_pick_tx(dev, skb);
	q = rcu_dereference(txq->qdisc);

#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
#endif
	if (q->enqueue) {
		spinlock_t *root_lock = qdisc_lock(q);

		spin_lock(root_lock);

		if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
			kfree_skb(skb);
			rc = NET_XMIT_DROP;
		} else {
			rc = qdisc_enqueue_root(skb, q);
			qdisc_run(q);
		}
		spin_unlock(root_lock);

		goto out;
	}

	/* The device has no queue. Common case for software devices:
	   loopback, all the sorts of tunnels...

	   Really, it is unlikely that netif_tx_lock protection is necessary
	   here.  (f.e. loopback and IP tunnels are clean ignoring statistics
	   counters.)
	   However, it is possible, that they rely on protection
	   made by us here.

	   Check this and shot the lock. It is not prone from deadlocks.
	   Either shot noqueue qdisc, it is even simpler 8)
	 */
	if (dev->flags & IFF_UP) {
		int cpu = smp_processor_id(); /* ok because BHs are off */

		if (txq->xmit_lock_owner != cpu) {

			HARD_TX_LOCK(dev, txq, cpu);

			if (!netif_tx_queue_stopped(txq)) {
				rc = 0;
				if (!dev_hard_start_xmit(skb, dev, txq)) {
					HARD_TX_UNLOCK(dev, txq);
					goto out;
				}
			}
			HARD_TX_UNLOCK(dev, txq);
			if (net_ratelimit())
				printk(KERN_CRIT "Virtual device %s asks to "
				       "queue packet!\n", dev->name);
		} else {
			/* Recursion is detected! It is possible,
			 * unfortunately
			 */
			if (net_ratelimit())
				printk(KERN_CRIT "Dead loop on virtual device "
				       "%s, fix it urgently!\n", dev->name);
		}
	}

	rc = -ENETDOWN;
	rcu_read_unlock_bh();

out_kfree_skb:
	kfree_skb(skb);
	return rc;
out:
	rcu_read_unlock_bh();
	return rc;
}
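
/*
 * Illustrative sketch (not in the original file): building a raw frame and
 * handing it to dev_queue_xmit().  Per the comment above, the skb is always
 * consumed, so the caller must not touch it afterwards.  Link-layer header
 * construction is omitted; the protocol value and helper name are
 * hypothetical.
 */
#if 0
static int example_send_frame(struct net_device *dev,
			      const void *data, unsigned int len)
{
	struct sk_buff *skb = alloc_skb(LL_RESERVED_SPACE(dev) + len,
					GFP_ATOMIC);

	if (!skb)
		return -ENOMEM;

	skb_reserve(skb, LL_RESERVED_SPACE(dev));  /* room for link header */
	memcpy(skb_put(skb, len), data, len);
	skb->dev = dev;
	skb->protocol = htons(ETH_P_802_3);	/* hypothetical protocol */

	return dev_queue_xmit(skb);	/* consumes skb, may return > 0 */
}
#endif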
/*=======================================================================
			Receiver routines
  =======================================================================*/

int netdev_max_backlog __read_mostly = 1000;
int netdev_budget __read_mostly = 300;
int weight_p __read_mostly = 64;            /* old backlog weight */

DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };

/**
 *	netif_rx	-	post buffer to the network code
 *	@skb: buffer to post
 *
 *	This function receives a packet from a device driver and queues it for
 *	the upper (protocol) levels to process.  It always succeeds. The buffer
 *	may be dropped during processing for congestion control or by the
 *	protocol layers.
 *
 *	return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP     (packet was dropped)
 *
 */

int netif_rx(struct sk_buff *skb)
{
	struct softnet_data *queue;
	unsigned long flags;

	/* if netpoll wants it, pretend we never saw it */
	if (netpoll_rx(skb))
		return NET_RX_DROP;

	if (!skb->tstamp.tv64)
		net_timestamp(skb);

	/*
	 * The code is rearranged so that the path is the shortest
	 * when the CPU is congested, but it is still operating.
	 */
	local_irq_save(flags);
	queue = &__get_cpu_var(softnet_data);

	__get_cpu_var(netdev_rx_stat).total++;
	if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
		if (queue->input_pkt_queue.qlen) {
enqueue:
			__skb_queue_tail(&queue->input_pkt_queue, skb);
			local_irq_restore(flags);
			return NET_RX_SUCCESS;
		}

		napi_schedule(&queue->backlog);
		goto enqueue;
	}

	__get_cpu_var(netdev_rx_stat).dropped++;
	local_irq_restore(flags);

	kfree_skb(skb);
	return NET_RX_DROP;
}

int netif_rx_ni(struct sk_buff *skb)
{
	int err;

	preempt_disable();
	err = netif_rx(skb);
	if (local_softirq_pending())
		do_softirq();
	preempt_enable();

	return err;
}

EXPORT_SYMBOL(netif_rx_ni);
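
/*
 * Illustrative sketch (not in the original file): a non-NAPI driver's
 * receive interrupt pushes each frame into the per-CPU backlog with
 * netif_rx().  The device-specific copy step is a hypothetical placeholder.
 */
#if 0
static void example_rx_interrupt(struct net_device *dev)
{
	struct sk_buff *skb = dev_alloc_skb(ETH_FRAME_LEN + 2);

	if (!skb)
		return;
	skb_reserve(skb, 2);	/* align the IP header */
	/* ... copy the frame from hardware into skb_put(skb, len) ... */
	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);		/* queue for the softirq; always "succeeds" */
}
#endif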
static void net_tx_action(struct softirq_action *h)
{
	struct softnet_data *sd = &__get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_disable();
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_enable();

		while (clist) {
			struct sk_buff *skb = clist;
			clist = clist->next;

			WARN_ON(atomic_read(&skb->users));
			__kfree_skb(skb);
		}
	}

	if (sd->output_queue) {
		struct Qdisc *head;

		local_irq_disable();
		head = sd->output_queue;
		sd->output_queue = NULL;
		local_irq_enable();

		while (head) {
			struct Qdisc *q = head;
			spinlock_t *root_lock;

			head = head->next_sched;

			root_lock = qdisc_lock(q);
			if (spin_trylock(root_lock)) {
				smp_mb__before_clear_bit();
				clear_bit(__QDISC_STATE_SCHED,
					  &q->state);
				qdisc_run(q);
				spin_unlock(root_lock);
			} else {
				if (!test_bit(__QDISC_STATE_DEACTIVATED,
					      &q->state)) {
					__netif_reschedule(q);
				} else {
					smp_mb__before_clear_bit();
					clear_bit(__QDISC_STATE_SCHED,
						  &q->state);
				}
			}
		}
	}
}

static inline int deliver_skb(struct sk_buff *skb,
			      struct packet_type *pt_prev,
			      struct net_device *orig_dev)
{
	atomic_inc(&skb->users);
	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}
#if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE)
/* These hooks defined here for ATM */
struct net_bridge;
struct net_bridge_fdb_entry *(*br_fdb_get_hook)(struct net_bridge *br,
						unsigned char *addr);
void (*br_fdb_put_hook)(struct net_bridge_fdb_entry *ent) __read_mostly;

/*
 * If bridge module is loaded call bridging hook.
 *  returns NULL if packet was consumed.
 */
struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p,
					struct sk_buff *skb) __read_mostly;
static inline struct sk_buff *handle_bridge(struct sk_buff *skb,
					    struct packet_type **pt_prev, int *ret,
					    struct net_device *orig_dev)
{
	struct net_bridge_port *port;

	if (skb->pkt_type == PACKET_LOOPBACK ||
	    (port = rcu_dereference(skb->dev->br_port)) == NULL)
		return skb;

	if (*pt_prev) {
		*ret = deliver_skb(skb, *pt_prev, orig_dev);
		*pt_prev = NULL;
	}

	return br_handle_frame_hook(port, skb);
}
#else
#define handle_bridge(skb, pt_prev, ret, orig_dev)	(skb)
#endif

#if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE)
struct sk_buff *(*macvlan_handle_frame_hook)(struct sk_buff *skb) __read_mostly;
EXPORT_SYMBOL_GPL(macvlan_handle_frame_hook);

static inline struct sk_buff *handle_macvlan(struct sk_buff *skb,
					     struct packet_type **pt_prev,
					     int *ret,
					     struct net_device *orig_dev)
{
	if (skb->dev->macvlan_port == NULL)
		return skb;

	if (*pt_prev) {
		*ret = deliver_skb(skb, *pt_prev, orig_dev);
		*pt_prev = NULL;
	}
	return macvlan_handle_frame_hook(skb);
}
#else
#define handle_macvlan(skb, pt_prev, ret, orig_dev)	(skb)
#endif
#ifdef CONFIG_NET_CLS_ACT
/* TODO: Maybe we should just force sch_ingress to be compiled in
 * when CONFIG_NET_CLS_ACT is?  Otherwise we waste some useless
 * instructions (a compare and 2 stores) if we don't have it on
 * but do have CONFIG_NET_CLS_ACT.
 * NOTE: This doesn't stop any functionality; if you don't have
 * the ingress scheduler, you just can't add policies on ingress.
 */
static int ing_filter(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	u32 ttl = G_TC_RTTL(skb->tc_verd);
	struct netdev_queue *rxq;
	int result = TC_ACT_OK;
	struct Qdisc *q;

	if (MAX_RED_LOOP < ttl++) {
		printk(KERN_WARNING
		       "Redir loop detected Dropping packet (%d->%d)\n",
		       skb->iif, dev->ifindex);
		return TC_ACT_SHOT;
	}

	skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);

	rxq = &dev->rx_queue;

	q = rxq->qdisc;
	if (q != &noop_qdisc) {
		spin_lock(qdisc_lock(q));
		if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
			result = qdisc_enqueue_root(skb, q);
		spin_unlock(qdisc_lock(q));
	}

	return result;
}

static inline struct sk_buff *handle_ing(struct sk_buff *skb,
					 struct packet_type **pt_prev,
					 int *ret, struct net_device *orig_dev)
{
	if (skb->dev->rx_queue.qdisc == &noop_qdisc)
		goto out;

	if (*pt_prev) {
		*ret = deliver_skb(skb, *pt_prev, orig_dev);
		*pt_prev = NULL;
	} else {
		/* Huh? Why does turning on AF_PACKET affect this? */
		skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
	}

	switch (ing_filter(skb)) {
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
		kfree_skb(skb);
		return NULL;
	}

out:
	skb->tc_verd = 0;
	return skb;
}
#endif
/**
 *	netif_nit_deliver - deliver received packets to network taps
 *	@skb: buffer
 *
 *	This function is used to deliver incoming packets to network
 *	taps. It should be used when the normal netif_receive_skb path
 *	is bypassed, for example because of VLAN acceleration.
 */
void netif_nit_deliver(struct sk_buff *skb)
{
	struct packet_type *ptype;

	if (list_empty(&ptype_all))
		return;

	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	skb->mac_len = skb->network_header - skb->mac_header;

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		if (!ptype->dev || ptype->dev == skb->dev)
			deliver_skb(skb, ptype, skb->dev);
	}
	rcu_read_unlock();
}
/**
 *	netif_receive_skb - process receive buffer from network
 *	@skb: buffer to process
 *
 *	netif_receive_skb() is the main receive data processing function.
 *	It always succeeds. The buffer may be dropped during processing
 *	for congestion control or by the protocol layers.
 *
 *	This function may only be called from softirq context and interrupts
 *	should be enabled.
 *
 *	Return values (usually ignored):
 *	NET_RX_SUCCESS: no congestion
 *	NET_RX_DROP: packet was dropped
 */
int netif_receive_skb(struct sk_buff *skb)
{
	struct packet_type *ptype, *pt_prev;
	struct net_device *orig_dev;
	struct net_device *null_or_orig;
	int ret = NET_RX_DROP;
	__be16 type;

	if (skb->vlan_tci && vlan_hwaccel_do_receive(skb))
		return NET_RX_SUCCESS;

	/* if we've gotten here through NAPI, check netpoll */
	if (netpoll_receive_skb(skb))
		return NET_RX_DROP;

	if (!skb->tstamp.tv64)
		net_timestamp(skb);

	if (!skb->iif)
		skb->iif = skb->dev->ifindex;

	null_or_orig = NULL;
	orig_dev = skb->dev;
	if (orig_dev->master) {
		if (skb_bond_should_drop(skb))
			null_or_orig = orig_dev; /* deliver only exact match */
		else
			skb->dev = orig_dev->master;
	}

	__get_cpu_var(netdev_rx_stat).total++;

	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	skb->mac_len = skb->network_header - skb->mac_header;

	pt_prev = NULL;

	rcu_read_lock();

	/* Don't receive packets in an exiting network namespace */
	if (!net_alive(dev_net(skb->dev))) {
		kfree_skb(skb);
		goto out;
	}

#ifdef CONFIG_NET_CLS_ACT
	if (skb->tc_verd & TC_NCLS) {
		skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
		goto ncls;
	}
#endif

	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		if (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
		    ptype->dev == orig_dev) {
			if (pt_prev)
				ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = ptype;
		}
	}

#ifdef CONFIG_NET_CLS_ACT
	skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
	if (!skb)
		goto out;
ncls:
#endif

	skb = handle_bridge(skb, &pt_prev, &ret, orig_dev);
	if (!skb)
		goto out;
	skb = handle_macvlan(skb, &pt_prev, &ret, orig_dev);
	if (!skb)
		goto out;

	type = skb->protocol;
	list_for_each_entry_rcu(ptype,
			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
		if (ptype->type == type &&
		    (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
		     ptype->dev == orig_dev)) {
			if (pt_prev)
				ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = ptype;
		}
	}

	if (pt_prev) {
		ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
	} else {
		kfree_skb(skb);
		/* Jamal, now you will not able to escape explaining
		 * me how you were going to use this. :-)
		 */
		ret = NET_RX_DROP;
	}

out:
	rcu_read_unlock();
	return ret;
}
2328 static void flush_backlog(void *arg)
2330 struct net_device *dev = arg;
2331 struct softnet_data *queue = &__get_cpu_var(softnet_data);
2332 struct sk_buff *skb, *tmp;
2334 skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp)
2335 if (skb->dev == dev) {
2336 __skb_unlink(skb, &queue->input_pkt_queue);
2341 static int napi_gro_complete(struct sk_buff *skb)
2343 struct packet_type *ptype;
2344 __be16 type = skb->protocol;
2345 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
2348 if (!skb_shinfo(skb)->frag_list)
2352 list_for_each_entry_rcu(ptype, head, list) {
2353 if (ptype->type != type || ptype->dev || !ptype->gro_complete)
2356 err = ptype->gro_complete(skb);
2362 WARN_ON(&ptype->list == head);
2364 return NET_RX_SUCCESS;
2368 __skb_push(skb, -skb_network_offset(skb));
2369 return netif_receive_skb(skb);
2372 void napi_gro_flush(struct napi_struct *napi)
2374 struct sk_buff *skb, *next;
2376 for (skb = napi->gro_list; skb; skb = next) {
2379 napi_gro_complete(skb);
2382 napi->gro_list = NULL;
2384 EXPORT_SYMBOL(napi_gro_flush);
2386 int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2388 struct sk_buff **pp = NULL;
2389 struct packet_type *ptype;
2390 __be16 type = skb->protocol;
2391 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
2395 if (!(skb->dev->features & NETIF_F_GRO))
2399 list_for_each_entry_rcu(ptype, head, list) {
2402 if (ptype->type != type || ptype->dev || !ptype->gro_receive)
2405 skb_reset_network_header(skb);
2406 mac_len = skb->network_header - skb->mac_header;
2407 skb->mac_len = mac_len;
2408 NAPI_GRO_CB(skb)->same_flow = 0;
2409 NAPI_GRO_CB(skb)->flush = 0;
2411 for (p = napi->gro_list; p; p = p->next) {
2413 NAPI_GRO_CB(p)->same_flow =
2414 p->mac_len == mac_len &&
2415 !memcmp(skb_mac_header(p), skb_mac_header(skb),
2417 NAPI_GRO_CB(p)->flush = 0;
2420 pp = ptype->gro_receive(&napi->gro_list, skb);
2425 if (&ptype->list == head)
2429 struct sk_buff *nskb = *pp;
2433 napi_gro_complete(nskb);
2437 if (NAPI_GRO_CB(skb)->same_flow)
2440 if (NAPI_GRO_CB(skb)->flush || count >= MAX_GRO_SKBS) {
2441 __skb_push(skb, -skb_network_offset(skb));
2445 NAPI_GRO_CB(skb)->count = 1;
2446 skb->next = napi->gro_list;
2447 napi->gro_list = skb;
2450 return NET_RX_SUCCESS;
2453 return netif_receive_skb(skb);
2455 EXPORT_SYMBOL(napi_gro_receive);
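/*
 * Example (editor's sketch, not part of the original source): a poll
 * routine in a GRO-aware driver hands each received buffer to
 * napi_gro_receive() instead of netif_receive_skb(). struct my_priv,
 * my_rx_ring_next() and my_enable_irqs() are hypothetical driver
 * details.
 *
 *	static int my_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct my_priv *priv = container_of(napi, struct my_priv, napi);
 *		struct sk_buff *skb;
 *		int work = 0;
 *
 *		while (work < budget && (skb = my_rx_ring_next(priv))) {
 *			skb->protocol = eth_type_trans(skb, priv->netdev);
 *			napi_gro_receive(napi, skb);
 *			work++;
 *		}
 *		if (work < budget) {
 *			napi_complete(napi);	(flushes gro_list, see below)
 *			my_enable_irqs(priv);
 *		}
 *		return work;
 *	}
 */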
2457 static int process_backlog(struct napi_struct *napi, int quota)
2460 struct softnet_data *queue = &__get_cpu_var(softnet_data);
2461 unsigned long start_time = jiffies;
2463 napi->weight = weight_p;
2465 struct sk_buff *skb;
2467 local_irq_disable();
2468 skb = __skb_dequeue(&queue->input_pkt_queue);
2470 __napi_complete(napi);
2476 napi_gro_receive(napi, skb);
2477 } while (++work < quota && jiffies == start_time);
2479 napi_gro_flush(napi);
2485 * __napi_schedule - schedule for receive
2486 * @n: entry to schedule
2488 * The entry's receive function will be scheduled to run
2490 void __napi_schedule(struct napi_struct *n)
2492 unsigned long flags;
2494 local_irq_save(flags);
2495 list_add_tail(&n->poll_list, &__get_cpu_var(softnet_data).poll_list);
2496 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2497 local_irq_restore(flags);
2499 EXPORT_SYMBOL(__napi_schedule);
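/*
 * Example (editor's sketch): interrupt handlers normally go through
 * napi_schedule_prep()/__napi_schedule() (or the napi_schedule()
 * wrapper), so that only the first interrupt after napi_complete()
 * actually schedules the poll. my_disable_irqs() and struct my_priv
 * are hypothetical.
 *
 *	static irqreturn_t my_interrupt(int irq, void *dev_id)
 *	{
 *		struct my_priv *priv = dev_id;
 *
 *		if (napi_schedule_prep(&priv->napi)) {
 *			my_disable_irqs(priv);
 *			__napi_schedule(&priv->napi);
 *		}
 *		return IRQ_HANDLED;
 *	}
 */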
2501 void __napi_complete(struct napi_struct *n)
2503 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
2504 BUG_ON(n->gro_list);
2506 list_del(&n->poll_list);
2507 smp_mb__before_clear_bit();
2508 clear_bit(NAPI_STATE_SCHED, &n->state);
2510 EXPORT_SYMBOL(__napi_complete);
2512 void napi_complete(struct napi_struct *n)
2514 unsigned long flags;
2517 * don't let napi dequeue from the cpu poll list
2518 * just in case it's running on a different cpu
2520 if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
2524 local_irq_save(flags);
2526 local_irq_restore(flags);
2528 EXPORT_SYMBOL(napi_complete);
2530 void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
2531 int (*poll)(struct napi_struct *, int), int weight)
2533 INIT_LIST_HEAD(&napi->poll_list);
2534 napi->gro_list = NULL;
2536 napi->weight = weight;
2537 list_add(&napi->dev_list, &dev->napi_list);
2538 #ifdef CONFIG_NETPOLL
2540 spin_lock_init(&napi->poll_lock);
2541 napi->poll_owner = -1;
2543 set_bit(NAPI_STATE_SCHED, &napi->state);
2545 EXPORT_SYMBOL(netif_napi_add);
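/*
 * Example (editor's sketch): drivers call this once at probe time,
 * before register_netdev(); 64 is the conventional weight for
 * Ethernet. my_poll() and struct my_priv are the hypothetical pieces
 * from the sketches above.
 *
 *	netif_napi_add(dev, &priv->napi, my_poll, 64);
 */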
2547 void netif_napi_del(struct napi_struct *napi)
2549 struct sk_buff *skb, *next;
2551 list_del_init(&napi->dev_list);
2553 for (skb = napi->gro_list; skb; skb = next) {
2559 napi->gro_list = NULL;
2561 EXPORT_SYMBOL(netif_napi_del);
2564 static void net_rx_action(struct softirq_action *h)
2566 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
2567 unsigned long time_limit = jiffies + 2;
2568 int budget = netdev_budget;
2571 local_irq_disable();
2573 while (!list_empty(list)) {
2574 struct napi_struct *n;
2577 /* If softirq window is exhausted then punt.
2578 * Allow this to run for 2 jiffies, giving
2579 * an average latency of 1.5/HZ.
2581 if (unlikely(budget <= 0 || time_after(jiffies, time_limit)))
2586 /* Even though interrupts have been re-enabled, this
2587 * access is safe because interrupts can only add new
2588 * entries to the tail of this list, and only ->poll()
2589 * calls can remove this head entry from the list.
2591 n = list_entry(list->next, struct napi_struct, poll_list);
2593 have = netpoll_poll_lock(n);
2597 /* This NAPI_STATE_SCHED test is for avoiding a race
2598 * with netpoll's poll_napi(). Only the entity which
2599 * obtains the lock and sees NAPI_STATE_SCHED set will
2600 * actually make the ->poll() call. Therefore we avoid
2601 * accidentally calling ->poll() when NAPI is not scheduled.
2604 if (test_bit(NAPI_STATE_SCHED, &n->state))
2605 work = n->poll(n, weight);
2607 WARN_ON_ONCE(work > weight);
2611 local_irq_disable();
2613 /* Drivers must not modify the NAPI state if they
2614 * consume the entire weight. In such cases this code
2615 * still "owns" the NAPI instance and therefore can
2616 * move the instance around on the list at-will.
2618 if (unlikely(work == weight)) {
2619 if (unlikely(napi_disable_pending(n)))
2622 list_move_tail(&n->poll_list, list);
2625 netpoll_poll_unlock(have);
2630 #ifdef CONFIG_NET_DMA
2632 * There may not be any more sk_buffs coming right now, so push
2633 * any pending DMA copies to hardware
2635 if (!cpus_empty(net_dma.channel_mask)) {
2637 for_each_cpu_mask_nr(chan_idx, net_dma.channel_mask) {
2638 struct dma_chan *chan = net_dma.channels[chan_idx];
2640 dma_async_memcpy_issue_pending(chan);
2648 __get_cpu_var(netdev_rx_stat).time_squeeze++;
2649 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2653 static gifconf_func_t *gifconf_list[NPROTO];
2656 * register_gifconf - register a SIOCGIF handler
2657 * @family: Address family
2658 * @gifconf: Function handler
2660 * Register protocol dependent address dumping routines. The handler
2661 * that is passed must not be freed or reused until it has been replaced
2662 * by another handler.
2664 int register_gifconf(unsigned int family, gifconf_func_t *gifconf)
2666 if (family >= NPROTO)
2668 gifconf_list[family] = gifconf;
2674 * Map an interface index to its name (SIOCGIFNAME)
2678 * We need this ioctl for efficient implementation of the
2679 * if_indextoname() function required by the IPv6 API. Without
2680 * it, we would have to search all the interfaces to find a match.
2684 static int dev_ifname(struct net *net, struct ifreq __user *arg)
2686 struct net_device *dev;
2690 * Fetch the caller's info block.
2693 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
2696 read_lock(&dev_base_lock);
2697 dev = __dev_get_by_index(net, ifr.ifr_ifindex);
2699 read_unlock(&dev_base_lock);
2703 strcpy(ifr.ifr_name, dev->name);
2704 read_unlock(&dev_base_lock);
2706 if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
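/*
 * Example (editor's sketch, user space view): this ioctl is what makes
 * if_indextoname(3) cheap. A minimal self-contained caller:
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <net/if.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		struct ifreq ifr;
 *		int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *		memset(&ifr, 0, sizeof(ifr));
 *		ifr.ifr_ifindex = 1;		usually the loopback device
 *		if (fd >= 0 && ioctl(fd, SIOCGIFNAME, &ifr) == 0)
 *			printf("ifindex 1 is %s\n", ifr.ifr_name);
 *		close(fd);
 *		return 0;
 *	}
 */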
2712 * Perform a SIOCGIFCONF call. This structure will change
2713 * size eventually, and there is nothing I can do about it.
2714 * Thus we will need a 'compatibility mode'.
2717 static int dev_ifconf(struct net *net, char __user *arg)
2720 struct net_device *dev;
2727 * Fetch the caller's info block.
2730 if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
2737 * Loop over the interfaces, and write an info block for each.
2741 for_each_netdev(net, dev) {
2742 for (i = 0; i < NPROTO; i++) {
2743 if (gifconf_list[i]) {
2746 done = gifconf_list[i](dev, NULL, 0);
2748 done = gifconf_list[i](dev, pos + total,
2758 * All done. Write the updated control block back to the caller.
2760 ifc.ifc_len = total;
2763 * Both BSD and Solaris return 0 here, so we do too.
2765 return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
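/*
 * Example (editor's sketch, user space view): the classic two-pass
 * SIOCGIFCONF idiom. Passing ifc_buf == NULL makes the kernel report
 * the required length in ifc_len (the gifconf handlers above are
 * called with a NULL buffer for exactly this). Headers as in the
 * previous sketch plus <stdlib.h>; error handling trimmed.
 *
 *	struct ifconf ifc;
 *	struct ifreq *ifr;
 *	int i, n, fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	memset(&ifc, 0, sizeof(ifc));
 *	ioctl(fd, SIOCGIFCONF, &ifc);		first pass: sizing only
 *	ifc.ifc_buf = malloc(ifc.ifc_len);
 *	ioctl(fd, SIOCGIFCONF, &ifc);		second pass: fill buffer
 *	ifr = ifc.ifc_req;
 *	n = ifc.ifc_len / sizeof(struct ifreq);
 *	for (i = 0; i < n; i++)
 *		printf("%s\n", ifr[i].ifr_name);
 */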
2768 #ifdef CONFIG_PROC_FS
2770 * This is invoked by the /proc filesystem handler to display a device in detail.
2773 void *dev_seq_start(struct seq_file *seq, loff_t *pos)
2774 __acquires(dev_base_lock)
2776 struct net *net = seq_file_net(seq);
2778 struct net_device *dev;
2780 read_lock(&dev_base_lock);
2782 return SEQ_START_TOKEN;
2785 for_each_netdev(net, dev)
2792 void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2794 struct net *net = seq_file_net(seq);
2796 return v == SEQ_START_TOKEN ?
2797 first_net_device(net) : next_net_device((struct net_device *)v);
2800 void dev_seq_stop(struct seq_file *seq, void *v)
2801 __releases(dev_base_lock)
2803 read_unlock(&dev_base_lock);
2806 static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
2808 const struct net_device_stats *stats = dev_get_stats(dev);
2810 seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
2811 "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
2812 dev->name, stats->rx_bytes, stats->rx_packets,
2814 stats->rx_dropped + stats->rx_missed_errors,
2815 stats->rx_fifo_errors,
2816 stats->rx_length_errors + stats->rx_over_errors +
2817 stats->rx_crc_errors + stats->rx_frame_errors,
2818 stats->rx_compressed, stats->multicast,
2819 stats->tx_bytes, stats->tx_packets,
2820 stats->tx_errors, stats->tx_dropped,
2821 stats->tx_fifo_errors, stats->collisions,
2822 stats->tx_carrier_errors +
2823 stats->tx_aborted_errors +
2824 stats->tx_window_errors +
2825 stats->tx_heartbeat_errors,
2826 stats->tx_compressed);
2830 * Called from the PROCfs module. This now uses the new arbitrary sized
2831 * /proc/net interface to create /proc/net/dev
2833 static int dev_seq_show(struct seq_file *seq, void *v)
2835 if (v == SEQ_START_TOKEN)
2836 seq_puts(seq, "Inter-| Receive "
2838 " face |bytes packets errs drop fifo frame "
2839 "compressed multicast|bytes packets errs "
2840 "drop fifo colls carrier compressed\n");
2842 dev_seq_printf_stats(seq, v);
2846 static struct netif_rx_stats *softnet_get_online(loff_t *pos)
2848 struct netif_rx_stats *rc = NULL;
2850 while (*pos < nr_cpu_ids)
2851 if (cpu_online(*pos)) {
2852 rc = &per_cpu(netdev_rx_stat, *pos);
2859 static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
2861 return softnet_get_online(pos);
2864 static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2867 return softnet_get_online(pos);
2870 static void softnet_seq_stop(struct seq_file *seq, void *v)
2874 static int softnet_seq_show(struct seq_file *seq, void *v)
2876 struct netif_rx_stats *s = v;
2878 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
2879 s->total, s->dropped, s->time_squeeze, 0,
2880 0, 0, 0, 0, /* was fastroute */
2885 static const struct seq_operations dev_seq_ops = {
2886 .start = dev_seq_start,
2887 .next = dev_seq_next,
2888 .stop = dev_seq_stop,
2889 .show = dev_seq_show,
2892 static int dev_seq_open(struct inode *inode, struct file *file)
2894 return seq_open_net(inode, file, &dev_seq_ops,
2895 sizeof(struct seq_net_private));
2898 static const struct file_operations dev_seq_fops = {
2899 .owner = THIS_MODULE,
2900 .open = dev_seq_open,
2902 .llseek = seq_lseek,
2903 .release = seq_release_net,
2906 static const struct seq_operations softnet_seq_ops = {
2907 .start = softnet_seq_start,
2908 .next = softnet_seq_next,
2909 .stop = softnet_seq_stop,
2910 .show = softnet_seq_show,
2913 static int softnet_seq_open(struct inode *inode, struct file *file)
2915 return seq_open(file, &softnet_seq_ops);
2918 static const struct file_operations softnet_seq_fops = {
2919 .owner = THIS_MODULE,
2920 .open = softnet_seq_open,
2922 .llseek = seq_lseek,
2923 .release = seq_release,
2926 static void *ptype_get_idx(loff_t pos)
2928 struct packet_type *pt = NULL;
2932 list_for_each_entry_rcu(pt, &ptype_all, list) {
2938 for (t = 0; t < PTYPE_HASH_SIZE; t++) {
2939 list_for_each_entry_rcu(pt, &ptype_base[t], list) {
2948 static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
2952 return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
2955 static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2957 struct packet_type *pt;
2958 struct list_head *nxt;
2962 if (v == SEQ_START_TOKEN)
2963 return ptype_get_idx(0);
2966 nxt = pt->list.next;
2967 if (pt->type == htons(ETH_P_ALL)) {
2968 if (nxt != &ptype_all)
2971 nxt = ptype_base[0].next;
2973 hash = ntohs(pt->type) & PTYPE_HASH_MASK;
2975 while (nxt == &ptype_base[hash]) {
2976 if (++hash >= PTYPE_HASH_SIZE)
2978 nxt = ptype_base[hash].next;
2981 return list_entry(nxt, struct packet_type, list);
2984 static void ptype_seq_stop(struct seq_file *seq, void *v)
2990 static int ptype_seq_show(struct seq_file *seq, void *v)
2992 struct packet_type *pt = v;
2994 if (v == SEQ_START_TOKEN)
2995 seq_puts(seq, "Type Device Function\n");
2996 else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
2997 if (pt->type == htons(ETH_P_ALL))
2998 seq_puts(seq, "ALL ");
3000 seq_printf(seq, "%04x", ntohs(pt->type));
3002 seq_printf(seq, " %-8s %pF\n",
3003 pt->dev ? pt->dev->name : "", pt->func);
3009 static const struct seq_operations ptype_seq_ops = {
3010 .start = ptype_seq_start,
3011 .next = ptype_seq_next,
3012 .stop = ptype_seq_stop,
3013 .show = ptype_seq_show,
3016 static int ptype_seq_open(struct inode *inode, struct file *file)
3018 return seq_open_net(inode, file, &ptype_seq_ops,
3019 sizeof(struct seq_net_private));
3022 static const struct file_operations ptype_seq_fops = {
3023 .owner = THIS_MODULE,
3024 .open = ptype_seq_open,
3026 .llseek = seq_lseek,
3027 .release = seq_release_net,
3031 static int __net_init dev_proc_net_init(struct net *net)
3035 if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
3037 if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
3039 if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
3042 if (wext_proc_init(net))
3048 proc_net_remove(net, "ptype");
3050 proc_net_remove(net, "softnet_stat");
3052 proc_net_remove(net, "dev");
3056 static void __net_exit dev_proc_net_exit(struct net *net)
3058 wext_proc_exit(net);
3060 proc_net_remove(net, "ptype");
3061 proc_net_remove(net, "softnet_stat");
3062 proc_net_remove(net, "dev");
3065 static struct pernet_operations __net_initdata dev_proc_ops = {
3066 .init = dev_proc_net_init,
3067 .exit = dev_proc_net_exit,
3070 static int __init dev_proc_init(void)
3072 return register_pernet_subsys(&dev_proc_ops);
3075 #define dev_proc_init() 0
3076 #endif /* CONFIG_PROC_FS */
3080 * netdev_set_master - set up master/slave pair
3081 * @slave: slave device
3082 * @master: new master device
3084 * Changes the master device of the slave. Pass %NULL to break the
3085 * bonding. The caller must hold the RTNL semaphore. On a failure
3086 * a negative errno code is returned. On success the reference counts
3087 * are adjusted, %RTM_NEWLINK is sent to the routing socket and the
3088 * function returns zero.
3090 int netdev_set_master(struct net_device *slave, struct net_device *master)
3092 struct net_device *old = slave->master;
3102 slave->master = master;
3110 slave->flags |= IFF_SLAVE;
3112 slave->flags &= ~IFF_SLAVE;
3114 rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
3118 static void dev_change_rx_flags(struct net_device *dev, int flags)
3120 const struct net_device_ops *ops = dev->netdev_ops;
3122 if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
3123 ops->ndo_change_rx_flags(dev, flags);
3126 static int __dev_set_promiscuity(struct net_device *dev, int inc)
3128 unsigned short old_flags = dev->flags;
3132 dev->flags |= IFF_PROMISC;
3133 dev->promiscuity += inc;
3134 if (dev->promiscuity == 0) {
3137 * If inc causes an overflow, leave promiscuity untouched and return an error.
3140 dev->flags &= ~IFF_PROMISC;
3142 dev->promiscuity -= inc;
3143 printk(KERN_WARNING "%s: promiscuity counter overflowed, "
3144 "set promiscuity failed; the promiscuity feature "
3145 "of the device may be unreliable.\n", dev->name);
3149 if (dev->flags != old_flags) {
3150 printk(KERN_INFO "device %s %s promiscuous mode\n",
3151 dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
3154 audit_log(current->audit_context, GFP_ATOMIC,
3155 AUDIT_ANOM_PROMISCUOUS,
3156 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
3157 dev->name, (dev->flags & IFF_PROMISC),
3158 (old_flags & IFF_PROMISC),
3159 audit_get_loginuid(current),
3160 current->uid, current->gid,
3161 audit_get_sessionid(current));
3163 dev_change_rx_flags(dev, IFF_PROMISC);
3169 * dev_set_promiscuity - update promiscuity count on a device
3173 * Add or remove promiscuity from a device. While the count in the device
3174 * remains above zero the interface remains promiscuous. Once it hits zero
3175 * the device reverts back to normal filtering operation. A negative inc
3176 * value is used to drop promiscuity on the device.
3177 * Return 0 if successful or a negative errno code on error.
3179 int dev_set_promiscuity(struct net_device *dev, int inc)
3181 unsigned short old_flags = dev->flags;
3184 err = __dev_set_promiscuity(dev, inc);
3187 if (dev->flags != old_flags)
3188 dev_set_rx_mode(dev);
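/*
 * Example (editor's sketch): the counter semantics mean every +1 must
 * eventually be paired with a -1, typically at open/close time of the
 * consumer (packet sockets use this pattern). The caller is expected
 * to hold the RTNL semaphore:
 *
 *	int err;
 *
 *	rtnl_lock();
 *	err = dev_set_promiscuity(dev, 1);	start capturing
 *	rtnl_unlock();
 *	...
 *	rtnl_lock();
 *	dev_set_promiscuity(dev, -1);		matching release
 *	rtnl_unlock();
 */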
3193 * dev_set_allmulti - update allmulti count on a device
3197 * Add or remove reception of all multicast frames to a device. While the
3198 * count in the device remains above zero the interface keeps receiving
3199 * all multicast frames. Once it hits zero the device reverts to normal
3200 * filtering operation. A negative @inc value is used to drop the counter
3201 * when releasing a resource needing all multicasts.
3202 * Return 0 if successful or a negative errno code on error.
3205 int dev_set_allmulti(struct net_device *dev, int inc)
3207 unsigned short old_flags = dev->flags;
3211 dev->flags |= IFF_ALLMULTI;
3212 dev->allmulti += inc;
3213 if (dev->allmulti == 0) {
3216 * If inc causes an overflow, leave allmulti untouched and return an error.
3219 dev->flags &= ~IFF_ALLMULTI;
3221 dev->allmulti -= inc;
3222 printk(KERN_WARNING "%s: allmulti counter overflowed, "
3223 "set allmulti failed; the allmulti feature of "
3224 "the device may be unreliable.\n", dev->name);
3228 if (dev->flags ^ old_flags) {
3229 dev_change_rx_flags(dev, IFF_ALLMULTI);
3230 dev_set_rx_mode(dev);
3236 * Upload unicast and multicast address lists to device and
3237 * configure RX filtering. When the device doesn't support unicast
3238 * filtering it is put in promiscuous mode while unicast addresses are present.
3241 void __dev_set_rx_mode(struct net_device *dev)
3243 const struct net_device_ops *ops = dev->netdev_ops;
3245 /* dev_open will call this function so the list will stay sane. */
3246 if (!(dev->flags & IFF_UP))
3249 if (!netif_device_present(dev))
3252 if (ops->ndo_set_rx_mode)
3253 ops->ndo_set_rx_mode(dev);
3255 /* Unicast addresses changes may only happen under the rtnl,
3256 * therefore calling __dev_set_promiscuity here is safe.
3258 if (dev->uc_count > 0 && !dev->uc_promisc) {
3259 __dev_set_promiscuity(dev, 1);
3260 dev->uc_promisc = 1;
3261 } else if (dev->uc_count == 0 && dev->uc_promisc) {
3262 __dev_set_promiscuity(dev, -1);
3263 dev->uc_promisc = 0;
3266 if (ops->ndo_set_multicast_list)
3267 ops->ndo_set_multicast_list(dev);
3271 void dev_set_rx_mode(struct net_device *dev)
3273 netif_addr_lock_bh(dev);
3274 __dev_set_rx_mode(dev);
3275 netif_addr_unlock_bh(dev);
3278 int __dev_addr_delete(struct dev_addr_list **list, int *count,
3279 void *addr, int alen, int glbl)
3281 struct dev_addr_list *da;
3283 for (; (da = *list) != NULL; list = &da->next) {
3284 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
3285 alen == da->da_addrlen) {
3287 int old_glbl = da->da_gusers;
3304 int __dev_addr_add(struct dev_addr_list **list, int *count,
3305 void *addr, int alen, int glbl)
3307 struct dev_addr_list *da;
3309 for (da = *list; da != NULL; da = da->next) {
3310 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
3311 da->da_addrlen == alen) {
3313 int old_glbl = da->da_gusers;
3323 da = kzalloc(sizeof(*da), GFP_ATOMIC);
3326 memcpy(da->da_addr, addr, alen);
3327 da->da_addrlen = alen;
3329 da->da_gusers = glbl ? 1 : 0;
3337 * dev_unicast_delete - Release secondary unicast address.
3339 * @addr: address to delete
3340 * @alen: length of @addr
3342 * Release reference to a secondary unicast address and remove it
3343 * from the device if the reference count drops to zero.
3345 * The caller must hold the rtnl_mutex.
3347 int dev_unicast_delete(struct net_device *dev, void *addr, int alen)
3353 netif_addr_lock_bh(dev);
3354 err = __dev_addr_delete(&dev->uc_list, &dev->uc_count, addr, alen, 0);
3356 __dev_set_rx_mode(dev);
3357 netif_addr_unlock_bh(dev);
3360 EXPORT_SYMBOL(dev_unicast_delete);
3363 * dev_unicast_add - add a secondary unicast address
3365 * @addr: address to add
3366 * @alen: length of @addr
3368 * Add a secondary unicast address to the device or increase
3369 * the reference count if it already exists.
3371 * The caller must hold the rtnl_mutex.
3373 int dev_unicast_add(struct net_device *dev, void *addr, int alen)
3379 netif_addr_lock_bh(dev);
3380 err = __dev_addr_add(&dev->uc_list, &dev->uc_count, addr, alen, 0);
3382 __dev_set_rx_mode(dev);
3383 netif_addr_unlock_bh(dev);
3386 EXPORT_SYMBOL(dev_unicast_add);
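/*
 * Example (editor's sketch): a stacked driver (macvlan-style) claims
 * an extra unicast address on its lower device in ndo_open and drops
 * it again in ndo_stop; both calls assume the rtnl_mutex is held, as
 * documented above. lowerdev and addr are hypothetical.
 *
 *	err = dev_unicast_add(lowerdev, addr, ETH_ALEN);
 *	...
 *	dev_unicast_delete(lowerdev, addr, ETH_ALEN);
 */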
3388 int __dev_addr_sync(struct dev_addr_list **to, int *to_count,
3389 struct dev_addr_list **from, int *from_count)
3391 struct dev_addr_list *da, *next;
3395 while (da != NULL) {
3397 if (!da->da_synced) {
3398 err = __dev_addr_add(to, to_count,
3399 da->da_addr, da->da_addrlen, 0);
3404 } else if (da->da_users == 1) {
3405 __dev_addr_delete(to, to_count,
3406 da->da_addr, da->da_addrlen, 0);
3407 __dev_addr_delete(from, from_count,
3408 da->da_addr, da->da_addrlen, 0);
3415 void __dev_addr_unsync(struct dev_addr_list **to, int *to_count,
3416 struct dev_addr_list **from, int *from_count)
3418 struct dev_addr_list *da, *next;
3421 while (da != NULL) {
3423 if (da->da_synced) {
3424 __dev_addr_delete(to, to_count,
3425 da->da_addr, da->da_addrlen, 0);
3427 __dev_addr_delete(from, from_count,
3428 da->da_addr, da->da_addrlen, 0);
3435 * dev_unicast_sync - Synchronize device's unicast list to another device
3436 * @to: destination device
3437 * @from: source device
3439 * Add newly added addresses to the destination device and release
3440 * addresses that have no users left. The source device must be
3441 * locked by netif_tx_lock_bh.
3443 * This function is intended to be called from the dev->set_rx_mode
3444 * function of layered software devices.
3446 int dev_unicast_sync(struct net_device *to, struct net_device *from)
3450 netif_addr_lock_bh(to);
3451 err = __dev_addr_sync(&to->uc_list, &to->uc_count,
3452 &from->uc_list, &from->uc_count);
3454 __dev_set_rx_mode(to);
3455 netif_addr_unlock_bh(to);
3458 EXPORT_SYMBOL(dev_unicast_sync);
3461 * dev_unicast_unsync - Remove synchronized addresses from the destination device
3462 * @to: destination device
3463 * @from: source device
3465 * Remove all addresses that were added to the destination device by
3466 * dev_unicast_sync(). This function is intended to be called from the
3467 * dev->stop function of layered software devices.
3469 void dev_unicast_unsync(struct net_device *to, struct net_device *from)
3471 netif_addr_lock_bh(from);
3472 netif_addr_lock(to);
3474 __dev_addr_unsync(&to->uc_list, &to->uc_count,
3475 &from->uc_list, &from->uc_count);
3476 __dev_set_rx_mode(to);
3478 netif_addr_unlock(to);
3479 netif_addr_unlock_bh(from);
3481 EXPORT_SYMBOL(dev_unicast_unsync);
3483 static void __dev_addr_discard(struct dev_addr_list **list)
3485 struct dev_addr_list *tmp;
3487 while (*list != NULL) {
3490 if (tmp->da_users > tmp->da_gusers)
3491 printk(KERN_ERR "__dev_addr_discard: address leakage! "
3492 "da_users=%d\n", tmp->da_users);
3497 static void dev_addr_discard(struct net_device *dev)
3499 netif_addr_lock_bh(dev);
3501 __dev_addr_discard(&dev->uc_list);
3504 __dev_addr_discard(&dev->mc_list);
3507 netif_addr_unlock_bh(dev);
3511 * dev_get_flags - get flags reported to userspace
3514 * Get the combination of flag bits exported through APIs to userspace.
3516 unsigned dev_get_flags(const struct net_device *dev)
3520 flags = (dev->flags & ~(IFF_PROMISC |
3525 (dev->gflags & (IFF_PROMISC |
3528 if (netif_running(dev)) {
3529 if (netif_oper_up(dev))
3530 flags |= IFF_RUNNING;
3531 if (netif_carrier_ok(dev))
3532 flags |= IFF_LOWER_UP;
3533 if (netif_dormant(dev))
3534 flags |= IFF_DORMANT;
3541 * dev_change_flags - change device settings
3543 * @flags: device state flags
3545 * Change device settings based on the given state flags. The flags
3546 * are in the userspace-exported format.
3548 int dev_change_flags(struct net_device *dev, unsigned flags)
3551 int old_flags = dev->flags;
3556 * Set the flags on our device.
3559 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
3560 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
3562 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
3566 * Load in the correct multicast list now the flags have changed.
3569 if ((old_flags ^ flags) & IFF_MULTICAST)
3570 dev_change_rx_flags(dev, IFF_MULTICAST);
3572 dev_set_rx_mode(dev);
3575 * Have we downed the interface? We handle IFF_UP ourselves
3576 * according to user attempts to set it, rather than blindly
3581 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
3582 ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev);
3585 dev_set_rx_mode(dev);
3588 if (dev->flags & IFF_UP &&
3589 ((old_flags ^ dev->flags) & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
3591 call_netdevice_notifiers(NETDEV_CHANGE, dev);
3593 if ((flags ^ dev->gflags) & IFF_PROMISC) {
3594 int inc = (flags & IFF_PROMISC) ? +1 : -1;
3595 dev->gflags ^= IFF_PROMISC;
3596 dev_set_promiscuity(dev, inc);
3599 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
3600 is important. Some (broken) drivers set IFF_PROMISC when
3601 IFF_ALLMULTI is requested, without asking us and without reporting it.
3603 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
3604 int inc = (flags & IFF_ALLMULTI) ? +1 : -1;
3605 dev->gflags ^= IFF_ALLMULTI;
3606 dev_set_allmulti(dev, inc);
3609 /* Exclude state transition flags, already notified */
3610 changes = (old_flags ^ dev->flags) & ~(IFF_UP | IFF_RUNNING);
3612 rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
3618 * dev_set_mtu - Change maximum transfer unit
3620 * @new_mtu: new transfer unit
3622 * Change the maximum transfer size of the network device.
3624 int dev_set_mtu(struct net_device *dev, int new_mtu)
3626 const struct net_device_ops *ops = dev->netdev_ops;
3629 if (new_mtu == dev->mtu)
3632 /* MTU must be positive. */
3636 if (!netif_device_present(dev))
3640 if (ops->ndo_change_mtu)
3641 err = ops->ndo_change_mtu(dev, new_mtu);
3645 if (!err && dev->flags & IFF_UP)
3646 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
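/*
 * Example (editor's sketch): like the other dev_* setters here,
 * dev_set_mtu() relies on the caller serializing with the RTNL
 * semaphore, which the ioctl path below takes via rtnl_lock():
 *
 *	int err;
 *
 *	rtnl_lock();
 *	err = dev_set_mtu(dev, 9000);		e.g. enable jumbo frames
 *	rtnl_unlock();
 */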
3651 * dev_set_mac_address - Change Media Access Control Address
3655 * Change the hardware (MAC) address of the device
3657 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
3659 const struct net_device_ops *ops = dev->netdev_ops;
3662 if (!ops->ndo_set_mac_address)
3664 if (sa->sa_family != dev->type)
3666 if (!netif_device_present(dev))
3668 err = ops->ndo_set_mac_address(dev, sa);
3670 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
3675 * Perform the SIOCxIFxxx calls, inside read_lock(dev_base_lock)
3677 static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
3680 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
3686 case SIOCGIFFLAGS: /* Get interface flags */
3687 ifr->ifr_flags = dev_get_flags(dev);
3690 case SIOCGIFMETRIC: /* Get the metric on the interface
3691 (currently unused) */
3692 ifr->ifr_metric = 0;
3695 case SIOCGIFMTU: /* Get the MTU of a device */
3696 ifr->ifr_mtu = dev->mtu;
3701 memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
3703 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
3704 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
3705 ifr->ifr_hwaddr.sa_family = dev->type;
3713 ifr->ifr_map.mem_start = dev->mem_start;
3714 ifr->ifr_map.mem_end = dev->mem_end;
3715 ifr->ifr_map.base_addr = dev->base_addr;
3716 ifr->ifr_map.irq = dev->irq;
3717 ifr->ifr_map.dma = dev->dma;
3718 ifr->ifr_map.port = dev->if_port;
3722 ifr->ifr_ifindex = dev->ifindex;
3726 ifr->ifr_qlen = dev->tx_queue_len;
3730 /* dev_ioctl() should ensure this case is never reached. */
3742 * Perform the SIOCxIFxxx calls, inside rtnl_lock()
3744 static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
3747 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
3748 const struct net_device_ops *ops;
3753 ops = dev->netdev_ops;
3756 case SIOCSIFFLAGS: /* Set interface flags */
3757 return dev_change_flags(dev, ifr->ifr_flags);
3759 case SIOCSIFMETRIC: /* Set the metric on the interface
3760 (currently unused) */
3763 case SIOCSIFMTU: /* Set the MTU of a device */
3764 return dev_set_mtu(dev, ifr->ifr_mtu);
3767 return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
3769 case SIOCSIFHWBROADCAST:
3770 if (ifr->ifr_hwaddr.sa_family != dev->type)
3772 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
3773 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
3774 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
3778 if (ops->ndo_set_config) {
3779 if (!netif_device_present(dev))
3781 return ops->ndo_set_config(dev, &ifr->ifr_map);
3786 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
3787 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
3789 if (!netif_device_present(dev))
3791 return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data,
3795 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
3796 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
3798 if (!netif_device_present(dev))
3800 return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data,
3804 if (ifr->ifr_qlen < 0)
3806 dev->tx_queue_len = ifr->ifr_qlen;
3810 ifr->ifr_newname[IFNAMSIZ-1] = '\0';
3811 return dev_change_name(dev, ifr->ifr_newname);
3814 * Unknown or private ioctl
3818 if ((cmd >= SIOCDEVPRIVATE &&
3819 cmd <= SIOCDEVPRIVATE + 15) ||
3820 cmd == SIOCBONDENSLAVE ||
3821 cmd == SIOCBONDRELEASE ||
3822 cmd == SIOCBONDSETHWADDR ||
3823 cmd == SIOCBONDSLAVEINFOQUERY ||
3824 cmd == SIOCBONDINFOQUERY ||
3825 cmd == SIOCBONDCHANGEACTIVE ||
3826 cmd == SIOCGMIIPHY ||
3827 cmd == SIOCGMIIREG ||
3828 cmd == SIOCSMIIREG ||
3829 cmd == SIOCBRADDIF ||
3830 cmd == SIOCBRDELIF ||
3831 cmd == SIOCWANDEV) {
3833 if (ops->ndo_do_ioctl) {
3834 if (netif_device_present(dev))
3835 err = ops->ndo_do_ioctl(dev, ifr, cmd);
3847 * This function handles all "interface"-type I/O control requests. The actual
3848 * 'doing' part of this is dev_ifsioc above.
3852 * dev_ioctl - network device ioctl
3853 * @net: the applicable net namespace
3854 * @cmd: command to issue
3855 * @arg: pointer to a struct ifreq in user space
3857 * Issue ioctl functions to devices. This is normally called by the
3858 * user space syscall interfaces but can sometimes be useful for
3859 * other purposes. The return value is the return from the syscall if
3860 * positive or a negative errno code on error.
3863 int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
3869 /* One special case: SIOCGIFCONF takes ifconf argument
3870 and requires shared lock, because it sleeps writing to user space.
3874 if (cmd == SIOCGIFCONF) {
3876 ret = dev_ifconf(net, (char __user *) arg);
3880 if (cmd == SIOCGIFNAME)
3881 return dev_ifname(net, (struct ifreq __user *)arg);
3883 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
3886 ifr.ifr_name[IFNAMSIZ-1] = 0;
3888 colon = strchr(ifr.ifr_name, ':');
3893 * See which interface the caller is talking about.
3898 * These ioctl calls:
3899 * - can be done by all.
3900 * - atomic and do not require locking.
3911 dev_load(net, ifr.ifr_name);
3912 read_lock(&dev_base_lock);
3913 ret = dev_ifsioc_locked(net, &ifr, cmd);
3914 read_unlock(&dev_base_lock);
3918 if (copy_to_user(arg, &ifr,
3919 sizeof(struct ifreq)))
3925 dev_load(net, ifr.ifr_name);
3927 ret = dev_ethtool(net, &ifr);
3932 if (copy_to_user(arg, &ifr,
3933 sizeof(struct ifreq)))
3939 * These ioctl calls:
3940 * - require superuser power.
3941 * - require strict serialization.
3947 if (!capable(CAP_NET_ADMIN))
3949 dev_load(net, ifr.ifr_name);
3951 ret = dev_ifsioc(net, &ifr, cmd);
3956 if (copy_to_user(arg, &ifr,
3957 sizeof(struct ifreq)))
3963 * These ioctl calls:
3964 * - require superuser power.
3965 * - require strict serialization.
3966 * - do not return a value
3976 case SIOCSIFHWBROADCAST:
3979 case SIOCBONDENSLAVE:
3980 case SIOCBONDRELEASE:
3981 case SIOCBONDSETHWADDR:
3982 case SIOCBONDCHANGEACTIVE:
3985 if (!capable(CAP_NET_ADMIN))
3988 case SIOCBONDSLAVEINFOQUERY:
3989 case SIOCBONDINFOQUERY:
3990 dev_load(net, ifr.ifr_name);
3992 ret = dev_ifsioc(net, &ifr, cmd);
3997 /* Get the per device memory space. We can add this but
3998 * currently do not support it */
4000 /* Set the per device memory buffer space.
4001 * Not applicable in our case */
4006 * Unknown or private ioctl.
4009 if (cmd == SIOCWANDEV ||
4010 (cmd >= SIOCDEVPRIVATE &&
4011 cmd <= SIOCDEVPRIVATE + 15)) {
4012 dev_load(net, ifr.ifr_name);
4014 ret = dev_ifsioc(net, &ifr, cmd);
4016 if (!ret && copy_to_user(arg, &ifr,
4017 sizeof(struct ifreq)))
4021 /* Take care of Wireless Extensions */
4022 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
4023 return wext_handle_ioctl(net, &ifr, cmd, arg);
4030 * dev_new_index - allocate an ifindex
4031 * @net: the applicable net namespace
4033 * Returns a suitable unique value for a new device interface
4034 * number. The caller must hold the rtnl semaphore or the
4035 * dev_base_lock to be sure it remains unique.
4037 static int dev_new_index(struct net *net)
4043 if (!__dev_get_by_index(net, ifindex))
4048 /* Delayed registration/unregistration */
4049 static LIST_HEAD(net_todo_list);
4051 static void net_set_todo(struct net_device *dev)
4053 list_add_tail(&dev->todo_list, &net_todo_list);
4056 static void rollback_registered(struct net_device *dev)
4058 BUG_ON(dev_boot_phase);
4061 /* Some devices call this without ever registering, to unwind a failed initialization. */
4062 if (dev->reg_state == NETREG_UNINITIALIZED) {
4063 printk(KERN_DEBUG "unregister_netdevice: device %s/%p never "
4064 "was registered\n", dev->name, dev);
4070 BUG_ON(dev->reg_state != NETREG_REGISTERED);
4072 /* If device is running, close it first. */
4075 /* And unlink it from device chain. */
4076 unlist_netdevice(dev);
4078 dev->reg_state = NETREG_UNREGISTERING;
4082 /* Shutdown queueing discipline. */
4086 /* Notify protocols that we are about to destroy
4087 this device; they should clean up all their state.
4089 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
4092 * Flush the unicast and multicast chains
4094 dev_addr_discard(dev);
4096 if (dev->netdev_ops->ndo_uninit)
4097 dev->netdev_ops->ndo_uninit(dev);
4099 /* Notifier chain MUST detach us from master device. */
4100 WARN_ON(dev->master);
4102 /* Remove entries from kobject tree */
4103 netdev_unregister_kobject(dev);
4110 static void __netdev_init_queue_locks_one(struct net_device *dev,
4111 struct netdev_queue *dev_queue,
4114 spin_lock_init(&dev_queue->_xmit_lock);
4115 netdev_set_xmit_lockdep_class(&dev_queue->_xmit_lock, dev->type);
4116 dev_queue->xmit_lock_owner = -1;
4119 static void netdev_init_queue_locks(struct net_device *dev)
4121 netdev_for_each_tx_queue(dev, __netdev_init_queue_locks_one, NULL);
4122 __netdev_init_queue_locks_one(dev, &dev->rx_queue, NULL);
4125 unsigned long netdev_fix_features(unsigned long features, const char *name)
4127 /* Fix illegal SG+CSUM combinations. */
4128 if ((features & NETIF_F_SG) &&
4129 !(features & NETIF_F_ALL_CSUM)) {
4131 printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no "
4132 "checksum feature.\n", name);
4133 features &= ~NETIF_F_SG;
4136 /* TSO requires that SG is present as well. */
4137 if ((features & NETIF_F_TSO) && !(features & NETIF_F_SG)) {
4139 printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no "
4140 "SG feature.\n", name);
4141 features &= ~NETIF_F_TSO;
4144 if (features & NETIF_F_UFO) {
4145 if (!(features & NETIF_F_GEN_CSUM)) {
4147 printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
4148 "since no NETIF_F_HW_CSUM feature.\n",
4150 features &= ~NETIF_F_UFO;
4153 if (!(features & NETIF_F_SG)) {
4155 printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
4156 "since no NETIF_F_SG feature.\n", name);
4157 features &= ~NETIF_F_UFO;
4163 EXPORT_SYMBOL(netdev_fix_features);
4166 * register_netdevice - register a network device
4167 * @dev: device to register
4169 * Take a completed network device structure and add it to the kernel
4170 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
4171 * chain. 0 is returned on success. A negative errno code is returned
4172 * on a failure to set up the device, or if the name is a duplicate.
4174 * Callers must hold the rtnl semaphore. You may want
4175 * register_netdev() instead of this.
4178 * The locking appears insufficient to guarantee two parallel registers
4179 * will not get the same name.
4182 int register_netdevice(struct net_device *dev)
4184 struct hlist_head *head;
4185 struct hlist_node *p;
4187 struct net *net = dev_net(dev);
4189 BUG_ON(dev_boot_phase);
4194 /* When net_device's are persistent, this will be fatal. */
4195 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
4198 spin_lock_init(&dev->addr_list_lock);
4199 netdev_set_addr_lockdep_class(dev);
4200 netdev_init_queue_locks(dev);
4204 #ifdef CONFIG_COMPAT_NET_DEV_OPS
4205 /* Netdevice_ops API compatibility support.
4206 * This is temporary until all network devices are converted.
4208 if (dev->netdev_ops) {
4209 const struct net_device_ops *ops = dev->netdev_ops;
4211 dev->init = ops->ndo_init;
4212 dev->uninit = ops->ndo_uninit;
4213 dev->open = ops->ndo_open;
4214 dev->change_rx_flags = ops->ndo_change_rx_flags;
4215 dev->set_rx_mode = ops->ndo_set_rx_mode;
4216 dev->set_multicast_list = ops->ndo_set_multicast_list;
4217 dev->set_mac_address = ops->ndo_set_mac_address;
4218 dev->validate_addr = ops->ndo_validate_addr;
4219 dev->do_ioctl = ops->ndo_do_ioctl;
4220 dev->set_config = ops->ndo_set_config;
4221 dev->change_mtu = ops->ndo_change_mtu;
4222 dev->tx_timeout = ops->ndo_tx_timeout;
4223 dev->get_stats = ops->ndo_get_stats;
4224 dev->vlan_rx_register = ops->ndo_vlan_rx_register;
4225 dev->vlan_rx_add_vid = ops->ndo_vlan_rx_add_vid;
4226 dev->vlan_rx_kill_vid = ops->ndo_vlan_rx_kill_vid;
4227 #ifdef CONFIG_NET_POLL_CONTROLLER
4228 dev->poll_controller = ops->ndo_poll_controller;
4231 char drivername[64];
4232 pr_info("%s (%s): not using net_device_ops yet\n",
4233 dev->name, netdev_drivername(dev, drivername, 64));
4235 /* This works only because net_device_ops and the
4236 compatibility structure have the same layout. */
4237 dev->netdev_ops = (void *) &(dev->init);
4241 /* Init, if this function is available */
4242 if (dev->netdev_ops->ndo_init) {
4243 ret = dev->netdev_ops->ndo_init(dev);
4251 if (!dev_valid_name(dev->name)) {
4256 dev->ifindex = dev_new_index(net);
4257 if (dev->iflink == -1)
4258 dev->iflink = dev->ifindex;
4260 /* Check for existence of name */
4261 head = dev_name_hash(net, dev->name);
4262 hlist_for_each(p, head) {
4263 struct net_device *d
4264 = hlist_entry(p, struct net_device, name_hlist);
4265 if (!strncmp(d->name, dev->name, IFNAMSIZ)) {
4271 /* Fix illegal checksum combinations */
4272 if ((dev->features & NETIF_F_HW_CSUM) &&
4273 (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
4274 printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n",
4276 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
4279 if ((dev->features & NETIF_F_NO_CSUM) &&
4280 (dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
4281 printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n",
4283 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
4286 dev->features = netdev_fix_features(dev->features, dev->name);
4288 /* Enable software GSO if SG is supported. */
4289 if (dev->features & NETIF_F_SG)
4290 dev->features |= NETIF_F_GSO;
4292 netdev_initialize_kobject(dev);
4293 ret = netdev_register_kobject(dev);
4296 dev->reg_state = NETREG_REGISTERED;
4299 * Default initial state at registration is that the
4300 * device is present.
4303 set_bit(__LINK_STATE_PRESENT, &dev->state);
4305 dev_init_scheduler(dev);
4307 list_netdevice(dev);
4309 /* Notify protocols, that a new device appeared. */
4310 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
4311 ret = notifier_to_errno(ret);
4313 rollback_registered(dev);
4314 dev->reg_state = NETREG_UNREGISTERED;
4321 if (dev->netdev_ops->ndo_uninit)
4322 dev->netdev_ops->ndo_uninit(dev);
4327 * register_netdev - register a network device
4328 * @dev: device to register
4330 * Take a completed network device structure and add it to the kernel
4331 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
4332 * chain. 0 is returned on success. A negative errno code is returned
4333 * on a failure to set up the device, or if the name is a duplicate.
4335 * This is a wrapper around register_netdevice that takes the rtnl semaphore
4336 * and expands the device name if you passed a format string to
4339 int register_netdev(struct net_device *dev)
4346 * If the name is a format string the caller wants us to do a name allocation.
4349 if (strchr(dev->name, '%')) {
4350 err = dev_alloc_name(dev, dev->name);
4355 err = register_netdevice(dev);
4360 EXPORT_SYMBOL(register_netdev);
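/*
 * Example (editor's sketch): the canonical driver-side pairing of the
 * registration API. For Ethernet, alloc_etherdev() wraps the
 * allocator below with ether_setup(). struct my_priv is hypothetical.
 *
 *	struct net_device *dev = alloc_etherdev(sizeof(struct my_priv));
 *	int err;
 *
 *	if (!dev)
 *		return -ENOMEM;
 *	err = register_netdev(dev);		takes and drops the rtnl lock
 *	if (err) {
 *		free_netdev(dev);
 *		return err;
 *	}
 *	...
 *	unregister_netdev(dev);			on device removal
 *	free_netdev(dev);
 */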
4363 * netdev_wait_allrefs - wait until all references are gone.
4365 * This is called when unregistering network devices.
4367 * Any protocol or device that holds a reference should register
4368 * for netdevice notification, and cleanup and put back the
4369 * reference if they receive an UNREGISTER event.
4370 * We can get stuck here if buggy protocols don't correctly call dev_put.
4373 static void netdev_wait_allrefs(struct net_device *dev)
4375 unsigned long rebroadcast_time, warning_time;
4377 rebroadcast_time = warning_time = jiffies;
4378 while (atomic_read(&dev->refcnt) != 0) {
4379 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
4382 /* Rebroadcast unregister notification */
4383 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
4385 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
4387 /* We must not have linkwatch events
4388 * pending on unregister. If this
4389 * happens, we simply run the queue
4390 * unscheduled, resulting in a noop for this device.
4393 linkwatch_run_queue();
4398 rebroadcast_time = jiffies;
4403 if (time_after(jiffies, warning_time + 10 * HZ)) {
4404 printk(KERN_EMERG "unregister_netdevice: "
4405 "waiting for %s to become free. Usage "
4407 dev->name, atomic_read(&dev->refcnt));
4408 warning_time = jiffies;
4417 * register_netdevice(x1);
4418 * register_netdevice(x2);
4420 * unregister_netdevice(y1);
4421 * unregister_netdevice(y2);
4427 * We are invoked by rtnl_unlock().
4428 * This allows us to deal with problems:
4429 * 1) We can delete sysfs objects which invoke hotplug
4430 * without deadlocking with linkwatch via keventd.
4431 * 2) Since we run with the RTNL semaphore not held, we can sleep
4432 * safely in order to wait for the netdev refcnt to drop to zero.
4434 * We must not return until all unregister events added during
4435 * the interval the lock was held have been completed.
4437 void netdev_run_todo(void)
4439 struct list_head list;
4441 /* Snapshot list, allow later requests */
4442 list_replace_init(&net_todo_list, &list);
4446 while (!list_empty(&list)) {
4447 struct net_device *dev
4448 = list_entry(list.next, struct net_device, todo_list);
4449 list_del(&dev->todo_list);
4451 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
4452 printk(KERN_ERR "network todo '%s' but state %d\n",
4453 dev->name, dev->reg_state);
4458 dev->reg_state = NETREG_UNREGISTERED;
4460 on_each_cpu(flush_backlog, dev, 1);
4462 netdev_wait_allrefs(dev);
4465 BUG_ON(atomic_read(&dev->refcnt));
4466 WARN_ON(dev->ip_ptr);
4467 WARN_ON(dev->ip6_ptr);
4468 WARN_ON(dev->dn_ptr);
4470 if (dev->destructor)
4471 dev->destructor(dev);
4473 /* Free network device */
4474 kobject_put(&dev->dev.kobj);
4479 * dev_get_stats - get network device statistics
4480 * @dev: device to get statistics from
4482 * Get network statistics from device. The device driver may provide
4483 * its own method by setting dev->netdev_ops->ndo_get_stats; otherwise
4484 * the internal statistics structure is used.
4486 const struct net_device_stats *dev_get_stats(struct net_device *dev)
4488 const struct net_device_ops *ops = dev->netdev_ops;
4490 if (ops->ndo_get_stats)
4491 return ops->ndo_get_stats(dev);
4495 EXPORT_SYMBOL(dev_get_stats);
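/*
 * Example (editor's sketch): a driver with hardware counters supplies
 * ndo_get_stats and refreshes dev->stats on demand; drivers without
 * one simply get the internal structure back. my_read_counter() and
 * the MY_* counter ids are hypothetical.
 *
 *	static struct net_device_stats *my_get_stats(struct net_device *dev)
 *	{
 *		struct my_priv *priv = netdev_priv(dev);
 *
 *		dev->stats.rx_packets = my_read_counter(priv, MY_RX_PKTS);
 *		dev->stats.tx_packets = my_read_counter(priv, MY_TX_PKTS);
 *		return &dev->stats;
 *	}
 */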
4497 static void netdev_init_one_queue(struct net_device *dev,
4498 struct netdev_queue *queue,
4504 static void netdev_init_queues(struct net_device *dev)
4506 netdev_init_one_queue(dev, &dev->rx_queue, NULL);
4507 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
4508 spin_lock_init(&dev->tx_global_lock);
4512 * alloc_netdev_mq - allocate network device
4513 * @sizeof_priv: size of private data to allocate space for
4514 * @name: device name format string
4515 * @setup: callback to initialize device
4516 * @queue_count: the number of subqueues to allocate
4518 * Allocates a struct net_device with private data area for driver use
4519 * and performs basic initialization. Also allocates subqueue structs
4520 * for each queue on the device at the end of the netdevice.
4522 struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
4523 void (*setup)(struct net_device *), unsigned int queue_count)
4525 struct netdev_queue *tx;
4526 struct net_device *dev;
4530 BUG_ON(strlen(name) >= sizeof(dev->name));
4532 alloc_size = sizeof(struct net_device);
4534 /* ensure 32-byte alignment of private area */
4535 alloc_size = (alloc_size + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST;
4536 alloc_size += sizeof_priv;
4538 /* ensure 32-byte alignment of whole construct */
4539 alloc_size += NETDEV_ALIGN_CONST;
4541 p = kzalloc(alloc_size, GFP_KERNEL);
4543 printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
4547 tx = kcalloc(queue_count, sizeof(struct netdev_queue), GFP_KERNEL);
4549 printk(KERN_ERR "alloc_netdev: Unable to allocate "
4555 dev = (struct net_device *)
4556 (((long)p + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST);
4557 dev->padded = (char *)dev - (char *)p;
4558 dev_net_set(dev, &init_net);
4561 dev->num_tx_queues = queue_count;
4562 dev->real_num_tx_queues = queue_count;
4564 dev->gso_max_size = GSO_MAX_SIZE;
4566 netdev_init_queues(dev);
4568 INIT_LIST_HEAD(&dev->napi_list);
4570 strcpy(dev->name, name);
4573 EXPORT_SYMBOL(alloc_netdev_mq);
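/*
 * Example (editor's sketch): a multiqueue Ethernet driver asking for
 * four TX queues; ether_setup() is the usual setup callback and
 * "eth%d" lets the registration path pick the unit number later.
 * struct my_priv is hypothetical.
 *
 *	dev = alloc_netdev_mq(sizeof(struct my_priv), "eth%d",
 *			      ether_setup, 4);
 */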
4576 * free_netdev - free network device
4579 * This function does the last stage of destroying an allocated device
4580 * interface. The reference to the device object is released.
4581 * If this is the last reference then it will be freed.
4583 void free_netdev(struct net_device *dev)
4585 struct napi_struct *p, *n;
4587 release_net(dev_net(dev));
4591 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
4594 /* Compatibility with error handling in drivers */
4595 if (dev->reg_state == NETREG_UNINITIALIZED) {
4596 kfree((char *)dev - dev->padded);
4600 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
4601 dev->reg_state = NETREG_RELEASED;
4603 /* will free via device release */
4604 put_device(&dev->dev);
4608 * synchronize_net - Synchronize with packet receive processing
4610 * Wait for packets currently being received to be done.
4611 * Does not block later packets from starting.
4613 void synchronize_net(void)
4620 * unregister_netdevice - remove device from the kernel
4623 * This function shuts down a device interface and removes it
4624 * from the kernel tables.
4626 * Callers must hold the rtnl semaphore. You may want
4627 * unregister_netdev() instead of this.
4630 void unregister_netdevice(struct net_device *dev)
4634 rollback_registered(dev);
4635 /* Finish processing unregister after unlock */
4640 * unregister_netdev - remove device from the kernel
4643 * This function shuts down a device interface and removes it
4644 * from the kernel tables.
4646 * This is just a wrapper for unregister_netdevice that takes
4647 * the rtnl semaphore. In general you want to use this and not
4648 * unregister_netdevice.
4650 void unregister_netdev(struct net_device *dev)
4653 unregister_netdevice(dev);
4657 EXPORT_SYMBOL(unregister_netdev);
4660 * dev_change_net_namespace - move device to a different network namespace
4662 * @net: network namespace
4663 * @pat: If not NULL name pattern to try if the current device name
4664 * is already taken in the destination network namespace.
4666 * This function shuts down a device interface and moves it
4667 * to a new network namespace. On success 0 is returned, on
4668 * a failure a negative errno code is returned.
4670 * Callers must hold the rtnl semaphore.
4673 int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
4676 const char *destname;
4681 /* Don't allow namespace local devices to be moved. */
4683 if (dev->features & NETIF_F_NETNS_LOCAL)
4687 /* Don't allow real devices to be moved when sysfs
4691 if (dev->dev.parent)
4695 /* Ensure the device has been registered */
4697 if (dev->reg_state != NETREG_REGISTERED)
4700 /* Get out if there is nothing to do */
4702 if (net_eq(dev_net(dev), net))
4705 /* Pick the destination device name, and ensure
4706 * we can use it in the destination network namespace.
4709 destname = dev->name;
4710 if (__dev_get_by_name(net, destname)) {
4711 /* We get here if we can't use the current device name */
4714 if (!dev_valid_name(pat))
4716 if (strchr(pat, '%')) {
4717 if (__dev_alloc_name(net, pat, buf) < 0)
4722 if (__dev_get_by_name(net, destname))
4727 * And now a mini version of register_netdevice and unregister_netdevice.
4730 /* If device is running close it first. */
4733 /* And unlink it from device chain */
4735 unlist_netdevice(dev);
4739 /* Shutdown queueing discipline. */
4742 /* Notify protocols that we are about to destroy
4743 this device; they should clean up all their state.
4745 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
4748 * Flush the unicast and multicast chains
4750 dev_addr_discard(dev);
4752 netdev_unregister_kobject(dev);
4754 /* Actually switch the network namespace */
4755 dev_net_set(dev, net);
4757 /* Assign the new device name */
4758 if (destname != dev->name)
4759 strcpy(dev->name, destname);
4761 /* If there is an ifindex conflict assign a new one */
4762 if (__dev_get_by_index(net, dev->ifindex)) {
4763 int iflink = (dev->iflink == dev->ifindex);
4764 dev->ifindex = dev_new_index(net);
4766 dev->iflink = dev->ifindex;
4769 /* Fixup kobjects */
4770 err = netdev_register_kobject(dev);
4773 /* Add the device back in the hashes */
4774 list_netdevice(dev);
4776 /* Notify protocols, that a new device appeared. */
4777 call_netdevice_notifiers(NETDEV_REGISTER, dev);
4785 static int dev_cpu_callback(struct notifier_block *nfb,
4786 unsigned long action,
4789 struct sk_buff **list_skb;
4790 struct Qdisc **list_net;
4791 struct sk_buff *skb;
4792 unsigned int cpu, oldcpu = (unsigned long)ocpu;
4793 struct softnet_data *sd, *oldsd;
4795 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
4798 local_irq_disable();
4799 cpu = smp_processor_id();
4800 sd = &per_cpu(softnet_data, cpu);
4801 oldsd = &per_cpu(softnet_data, oldcpu);
4803 /* Find end of our completion_queue. */
4804 list_skb = &sd->completion_queue;
4806 list_skb = &(*list_skb)->next;
4807 /* Append completion queue from offline CPU. */
4808 *list_skb = oldsd->completion_queue;
4809 oldsd->completion_queue = NULL;
4811 /* Find end of our output_queue. */
4812 list_net = &sd->output_queue;
4814 list_net = &(*list_net)->next_sched;
4815 /* Append output queue from offline CPU. */
4816 *list_net = oldsd->output_queue;
4817 oldsd->output_queue = NULL;
4819 raise_softirq_irqoff(NET_TX_SOFTIRQ);
4822 /* Process offline CPU's input_pkt_queue */
4823 while ((skb = __skb_dequeue(&oldsd->input_pkt_queue)))
4829 #ifdef CONFIG_NET_DMA
4831 * net_dma_rebalance - try to maintain one DMA channel per CPU
4832 * @net_dma: DMA client and associated data (lock, channels, channel_mask)
4834 * This is called when the number of channels allocated to the net_dma client
4835 * changes. The net_dma client tries to have one DMA channel per CPU.
4838 static void net_dma_rebalance(struct net_dma *net_dma)
4840 unsigned int cpu, i, n, chan_idx;
4841 struct dma_chan *chan;
4843 if (cpus_empty(net_dma->channel_mask)) {
4844 for_each_online_cpu(cpu)
4845 rcu_assign_pointer(per_cpu(softnet_data, cpu).net_dma, NULL);
4850 cpu = first_cpu(cpu_online_map);
4852 for_each_cpu_mask_nr(chan_idx, net_dma->channel_mask) {
4853 chan = net_dma->channels[chan_idx];
4855 n = ((num_online_cpus() / cpus_weight(net_dma->channel_mask))
4856 + (i < (num_online_cpus() %
4857 cpus_weight(net_dma->channel_mask)) ? 1 : 0));
4860 per_cpu(softnet_data, cpu).net_dma = chan;
4861 cpu = next_cpu(cpu, cpu_online_map);
4869 * netdev_dma_event - event callback for the net_dma_client
4870 * @client: should always be net_dma_client
4871 * @chan: DMA channel for the event
4872 * @state: DMA state to be handled
4874 static enum dma_state_client
4875 netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
4876 enum dma_state state)
4878 int i, found = 0, pos = -1;
4879 struct net_dma *net_dma =
4880 container_of(client, struct net_dma, client);
4881 enum dma_state_client ack = DMA_DUP; /* default: take no action */
4883 spin_lock(&net_dma->lock);
4885 case DMA_RESOURCE_AVAILABLE:
4886 for (i = 0; i < nr_cpu_ids; i++)
4887 if (net_dma->channels[i] == chan) {
4890 } else if (net_dma->channels[i] == NULL && pos < 0)
4893 if (!found && pos >= 0) {
4895 net_dma->channels[pos] = chan;
4896 cpu_set(pos, net_dma->channel_mask);
4897 net_dma_rebalance(net_dma);
4900 case DMA_RESOURCE_REMOVED:
4901 for (i = 0; i < nr_cpu_ids; i++)
4902 if (net_dma->channels[i] == chan) {
4910 cpu_clear(pos, net_dma->channel_mask);
4911 net_dma->channels[i] = NULL;
4912 net_dma_rebalance(net_dma);
4918 spin_unlock(&net_dma->lock);
4924 * netdev_dma_register - register the networking subsystem as a DMA client
4926 static int __init netdev_dma_register(void)
4928 net_dma.channels = kzalloc(nr_cpu_ids * sizeof(struct dma_chan *),
4930 if (unlikely(!net_dma.channels)) {
4932 "netdev_dma: no memory for net_dma.channels\n");
4935 spin_lock_init(&net_dma.lock);
4936 dma_cap_set(DMA_MEMCPY, net_dma.client.cap_mask);
4937 dma_async_client_register(&net_dma.client);
4938 dma_async_client_chan_request(&net_dma.client);
4943 static int __init netdev_dma_register(void) { return -ENODEV; }
4944 #endif /* CONFIG_NET_DMA */
4947 * netdev_increment_features - increment feature set by one
4948 * @all: current feature set
4949 * @one: new feature set
4950 * @mask: mask feature set
4952 * Computes a new feature set after adding a device with feature set
4953 * @one to the master device with current feature set @all. Will not
4954 * enable anything that is off in @mask. Returns the new feature set.
4956 unsigned long netdev_increment_features(unsigned long all, unsigned long one,
4959 /* If device needs checksumming, downgrade to it. */
4960 if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
4961 all ^= NETIF_F_NO_CSUM | (one & NETIF_F_ALL_CSUM);
4962 else if (mask & NETIF_F_ALL_CSUM) {
4963 /* If one device supports v4/v6 checksumming, set for all. */
4964 if (one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM) &&
4965 !(all & NETIF_F_GEN_CSUM)) {
4966 all &= ~NETIF_F_ALL_CSUM;
4967 all |= one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
4970 /* If one device supports hw checksumming, set for all. */
4971 if (one & NETIF_F_GEN_CSUM && !(all & NETIF_F_GEN_CSUM)) {
4972 all &= ~NETIF_F_ALL_CSUM;
4973 all |= NETIF_F_HW_CSUM;
4977 one |= NETIF_F_ALL_CSUM;
4979 one |= all & NETIF_F_ONE_FOR_ALL;
4980 all &= one | NETIF_F_LLTX | NETIF_F_GSO;
4981 all |= one & mask & NETIF_F_ONE_FOR_ALL;
4985 EXPORT_SYMBOL(netdev_increment_features);
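/*
 * Example (editor's sketch): a master device (bonding-style) folds the
 * feature sets of its slaves together, then sanitizes the result with
 * netdev_fix_features(). nslaves and slave_features[] stand in for a
 * real slave list; master is the hypothetical master net_device.
 *
 *	unsigned long mask = master->features;
 *	unsigned long features = mask & ~NETIF_F_ONE_FOR_ALL;
 *	int i;
 *
 *	for (i = 0; i < nslaves; i++)
 *		features = netdev_increment_features(features,
 *						     slave_features[i], mask);
 *	master->features = netdev_fix_features(features, master->name);
 */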
4987 static struct hlist_head *netdev_create_hash(void)
4990 struct hlist_head *hash;
4992 hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
4994 for (i = 0; i < NETDEV_HASHENTRIES; i++)
4995 INIT_HLIST_HEAD(&hash[i]);
5000 /* Initialize per network namespace state */
5001 static int __net_init netdev_init(struct net *net)
5003 INIT_LIST_HEAD(&net->dev_base_head);
5005 net->dev_name_head = netdev_create_hash();
5006 if (net->dev_name_head == NULL)
5009 net->dev_index_head = netdev_create_hash();
5010 if (net->dev_index_head == NULL)
5016 kfree(net->dev_name_head);
5022 * netdev_drivername - network driver for the device
5023 * @dev: network device
5024 * @buffer: buffer for resulting name
5025 * @len: size of buffer
5027 * Determine network driver for device.
5029 char *netdev_drivername(const struct net_device *dev, char *buffer, int len)
5031 const struct device_driver *driver;
5032 const struct device *parent;
5034 if (len <= 0 || !buffer)
5038 parent = dev->dev.parent;
5043 driver = parent->driver;
5044 if (driver && driver->name)
5045 strlcpy(buffer, driver->name, len);
5049 static void __net_exit netdev_exit(struct net *net)
5051 kfree(net->dev_name_head);
5052 kfree(net->dev_index_head);
5055 static struct pernet_operations __net_initdata netdev_net_ops = {
5056 .init = netdev_init,
5057 .exit = netdev_exit,
5060 static void __net_exit default_device_exit(struct net *net)
5062 struct net_device *dev, *next;
5064 * Push all migratable network devices back to the
5065 * initial network namespace
5068 for_each_netdev_safe(net, dev, next) {
5070 char fb_name[IFNAMSIZ];
5072 /* Ignore unmovable devices (e.g. the loopback device) */
5073 if (dev->features & NETIF_F_NETNS_LOCAL)
5076 /* Delete virtual devices */
5077 if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink) {
5078 dev->rtnl_link_ops->dellink(dev);
5082 /* Push remaining network devices to init_net */
5083 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
5084 err = dev_change_net_namespace(dev, &init_net, fb_name);
5086 printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n",
5087 __func__, dev->name, err);
5094 static struct pernet_operations __net_initdata default_device_ops = {
5095 .exit = default_device_exit,
5099 * Initialize the DEV module. At boot time this walks the device list and
5100 * unhooks any devices that fail to initialise (normally hardware not
5101 * present) and leaves us with a valid list of present and active devices.
5106 * This is called single threaded during boot, so no need
5107 * to take the rtnl semaphore.
5109 static int __init net_dev_init(void)
5111 int i, rc = -ENOMEM;
5113 BUG_ON(!dev_boot_phase);
5115 if (dev_proc_init())
5118 if (netdev_kobject_init())
5121 INIT_LIST_HEAD(&ptype_all);
5122 for (i = 0; i < PTYPE_HASH_SIZE; i++)
5123 INIT_LIST_HEAD(&ptype_base[i]);
5125 if (register_pernet_subsys(&netdev_net_ops))
5129 * Initialise the packet receive queues.
5132 for_each_possible_cpu(i) {
5133 struct softnet_data *queue;
5135 queue = &per_cpu(softnet_data, i);
5136 skb_queue_head_init(&queue->input_pkt_queue);
5137 queue->completion_queue = NULL;
5138 INIT_LIST_HEAD(&queue->poll_list);
5140 queue->backlog.poll = process_backlog;
5141 queue->backlog.weight = weight_p;
5142 queue->backlog.gro_list = NULL;
5147 /* The loopback device is special: if any other network device
5148 * is present in a network namespace, the loopback device must
5149 * be present too. Since we now dynamically allocate and free the
5150 * loopback device, ensure this invariant is maintained by
5151 * keeping the loopback device the first device on the
5152 * list of network devices. This ensures that the loopback device
5153 * is the first device that appears and the last one that disappears.
5156 if (register_pernet_device(&loopback_net_ops))
5159 if (register_pernet_device(&default_device_ops))
5162 netdev_dma_register();
5164 open_softirq(NET_TX_SOFTIRQ, net_tx_action);
5165 open_softirq(NET_RX_SOFTIRQ, net_rx_action);
5167 hotcpu_notifier(dev_cpu_callback, 0);
5175 subsys_initcall(net_dev_init);
5177 EXPORT_SYMBOL(__dev_get_by_index);
5178 EXPORT_SYMBOL(__dev_get_by_name);
5179 EXPORT_SYMBOL(__dev_remove_pack);
5180 EXPORT_SYMBOL(dev_valid_name);
5181 EXPORT_SYMBOL(dev_add_pack);
5182 EXPORT_SYMBOL(dev_alloc_name);
5183 EXPORT_SYMBOL(dev_close);
5184 EXPORT_SYMBOL(dev_get_by_flags);
5185 EXPORT_SYMBOL(dev_get_by_index);
5186 EXPORT_SYMBOL(dev_get_by_name);
5187 EXPORT_SYMBOL(dev_open);
5188 EXPORT_SYMBOL(dev_queue_xmit);
5189 EXPORT_SYMBOL(dev_remove_pack);
5190 EXPORT_SYMBOL(dev_set_allmulti);
5191 EXPORT_SYMBOL(dev_set_promiscuity);
5192 EXPORT_SYMBOL(dev_change_flags);
5193 EXPORT_SYMBOL(dev_set_mtu);
5194 EXPORT_SYMBOL(dev_set_mac_address);
5195 EXPORT_SYMBOL(free_netdev);
5196 EXPORT_SYMBOL(netdev_boot_setup_check);
5197 EXPORT_SYMBOL(netdev_set_master);
5198 EXPORT_SYMBOL(netdev_state_change);
5199 EXPORT_SYMBOL(netif_receive_skb);
5200 EXPORT_SYMBOL(netif_rx);
5201 EXPORT_SYMBOL(register_gifconf);
5202 EXPORT_SYMBOL(register_netdevice);
5203 EXPORT_SYMBOL(register_netdevice_notifier);
5204 EXPORT_SYMBOL(skb_checksum_help);
5205 EXPORT_SYMBOL(synchronize_net);
5206 EXPORT_SYMBOL(unregister_netdevice);
5207 EXPORT_SYMBOL(unregister_netdevice_notifier);
5208 EXPORT_SYMBOL(net_enable_timestamp);
5209 EXPORT_SYMBOL(net_disable_timestamp);
5210 EXPORT_SYMBOL(dev_get_flags);
5212 #if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
5213 EXPORT_SYMBOL(br_handle_frame_hook);
5214 EXPORT_SYMBOL(br_fdb_get_hook);
5215 EXPORT_SYMBOL(br_fdb_put_hook);
5218 EXPORT_SYMBOL(dev_load);
5220 EXPORT_PER_CPU_SYMBOL(softnet_data);