/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2008 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/notifier.h>
#include <linux/tcp.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/topology.h>
#include "net_driver.h"

#define EFX_MAX_MTU (9 * 1024)
/* RX slow fill workqueue. If memory allocation fails in the fast path,
 * a work item is pushed onto this work queue to retry the allocation later,
 * to avoid the NIC being starved of RX buffers. Since this is a per-CPU
 * workqueue, there is nothing to be gained in making it per NIC.
 */
static struct workqueue_struct *refill_workqueue;

/* Reset workqueue. If any NIC has a hardware failure then a reset will be
 * queued onto this work queue. This is not a per-NIC work queue, because
 * efx_reset_work() acquires the rtnl lock, so resets are naturally serialised.
 */
static struct workqueue_struct *reset_workqueue;

/**************************************************************************
 *
 *************************************************************************/

/*
 * Enable large receive offload (LRO) aka soft segment reassembly (SSR)
 *
 * This sets the default for new devices. It can be controlled later
 * using ethtool.
 */
static int lro = true;
module_param(lro, int, 0644);
MODULE_PARM_DESC(lro, "Large receive offload acceleration");
/*
 * Use separate channels for TX and RX events
 *
 * Set this to 1 to use separate channels for TX and RX. It allows us
 * to control interrupt affinity separately for TX and RX.
 *
 * This is only used in MSI-X interrupt mode
 */
static unsigned int separate_tx_channels;
module_param(separate_tx_channels, uint, 0644);
MODULE_PARM_DESC(separate_tx_channels,
		 "Use separate channels for TX and RX");

/* This is the weight assigned to each of the (per-channel) virtual
 * NAPI devices.
 */
static int napi_weight = 64;

/* This is the time (in jiffies) between invocations of the hardware
 * monitor, which checks for known hardware bugs and resets the
 * hardware and driver as necessary.
 */
unsigned int efx_monitor_interval = 1 * HZ;
/* This controls whether or not the driver will initialise devices
 * with invalid MAC addresses stored in the EEPROM or flash. If true,
 * such devices will be initialised with a random locally-generated
 * MAC address. This allows for loading the sfc_mtd driver to
 * reprogram the flash, even if the flash contents (including the MAC
 * address) have previously been erased.
 */
static unsigned int allow_bad_hwaddr;
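
/* For illustration (added commentary, not from the original source):
 * random_ether_addr() in this kernel generation produces a valid,
 * locally administered unicast address, roughly equivalent to:
 *
 *	u8 addr[ETH_ALEN];
 *	get_random_bytes(addr, ETH_ALEN);
 *	addr[0] &= 0xfe;	(clear the multicast/group bit)
 *	addr[0] |= 0x02;	(set the locally administered bit)
 */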
/* Initial interrupt moderation settings. They can be modified after
 * module load with ethtool.
 *
 * The default for RX should strike a balance between increasing the
 * round-trip latency and reducing overhead.
 */
static unsigned int rx_irq_mod_usec = 60;

/* Initial interrupt moderation settings. They can be modified after
 * module load with ethtool.
 *
 * This default is chosen to ensure that a 10G link does not go idle
 * while a TX queue is stopped after it has become full. A queue is
 * restarted when it drops below half full. The time this takes (assuming
 * worst case 3 descriptors per packet and 1024 descriptors) is
 * 512 / 3 * 1.2 = 205 usec.
 */
static unsigned int tx_irq_mod_usec = 150;
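
/* Worked numbers behind the default above (added commentary, not from
 * the original source): a full-size Ethernet frame occupies roughly 1538
 * bytes on the wire, i.e. 1538 * 8 / 10000 ~= 1.2 usec at 10Gb/s.
 * Draining the 512 descriptors below the restart threshold at a worst
 * case of 3 descriptors/packet is 512 / 3 ~= 170 packets, and
 * 170 * 1.2 usec ~= 205 usec, so a 150 usec moderation period leaves
 * time for an interrupt before the queue can run completely dry.
 */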
/* This is the first interrupt mode to try out of:
 * 0 => MSI-X
 * 1 => MSI
 * 2 => legacy
 */
static unsigned int interrupt_mode;

/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
 * i.e. the number of CPUs among which we may distribute simultaneous
 * interrupt handling.
 *
 * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
 * The default (0) means to assign an interrupt to each package (level II cache)
 */
static unsigned int rss_cpus;
module_param(rss_cpus, uint, 0444);
MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");

static int phy_flash_cfg;
module_param(phy_flash_cfg, int, 0644);
MODULE_PARM_DESC(phy_flash_cfg, "Set PHYs into reflash mode initially");
/**************************************************************************
 *
 * Utility functions and prototypes
 *
 *************************************************************************/
static void efx_remove_channel(struct efx_channel *channel);
static void efx_remove_port(struct efx_nic *efx);
static void efx_fini_napi(struct efx_nic *efx);
static void efx_fini_channels(struct efx_nic *efx);

#define EFX_ASSERT_RESET_SERIALISED(efx)		\
	do {						\
		if (efx->state == STATE_RUNNING)	\
			ASSERT_RTNL();			\
	} while (0)
/**************************************************************************
 *
 * Event queue processing
 *
 *************************************************************************/

/* Process channel's event queue
 *
 * This function is responsible for processing the event queue of a
 * single channel. The caller must guarantee that this function will
 * never be concurrently called more than once on the same channel,
 * though different channels may be processed concurrently.
 */
static int efx_process_channel(struct efx_channel *channel, int rx_quota)
{
	struct efx_nic *efx = channel->efx;
	int rx_packets;

	if (unlikely(efx->reset_pending != RESET_TYPE_NONE ||
		     !channel->enabled))
		return 0;

	rx_packets = falcon_process_eventq(channel, rx_quota);

	/* Deliver last RX packet. */
	if (channel->rx_pkt) {
		__efx_rx_packet(channel, channel->rx_pkt,
				channel->rx_pkt_csummed);
		channel->rx_pkt = NULL;
	}

	efx_flush_lro(channel);
	efx_rx_strategy(channel);

	efx_fast_push_rx_descriptors(&efx->rx_queue[channel->channel]);

	return rx_packets;
}
/* Mark channel as finished processing
 *
 * Note that since we will not receive further interrupts for this
 * channel before we finish processing and call the eventq_read_ack()
 * method, there is no need to use the interrupt hold-off timers.
 */
static inline void efx_channel_processed(struct efx_channel *channel)
{
	/* The interrupt handler for this channel may set work_pending
	 * as soon as we acknowledge the events we've seen. Make sure
	 * it's cleared before then. */
	channel->work_pending = false;
	smp_wmb();

	falcon_eventq_read_ack(channel);
}
/* NAPI poll handler
 *
 * NAPI guarantees serialisation of polls of the same device, which
 * provides the guarantee required by efx_process_channel().
 */
static int efx_poll(struct napi_struct *napi, int budget)
{
	struct efx_channel *channel =
		container_of(napi, struct efx_channel, napi_str);
	struct net_device *napi_dev = channel->napi_dev;
	int rx_packets;

	EFX_TRACE(channel->efx, "channel %d NAPI poll executing on CPU %d\n",
		  channel->channel, raw_smp_processor_id());

	rx_packets = efx_process_channel(channel, budget);

	if (rx_packets < budget) {
		/* There is no race here; although napi_disable() will
		 * only wait for netif_rx_complete(), this isn't a problem
		 * since efx_channel_processed() will have no effect if
		 * interrupts have already been disabled.
		 */
		netif_rx_complete(napi_dev, napi);
		efx_channel_processed(channel);
	}

	return rx_packets;
}
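
/* Added note (not from the original source): returning a count strictly
 * less than the budget is the standard NAPI completion signal; the core
 * will not poll this channel again until it is rescheduled. The event
 * side of that handshake, in this kernel generation, looks roughly like
 * the following sketch (hypothetical, for illustration only):
 *
 *	channel->work_pending = true;
 *	smp_wmb();
 *	netif_rx_schedule(channel->napi_dev, &channel->napi_str);
 */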
/* Process the eventq of the specified channel immediately on this CPU
 *
 * Disable hardware generated interrupts, wait for any existing
 * processing to finish, then directly poll (and ack) the eventq.
 * Finally reenable NAPI and interrupts.
 *
 * Since we are touching interrupts the caller should hold the suspend lock
 */
void efx_process_channel_now(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;

	BUG_ON(!channel->used_flags);
	BUG_ON(!channel->enabled);

	/* Disable interrupts and wait for ISRs to complete */
	falcon_disable_interrupts(efx);
	if (efx->legacy_irq)
		synchronize_irq(efx->legacy_irq);
	if (channel->irq)
		synchronize_irq(channel->irq);

	/* Wait for any NAPI processing to complete */
	napi_disable(&channel->napi_str);

	/* Poll the channel */
	efx_process_channel(channel, efx->type->evq_size);

	/* Ack the eventq. This may cause an interrupt to be generated
	 * when they are reenabled */
	efx_channel_processed(channel);

	napi_enable(&channel->napi_str);
	falcon_enable_interrupts(efx);
}
/* Create event queue
 * Event queue memory allocations are done only once. If the channel
 * is reset, the memory buffer will be reused; this guards against
 * errors during channel reset and also simplifies interrupt handling.
 */
static int efx_probe_eventq(struct efx_channel *channel)
{
	EFX_LOG(channel->efx, "chan %d create event queue\n", channel->channel);

	return falcon_probe_eventq(channel);
}

/* Prepare channel's event queue */
static void efx_init_eventq(struct efx_channel *channel)
{
	EFX_LOG(channel->efx, "chan %d init event queue\n", channel->channel);

	channel->eventq_read_ptr = 0;

	falcon_init_eventq(channel);
}

static void efx_fini_eventq(struct efx_channel *channel)
{
	EFX_LOG(channel->efx, "chan %d fini event queue\n", channel->channel);

	falcon_fini_eventq(channel);
}

static void efx_remove_eventq(struct efx_channel *channel)
{
	EFX_LOG(channel->efx, "chan %d remove event queue\n", channel->channel);

	falcon_remove_eventq(channel);
}
/**************************************************************************
 *
 *************************************************************************/

static int efx_probe_channel(struct efx_channel *channel)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int rc;

	EFX_LOG(channel->efx, "creating channel %d\n", channel->channel);

	rc = efx_probe_eventq(channel);
	if (rc)
		goto fail1;

	efx_for_each_channel_tx_queue(tx_queue, channel) {
		rc = efx_probe_tx_queue(tx_queue);
		if (rc)
			goto fail2;
	}

	efx_for_each_channel_rx_queue(rx_queue, channel) {
		rc = efx_probe_rx_queue(rx_queue);
		if (rc)
			goto fail3;
	}

	channel->n_rx_frm_trunc = 0;

	return 0;

 fail3:
	efx_for_each_channel_rx_queue(rx_queue, channel)
		efx_remove_rx_queue(rx_queue);
 fail2:
	efx_for_each_channel_tx_queue(tx_queue, channel)
		efx_remove_tx_queue(tx_queue);
 fail1:
	return rc;
}
/* Channels are shutdown and reinitialised whilst the NIC is running
 * to propagate configuration changes (mtu, checksum offload), or
 * to clear hardware error conditions
 */
static void efx_init_channels(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	struct efx_channel *channel;

	/* Calculate the rx buffer allocation parameters required to
	 * support the current MTU, including padding for header
	 * alignment and overruns.
	 */
	efx->rx_buffer_len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) +
			      EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
			      efx->type->rx_buffer_padding);
	efx->rx_buffer_order = get_order(efx->rx_buffer_len);

	/* Initialise the channels */
	efx_for_each_channel(channel, efx) {
		EFX_LOG(channel->efx, "init chan %d\n", channel->channel);

		efx_init_eventq(channel);

		efx_for_each_channel_tx_queue(tx_queue, channel)
			efx_init_tx_queue(tx_queue);

		/* The rx buffer allocation strategy is MTU dependent */
		efx_rx_strategy(channel);

		efx_for_each_channel_rx_queue(rx_queue, channel)
			efx_init_rx_queue(rx_queue);

		WARN_ON(channel->rx_pkt != NULL);
		efx_rx_strategy(channel);
	}
}

/* This enables event queue processing and packet transmission.
 *
 * Note that this function is not allowed to fail, since that would
 * introduce too much complexity into the suspend/resume path.
 */
static void efx_start_channel(struct efx_channel *channel)
{
	struct efx_rx_queue *rx_queue;

	EFX_LOG(channel->efx, "starting chan %d\n", channel->channel);

	if (!(channel->efx->net_dev->flags & IFF_UP))
		netif_napi_add(channel->napi_dev, &channel->napi_str,
			       efx_poll, napi_weight);

	/* The interrupt handler for this channel may set work_pending
	 * as soon as we enable it. Make sure it's cleared before
	 * then. Similarly, make sure it sees the enabled flag set. */
	channel->work_pending = false;
	channel->enabled = true;
	smp_wmb();

	napi_enable(&channel->napi_str);

	/* Load up RX descriptors */
	efx_for_each_channel_rx_queue(rx_queue, channel)
		efx_fast_push_rx_descriptors(rx_queue);
}
/* This disables event queue processing and packet transmission.
 * This function does not guarantee that all queue processing
 * (e.g. RX refill) is complete.
 */
static void efx_stop_channel(struct efx_channel *channel)
{
	struct efx_rx_queue *rx_queue;

	if (!channel->enabled)
		return;

	EFX_LOG(channel->efx, "stop chan %d\n", channel->channel);

	channel->enabled = false;
	napi_disable(&channel->napi_str);

	/* Ensure that any worker threads have exited or will be no-ops */
	efx_for_each_channel_rx_queue(rx_queue, channel) {
		spin_lock_bh(&rx_queue->add_lock);
		spin_unlock_bh(&rx_queue->add_lock);
	}
}

static void efx_fini_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int rc;

	EFX_ASSERT_RESET_SERIALISED(efx);
	BUG_ON(efx->port_enabled);

	rc = falcon_flush_queues(efx);
	if (rc)
		EFX_ERR(efx, "failed to flush queues\n");
	else
		EFX_LOG(efx, "successfully flushed all queues\n");

	efx_for_each_channel(channel, efx) {
		EFX_LOG(channel->efx, "shut down chan %d\n", channel->channel);

		efx_for_each_channel_rx_queue(rx_queue, channel)
			efx_fini_rx_queue(rx_queue);
		efx_for_each_channel_tx_queue(tx_queue, channel)
			efx_fini_tx_queue(tx_queue);
		efx_fini_eventq(channel);
	}
}
static void efx_remove_channel(struct efx_channel *channel)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;

	EFX_LOG(channel->efx, "destroy chan %d\n", channel->channel);

	efx_for_each_channel_rx_queue(rx_queue, channel)
		efx_remove_rx_queue(rx_queue);
	efx_for_each_channel_tx_queue(tx_queue, channel)
		efx_remove_tx_queue(tx_queue);
	efx_remove_eventq(channel);

	channel->used_flags = 0;
}

void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay)
{
	queue_delayed_work(refill_workqueue, &rx_queue->work, delay);
}

/**************************************************************************
 *
 *************************************************************************/

/* This ensures that the kernel is kept informed (via
 * netif_carrier_on/off) of the link status, and also maintains the
 * stop state that the link status imposes on the port's TX queue.
 */
static void efx_link_status_changed(struct efx_nic *efx)
{
	/* SFC Bug 5356: A net_dev notifier is registered, so we must ensure
	 * that no events are triggered between unregister_netdev() and the
	 * driver unloading. A more general condition is that NETDEV_CHANGE
	 * can only be generated between NETDEV_UP and NETDEV_DOWN */
	if (!netif_running(efx->net_dev))
		return;

	if (efx->port_inhibited) {
		netif_carrier_off(efx->net_dev);
		return;
	}

	if (efx->link_up != netif_carrier_ok(efx->net_dev)) {
		efx->n_link_state_changes++;

		if (efx->link_up)
			netif_carrier_on(efx->net_dev);
		else
			netif_carrier_off(efx->net_dev);
	}

	/* Status message for kernel log */
	if (efx->link_up) {
		struct mii_if_info *gmii = &efx->mii;
		unsigned adv, lpa;

		/* NONE here means direct XAUI from the controller, with no
		 * MDIO-attached device we can query. */
		if (efx->phy_type != PHY_TYPE_NONE) {
			adv = gmii_advertised(gmii);
			lpa = gmii_lpa(gmii);
		} else {
			lpa = GM_LPA_10000 | LPA_DUPLEX;
			adv = lpa;
		}

		EFX_INFO(efx, "link up at %dMbps %s-duplex "
			 "(adv %04x lpa %04x) (MTU %d)%s\n",
			 (efx->link_options & GM_LPA_10000 ? 10000 :
			  (efx->link_options & GM_LPA_1000 ? 1000 :
			   (efx->link_options & GM_LPA_100 ? 100 :
			    10))),
			 (efx->link_options & GM_LPA_DUPLEX ?
			  "full" : "half"),
			 adv, lpa,
			 efx->net_dev->mtu,
			 (efx->promiscuous ? " [PROMISC]" : ""));
	} else {
		EFX_INFO(efx, "link down\n");
	}
}
/* This call reinitialises the MAC to pick up new PHY settings. The
 * caller must hold the mac_lock */
void __efx_reconfigure_port(struct efx_nic *efx)
{
	WARN_ON(!mutex_is_locked(&efx->mac_lock));

	EFX_LOG(efx, "reconfiguring MAC from PHY settings on CPU %d\n",
		raw_smp_processor_id());

	/* Serialise the promiscuous flag with efx_set_multicast_list. */
	if (efx_dev_registered(efx)) {
		netif_addr_lock_bh(efx->net_dev);
		netif_addr_unlock_bh(efx->net_dev);
	}

	falcon_reconfigure_xmac(efx);

	/* Inform kernel of loss/gain of carrier */
	efx_link_status_changed(efx);
}

/* Reinitialise the MAC to pick up new PHY settings, even if the port is
 * disabled. */
void efx_reconfigure_port(struct efx_nic *efx)
{
	EFX_ASSERT_RESET_SERIALISED(efx);

	mutex_lock(&efx->mac_lock);
	__efx_reconfigure_port(efx);
	mutex_unlock(&efx->mac_lock);
}

/* Asynchronous efx_reconfigure_port work item. To speed up efx_flush_all()
 * we don't efx_reconfigure_port() if the port is disabled. Care is taken
 * in efx_stop_all() and efx_start_port() to prevent PHY events being lost */
static void efx_reconfigure_work(struct work_struct *data)
{
	struct efx_nic *efx = container_of(data, struct efx_nic,
					   reconfigure_work);

	mutex_lock(&efx->mac_lock);
	if (efx->port_enabled)
		__efx_reconfigure_port(efx);
	mutex_unlock(&efx->mac_lock);
}
static int efx_probe_port(struct efx_nic *efx)
{
	int rc;

	EFX_LOG(efx, "create port\n");

	/* Connect up MAC/PHY operations table and read MAC address */
	rc = falcon_probe_port(efx);
	if (rc)
		goto err;

	if (phy_flash_cfg)
		efx->phy_mode = PHY_MODE_SPECIAL;

	/* Sanity check MAC address */
	if (is_valid_ether_addr(efx->mac_address)) {
		memcpy(efx->net_dev->dev_addr, efx->mac_address, ETH_ALEN);
	} else {
		EFX_ERR(efx, "invalid MAC address %pM\n",
			efx->mac_address);
		if (!allow_bad_hwaddr) {
			rc = -EINVAL;
			goto err;
		}
		random_ether_addr(efx->net_dev->dev_addr);
		EFX_INFO(efx, "using locally-generated MAC %pM\n",
			 efx->net_dev->dev_addr);
	}

	return 0;

 err:
	efx_remove_port(efx);
	return rc;
}

static int efx_init_port(struct efx_nic *efx)
{
	int rc;

	EFX_LOG(efx, "init port\n");

	/* Initialise the MAC and PHY */
	rc = falcon_init_xmac(efx);
	if (rc)
		return rc;

	efx->port_initialized = true;
	efx->stats_enabled = true;

	/* Reconfigure port to program MAC registers */
	falcon_reconfigure_xmac(efx);

	return 0;
}
/* Allow efx_reconfigure_port() to be scheduled, and close the window
 * between efx_stop_port and efx_flush_all whereby a previously scheduled
 * efx_reconfigure_port() may have been cancelled */
static void efx_start_port(struct efx_nic *efx)
{
	EFX_LOG(efx, "start port\n");
	BUG_ON(efx->port_enabled);

	mutex_lock(&efx->mac_lock);
	efx->port_enabled = true;
	__efx_reconfigure_port(efx);
	mutex_unlock(&efx->mac_lock);
}

/* Prevent efx_reconfigure_work and efx_monitor() from executing, and
 * efx_set_multicast_list() from scheduling efx_reconfigure_work.
 * efx_reconfigure_work can still be scheduled via NAPI processing
 * until efx_flush_all() is called */
static void efx_stop_port(struct efx_nic *efx)
{
	EFX_LOG(efx, "stop port\n");

	mutex_lock(&efx->mac_lock);
	efx->port_enabled = false;
	mutex_unlock(&efx->mac_lock);

	/* Serialise against efx_set_multicast_list() */
	if (efx_dev_registered(efx)) {
		netif_addr_lock_bh(efx->net_dev);
		netif_addr_unlock_bh(efx->net_dev);
	}
}

static void efx_fini_port(struct efx_nic *efx)
{
	EFX_LOG(efx, "shut down port\n");

	if (!efx->port_initialized)
		return;

	falcon_fini_xmac(efx);
	efx->port_initialized = false;

	efx->link_up = false;
	efx_link_status_changed(efx);
}

static void efx_remove_port(struct efx_nic *efx)
{
	EFX_LOG(efx, "destroying port\n");

	falcon_remove_port(efx);
}
/**************************************************************************
 *
 *************************************************************************/

/* This configures the PCI device to enable I/O and DMA. */
static int efx_init_io(struct efx_nic *efx)
{
	struct pci_dev *pci_dev = efx->pci_dev;
	dma_addr_t dma_mask = efx->type->max_dma_mask;
	int rc;

	EFX_LOG(efx, "initialising I/O\n");

	rc = pci_enable_device(pci_dev);
	if (rc) {
		EFX_ERR(efx, "failed to enable PCI device\n");
		goto fail1;
	}

	pci_set_master(pci_dev);

	/* Set the PCI DMA mask. Try all possibilities from our
	 * genuine mask down to 32 bits, because some architectures
	 * (e.g. x86_64 with iommu_sac_force set) will allow 40 bit
	 * masks even though they reject 46 bit masks.
	 */
	while (dma_mask > 0x7fffffffUL) {
		if (pci_dma_supported(pci_dev, dma_mask) &&
		    ((rc = pci_set_dma_mask(pci_dev, dma_mask)) == 0))
			break;
		dma_mask >>= 1;
	}
	if (rc) {
		EFX_ERR(efx, "could not find a suitable DMA mask\n");
		goto fail2;
	}
	EFX_LOG(efx, "using DMA mask %llx\n", (unsigned long long) dma_mask);
	rc = pci_set_consistent_dma_mask(pci_dev, dma_mask);
	if (rc) {
		/* pci_set_consistent_dma_mask() is not *allowed* to
		 * fail with a mask that pci_set_dma_mask() accepted,
		 * but just in case...
		 */
		EFX_ERR(efx, "failed to set consistent DMA mask\n");
		goto fail2;
	}

	efx->membase_phys = pci_resource_start(efx->pci_dev,
					       efx->type->mem_bar);
	rc = pci_request_region(pci_dev, efx->type->mem_bar, "sfc");
	if (rc) {
		EFX_ERR(efx, "request for memory BAR failed\n");
		rc = -EIO;
		goto fail3;
	}
	efx->membase = ioremap_nocache(efx->membase_phys,
				       efx->type->mem_map_size);
	if (!efx->membase) {
		EFX_ERR(efx, "could not map memory BAR %d at %llx+%x\n",
			efx->type->mem_bar,
			(unsigned long long)efx->membase_phys,
			efx->type->mem_map_size);
		rc = -ENOMEM;
		goto fail4;
	}
	EFX_LOG(efx, "memory BAR %u at %llx+%x (virtual %p)\n",
		efx->type->mem_bar, (unsigned long long)efx->membase_phys,
		efx->type->mem_map_size, efx->membase);

	return 0;

 fail4:
	pci_release_region(efx->pci_dev, efx->type->mem_bar);
 fail3:
	efx->membase_phys = 0;
 fail2:
	pci_disable_device(efx->pci_dev);
 fail1:
	return rc;
}
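
/* Added example (not from the original source): with a hypothetical
 * 46-bit max_dma_mask, the mask walk in efx_init_io() above tries 46,
 * 45, ... bits, halting at the first mask the platform accepts, and
 * gives up only below 32 bits (the 0x7fffffff boundary). This degrades
 * gracefully on IOMMU configurations that reject the wider masks.
 */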
static void efx_fini_io(struct efx_nic *efx)
{
	EFX_LOG(efx, "shutting down I/O\n");

	if (efx->membase) {
		iounmap(efx->membase);
		efx->membase = NULL;
	}

	if (efx->membase_phys) {
		pci_release_region(efx->pci_dev, efx->type->mem_bar);
		efx->membase_phys = 0;
	}

	pci_disable_device(efx->pci_dev);
}

/* Get number of RX queues wanted. Return number of online CPU
 * packages in the expectation that an IRQ balancer will spread
 * interrupts across them. */
static int efx_wanted_rx_queues(void)
{
	cpumask_t core_mask;
	int count;
	int cpu;

	cpus_clear(core_mask);
	count = 0;
	for_each_online_cpu(cpu) {
		if (!cpu_isset(cpu, core_mask)) {
			++count;
			cpus_or(core_mask, core_mask,
				topology_core_siblings(cpu));
		}
	}

	return count;
}
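
/* Added example (not from the original source): topology_core_siblings()
 * yields the mask of CPUs sharing a package, so the loop above counts
 * distinct packages. On a hypothetical two-socket, quad-core machine
 * with all eight CPUs online it returns 2, so the driver requests two
 * RX queues (and MSI-X vectors) by default.
 */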
/* Probe the number and type of interrupts we are able to obtain, and
 * the resulting numbers of channels and RX queues.
 */
static void efx_probe_interrupts(struct efx_nic *efx)
{
	int max_channels =
		min_t(int, efx->type->phys_addr_channels, EFX_MAX_CHANNELS);
	int rc, i;

	if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
		struct msix_entry xentries[EFX_MAX_CHANNELS];
		int wanted_ints;
		int rx_queues;

		/* We want one RX queue and interrupt per CPU package
		 * (or as specified by the rss_cpus module parameter).
		 * We will need one channel per interrupt.
		 */
		rx_queues = rss_cpus ? rss_cpus : efx_wanted_rx_queues();
		wanted_ints = rx_queues + (separate_tx_channels ? 1 : 0);
		wanted_ints = min(wanted_ints, max_channels);

		for (i = 0; i < wanted_ints; i++)
			xentries[i].entry = i;
		rc = pci_enable_msix(efx->pci_dev, xentries, wanted_ints);
		if (rc > 0) {
			EFX_ERR(efx, "WARNING: Insufficient MSI-X vectors"
				" available (%d < %d).\n", rc, wanted_ints);
			EFX_ERR(efx, "WARNING: Performance may be reduced.\n");
			EFX_BUG_ON_PARANOID(rc >= wanted_ints);
			wanted_ints = rc;
			rc = pci_enable_msix(efx->pci_dev, xentries,
					     wanted_ints);
		}

		if (rc == 0) {
			efx->n_rx_queues = min(rx_queues, wanted_ints);
			efx->n_channels = wanted_ints;
			for (i = 0; i < wanted_ints; i++)
				efx->channel[i].irq = xentries[i].vector;
		} else {
			/* Fall back to single channel MSI */
			efx->interrupt_mode = EFX_INT_MODE_MSI;
			EFX_ERR(efx, "could not enable MSI-X\n");
		}
	}

	/* Try single interrupt MSI */
	if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
		efx->n_rx_queues = 1;
		efx->n_channels = 1;
		rc = pci_enable_msi(efx->pci_dev);
		if (rc == 0) {
			efx->channel[0].irq = efx->pci_dev->irq;
		} else {
			EFX_ERR(efx, "could not enable MSI\n");
			efx->interrupt_mode = EFX_INT_MODE_LEGACY;
		}
	}

	/* Assume legacy interrupts */
	if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
		efx->n_rx_queues = 1;
		efx->n_channels = 1 + (separate_tx_channels ? 1 : 0);
		efx->legacy_irq = efx->pci_dev->irq;
	}
}

static void efx_remove_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	/* Remove MSI/MSI-X interrupts */
	efx_for_each_channel(channel, efx)
		channel->irq = 0;
	pci_disable_msi(efx->pci_dev);
	pci_disable_msix(efx->pci_dev);

	/* Remove legacy interrupt */
	efx->legacy_irq = 0;
}
static void efx_set_channels(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;

	efx_for_each_tx_queue(tx_queue, efx) {
		if (separate_tx_channels)
			tx_queue->channel = &efx->channel[efx->n_channels-1];
		else
			tx_queue->channel = &efx->channel[0];
		tx_queue->channel->used_flags |= EFX_USED_BY_TX;
	}

	efx_for_each_rx_queue(rx_queue, efx) {
		rx_queue->channel = &efx->channel[rx_queue->queue];
		rx_queue->channel->used_flags |= EFX_USED_BY_RX;
	}
}

static int efx_probe_nic(struct efx_nic *efx)
{
	int rc;

	EFX_LOG(efx, "creating NIC\n");

	/* Carry out hardware-type specific initialisation */
	rc = falcon_probe_nic(efx);
	if (rc)
		return rc;

	/* Determine the number of channels and RX queues by trying to hook
	 * in MSI-X interrupts. */
	efx_probe_interrupts(efx);

	efx_set_channels(efx);

	/* Initialise the interrupt moderation settings */
	efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec);

	return 0;
}

static void efx_remove_nic(struct efx_nic *efx)
{
	EFX_LOG(efx, "destroying NIC\n");

	efx_remove_interrupts(efx);
	falcon_remove_nic(efx);
}
/**************************************************************************
 *
 * NIC startup/shutdown
 *
 *************************************************************************/

static int efx_probe_all(struct efx_nic *efx)
{
	struct efx_channel *channel;
	int rc;

	/* Create NIC */
	rc = efx_probe_nic(efx);
	if (rc) {
		EFX_ERR(efx, "failed to create NIC\n");
		goto fail1;
	}

	/* Create port */
	rc = efx_probe_port(efx);
	if (rc) {
		EFX_ERR(efx, "failed to create port\n");
		goto fail2;
	}

	/* Create channels */
	efx_for_each_channel(channel, efx) {
		rc = efx_probe_channel(channel);
		if (rc) {
			EFX_ERR(efx, "failed to create channel %d\n",
				channel->channel);
			goto fail3;
		}
	}

	return 0;

 fail3:
	efx_for_each_channel(channel, efx)
		efx_remove_channel(channel);
	efx_remove_port(efx);
 fail2:
	efx_remove_nic(efx);
 fail1:
	return rc;
}

/* Called after previous invocation(s) of efx_stop_all, restarts the
 * port, kernel transmit queue, NAPI processing and hardware interrupts,
 * and ensures that the port is scheduled to be reconfigured.
 * This function is safe to call multiple times when the NIC is in any
 * state.
 */
static void efx_start_all(struct efx_nic *efx)
{
	struct efx_channel *channel;

	EFX_ASSERT_RESET_SERIALISED(efx);

	/* Check that it is appropriate to restart the interface. All
	 * of these flags are safe to read under just the rtnl lock */
	if (efx->port_enabled)
		return;
	if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT))
		return;
	if (efx_dev_registered(efx) && !netif_running(efx->net_dev))
		return;

	/* Mark the port as enabled so port reconfigurations can start, then
	 * restart the transmit interface early so the watchdog timer stops */
	efx_start_port(efx);
	if (efx_dev_registered(efx))
		efx_wake_queue(efx);

	efx_for_each_channel(channel, efx)
		efx_start_channel(channel);

	falcon_enable_interrupts(efx);

	/* Start hardware monitor if we're in RUNNING */
	if (efx->state == STATE_RUNNING)
		queue_delayed_work(efx->workqueue, &efx->monitor_work,
				   efx_monitor_interval);
}
/* Flush all delayed work. Should only be called when no more delayed work
 * will be scheduled. This doesn't flush pending online resets (efx_reset),
 * since we're holding the rtnl_lock at this point. */
static void efx_flush_all(struct efx_nic *efx)
{
	struct efx_rx_queue *rx_queue;

	/* Make sure the hardware monitor is stopped */
	cancel_delayed_work_sync(&efx->monitor_work);

	/* Ensure that all RX slow refills are complete. */
	efx_for_each_rx_queue(rx_queue, efx)
		cancel_delayed_work_sync(&rx_queue->work);

	/* Stop scheduled port reconfigurations */
	cancel_work_sync(&efx->reconfigure_work);
}

/* Quiesce hardware and software without bringing the link down.
 * Safe to call multiple times, when the NIC and interface are in any
 * state. The caller is guaranteed to subsequently be in a position
 * to modify any hardware and software state they see fit without
 * taking locks. */
static void efx_stop_all(struct efx_nic *efx)
{
	struct efx_channel *channel;

	EFX_ASSERT_RESET_SERIALISED(efx);

	/* port_enabled can be read safely under the rtnl lock */
	if (!efx->port_enabled)
		return;

	/* Disable interrupts and wait for ISR to complete */
	falcon_disable_interrupts(efx);
	if (efx->legacy_irq)
		synchronize_irq(efx->legacy_irq);
	efx_for_each_channel(channel, efx) {
		if (channel->irq)
			synchronize_irq(channel->irq);
	}

	/* Stop all NAPI processing and synchronous rx refills */
	efx_for_each_channel(channel, efx)
		efx_stop_channel(channel);

	/* Stop all asynchronous port reconfigurations. Since all
	 * event processing has already been stopped, there is no
	 * window to lose PHY events */
	efx_stop_port(efx);

	/* Flush reconfigure_work, refill_workqueue, monitor_work */
	efx_flush_all(efx);

	/* Isolate the MAC from the TX and RX engines, so that queue
	 * flushes will complete in a timely fashion. */
	falcon_drain_tx_fifo(efx);

	/* Stop the kernel transmit interface late, so the watchdog
	 * timer isn't ticking over the flush */
	if (efx_dev_registered(efx)) {
		efx_stop_queue(efx);
		netif_tx_lock_bh(efx->net_dev);
		netif_tx_unlock_bh(efx->net_dev);
	}
}

static void efx_remove_all(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_remove_channel(channel);
	efx_remove_port(efx);
	efx_remove_nic(efx);
}

/* A convenience function to safely flush all the queues */
void efx_flush_queues(struct efx_nic *efx)
{
	EFX_ASSERT_RESET_SERIALISED(efx);

	efx_stop_all(efx);

	efx_fini_channels(efx);
	efx_init_channels(efx);

	efx_start_all(efx);
}
/**************************************************************************
 *
 * Interrupt moderation
 *
 **************************************************************************/

/* Set interrupt moderation parameters */
void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, int rx_usecs)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;

	EFX_ASSERT_RESET_SERIALISED(efx);

	efx_for_each_tx_queue(tx_queue, efx)
		tx_queue->channel->irq_moderation = tx_usecs;

	efx_for_each_rx_queue(rx_queue, efx)
		rx_queue->channel->irq_moderation = rx_usecs;
}
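
/* Added usage note (not from the original source, and assuming the sfc
 * ethtool_ops wire up the standard get/set_coalesce methods): after
 * module load, the same per-channel irq_moderation values set here can
 * be adjusted with something like
 *
 *	ethtool -C ethX rx-usecs 60 tx-usecs 150
 */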
/**************************************************************************
 *
 *************************************************************************/

/* Run periodically off the general workqueue. Serialised against
 * efx_reconfigure_port via the mac_lock */
static void efx_monitor(struct work_struct *data)
{
	struct efx_nic *efx = container_of(data, struct efx_nic,
					   monitor_work.work);
	int rc;

	EFX_TRACE(efx, "hardware monitor executing on CPU %d\n",
		  raw_smp_processor_id());

	/* If the mac_lock is already held then it is likely a port
	 * reconfiguration is already in place, which will likely do
	 * most of the work of check_hw() anyway. */
	if (!mutex_trylock(&efx->mac_lock)) {
		queue_delayed_work(efx->workqueue, &efx->monitor_work,
				   efx_monitor_interval);
		return;
	}

	if (efx->port_enabled)
		rc = falcon_check_xmac(efx);
	mutex_unlock(&efx->mac_lock);

	queue_delayed_work(efx->workqueue, &efx->monitor_work,
			   efx_monitor_interval);
}

/**************************************************************************
 *
 *************************************************************************/

/*
 * Context: process, rtnl_lock() held.
 */
static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	EFX_ASSERT_RESET_SERIALISED(efx);

	return generic_mii_ioctl(&efx->mii, if_mii(ifr), cmd, NULL);
}
/**************************************************************************
 *
 *************************************************************************/

static int efx_init_napi(struct efx_nic *efx)
{
	struct efx_channel *channel;
	int rc;

	efx_for_each_channel(channel, efx) {
		channel->napi_dev = efx->net_dev;
		rc = efx_lro_init(&channel->lro_mgr, efx);
		if (rc)
			goto err;
	}
	return 0;

 err:
	efx_fini_napi(efx);
	return rc;
}

static void efx_fini_napi(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx) {
		efx_lro_fini(&channel->lro_mgr);
		channel->napi_dev = NULL;
	}
}

/**************************************************************************
 *
 * Kernel netpoll interface
 *
 *************************************************************************/

#ifdef CONFIG_NET_POLL_CONTROLLER

/* Although in the common case interrupts will be disabled, this is not
 * guaranteed. However, all our work happens inside the NAPI callback,
 * so no locking is required.
 */
static void efx_netpoll(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_schedule_channel(channel);
}

#endif
/**************************************************************************
 *
 * Kernel net device interface
 *
 *************************************************************************/

/* Context: process, rtnl_lock() held. */
static int efx_net_open(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	EFX_ASSERT_RESET_SERIALISED(efx);

	EFX_LOG(efx, "opening device %s on CPU %d\n", net_dev->name,
		raw_smp_processor_id());

	if (efx->phy_mode & PHY_MODE_SPECIAL)
		return -EBUSY;

	efx_start_all(efx);
	return 0;
}

/* Context: process, rtnl_lock() held.
 * Note that the kernel will ignore our return code; this method
 * should really be a void.
 */
static int efx_net_stop(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	EFX_LOG(efx, "closing %s on CPU %d\n", net_dev->name,
		raw_smp_processor_id());

	/* Stop the device and flush all the channels */
	efx_stop_all(efx);
	efx_fini_channels(efx);
	efx_init_channels(efx);

	return 0;
}

/* Context: process, dev_base_lock or RTNL held, non-blocking. */
static struct net_device_stats *efx_net_stats(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_mac_stats *mac_stats = &efx->mac_stats;
	struct net_device_stats *stats = &net_dev->stats;

	/* Update stats if possible, but do not wait if another thread
	 * is updating them (or resetting the NIC); slightly stale
	 * stats are acceptable.
	 */
	if (!spin_trylock(&efx->stats_lock))
		return stats;
	if (efx->stats_enabled) {
		falcon_update_stats_xmac(efx);
		falcon_update_nic_stats(efx);
	}
	spin_unlock(&efx->stats_lock);

	stats->rx_packets = mac_stats->rx_packets;
	stats->tx_packets = mac_stats->tx_packets;
	stats->rx_bytes = mac_stats->rx_bytes;
	stats->tx_bytes = mac_stats->tx_bytes;
	stats->multicast = mac_stats->rx_multicast;
	stats->collisions = mac_stats->tx_collision;
	stats->rx_length_errors = (mac_stats->rx_gtjumbo +
				   mac_stats->rx_length_error);
	stats->rx_over_errors = efx->n_rx_nodesc_drop_cnt;
	stats->rx_crc_errors = mac_stats->rx_bad;
	stats->rx_frame_errors = mac_stats->rx_align_error;
	stats->rx_fifo_errors = mac_stats->rx_overflow;
	stats->rx_missed_errors = mac_stats->rx_missed;
	stats->tx_window_errors = mac_stats->tx_late_collision;

	stats->rx_errors = (stats->rx_length_errors +
			    stats->rx_over_errors +
			    stats->rx_crc_errors +
			    stats->rx_frame_errors +
			    stats->rx_fifo_errors +
			    stats->rx_missed_errors +
			    mac_stats->rx_symbol_error);
	stats->tx_errors = (stats->tx_window_errors +
			    mac_stats->tx_bad);

	return stats;
}
/* Context: netif_tx_lock held, BHs disabled. */
static void efx_watchdog(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	EFX_ERR(efx, "TX stuck with stop_count=%d port_enabled=%d:"
		" resetting channels\n",
		atomic_read(&efx->netif_stop_count), efx->port_enabled);

	efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
}

/* Context: process, rtnl_lock() held. */
static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	int rc = 0;

	EFX_ASSERT_RESET_SERIALISED(efx);

	if (new_mtu > EFX_MAX_MTU)
		return -EINVAL;

	efx_stop_all(efx);

	EFX_LOG(efx, "changing MTU to %d\n", new_mtu);

	efx_fini_channels(efx);
	net_dev->mtu = new_mtu;
	efx_init_channels(efx);

	efx_start_all(efx);
	return rc;
}

static int efx_set_mac_address(struct net_device *net_dev, void *data)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct sockaddr *addr = data;
	char *new_addr = addr->sa_data;

	EFX_ASSERT_RESET_SERIALISED(efx);

	if (!is_valid_ether_addr(new_addr)) {
		EFX_ERR(efx, "invalid ethernet MAC address requested: %pM\n",
			new_addr);
		return -EINVAL;
	}

	memcpy(net_dev->dev_addr, new_addr, net_dev->addr_len);

	/* Reconfigure the MAC */
	efx_reconfigure_port(efx);

	return 0;
}
/* Context: netif_addr_lock held, BHs disabled. */
static void efx_set_multicast_list(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct dev_mc_list *mc_list = net_dev->mc_list;
	union efx_multicast_hash *mc_hash = &efx->multicast_hash;
	bool promiscuous = !!(net_dev->flags & IFF_PROMISC);
	bool changed = (efx->promiscuous != promiscuous);
	u32 crc;
	int bit;
	int i;

	efx->promiscuous = promiscuous;

	/* Build multicast hash table */
	if (promiscuous || (net_dev->flags & IFF_ALLMULTI)) {
		memset(mc_hash, 0xff, sizeof(*mc_hash));
	} else {
		memset(mc_hash, 0x00, sizeof(*mc_hash));
		for (i = 0; i < net_dev->mc_count; i++) {
			crc = ether_crc_le(ETH_ALEN, mc_list->dmi_addr);
			bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
			set_bit_le(bit, mc_hash->byte);
			mc_list = mc_list->next;
		}
	}

	if (!efx->port_enabled)
		/* Delay pushing settings until efx_start_port() */
		return;

	if (changed)
		queue_work(efx->workqueue, &efx->reconfigure_work);

	/* Create and activate new global multicast hash table */
	falcon_set_multicast_hash(efx);
}
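
/* Illustrative sketch (added commentary, not part of the original driver):
 * the per-address filter bit computed in the loop above, pulled out as a
 * hypothetical helper to make the hashing scheme explicit -- the hardware
 * indexes its multicast filter table by the low-order bits of the
 * little-endian CRC32 of the destination address.
 */
static inline unsigned int efx_mcast_hash_bit(const u8 *addr)
{
	return ether_crc_le(ETH_ALEN, addr) & (EFX_MCAST_HASH_ENTRIES - 1);
}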
static const struct net_device_ops efx_netdev_ops = {
	.ndo_open = efx_net_open,
	.ndo_stop = efx_net_stop,
	.ndo_get_stats = efx_net_stats,
	.ndo_tx_timeout = efx_watchdog,
	.ndo_start_xmit = efx_hard_start_xmit,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_do_ioctl = efx_ioctl,
	.ndo_change_mtu = efx_change_mtu,
	.ndo_set_mac_address = efx_set_mac_address,
	.ndo_set_multicast_list = efx_set_multicast_list,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = efx_netpoll,
#endif
};

static int efx_netdev_event(struct notifier_block *this,
			    unsigned long event, void *ptr)
{
	struct net_device *net_dev = ptr;

	if (net_dev->netdev_ops == &efx_netdev_ops &&
	    event == NETDEV_CHANGENAME) {
		struct efx_nic *efx = netdev_priv(net_dev);

		strcpy(efx->name, net_dev->name);
		efx_mtd_rename(efx);
	}

	return NOTIFY_DONE;
}

static struct notifier_block efx_netdev_notifier = {
	.notifier_call = efx_netdev_event,
};

static int efx_register_netdev(struct efx_nic *efx)
{
	struct net_device *net_dev = efx->net_dev;
	int rc;

	net_dev->watchdog_timeo = 5 * HZ;
	net_dev->irq = efx->pci_dev->irq;
	net_dev->netdev_ops = &efx_netdev_ops;
	SET_NETDEV_DEV(net_dev, &efx->pci_dev->dev);
	SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops);

	/* Always start with carrier off; PHY events will detect the link */
	netif_carrier_off(efx->net_dev);

	/* Clear MAC statistics */
	falcon_update_stats_xmac(efx);
	memset(&efx->mac_stats, 0, sizeof(efx->mac_stats));

	rc = register_netdev(net_dev);
	if (rc) {
		EFX_ERR(efx, "could not register net dev\n");
		return rc;
	}
	strcpy(efx->name, net_dev->name);

	return 0;
}
static void efx_unregister_netdev(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;

	if (!efx->net_dev)
		return;

	BUG_ON(netdev_priv(efx->net_dev) != efx);

	/* Free up any skbs still remaining. This has to happen before
	 * we try to unregister the netdev as running their destructors
	 * may be needed to get the device ref. count to 0. */
	efx_for_each_tx_queue(tx_queue, efx)
		efx_release_tx_buffers(tx_queue);

	if (efx_dev_registered(efx)) {
		strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
		unregister_netdev(efx->net_dev);
	}
}
/**************************************************************************
 *
 * Device reset and suspend
 *
 **************************************************************************/

/* Tears down the entire software state and most of the hardware state
 * before reset. */
void efx_reset_down(struct efx_nic *efx, struct ethtool_cmd *ecmd)
{
	int rc;

	EFX_ASSERT_RESET_SERIALISED(efx);

	/* The net_dev->get_stats handler is quite slow, and will fail
	 * if a fetch is pending over reset. Serialise against it. */
	spin_lock(&efx->stats_lock);
	efx->stats_enabled = false;
	spin_unlock(&efx->stats_lock);

	efx_stop_all(efx);
	mutex_lock(&efx->mac_lock);
	mutex_lock(&efx->spi_lock);

	rc = falcon_xmac_get_settings(efx, ecmd);
	if (rc)
		EFX_ERR(efx, "could not back up PHY settings\n");

	efx_fini_channels(efx);
}

/* This function will always ensure that the locks acquired in
 * efx_reset_down() are released. A failure return code indicates
 * that we were unable to reinitialise the hardware, and the
 * driver should be disabled. If ok is false, then the rx and tx
 * engines are not restarted, pending a RESET_DISABLE. */
int efx_reset_up(struct efx_nic *efx, struct ethtool_cmd *ecmd, bool ok)
{
	int rc;

	EFX_ASSERT_RESET_SERIALISED(efx);

	rc = falcon_init_nic(efx);
	if (rc) {
		EFX_ERR(efx, "failed to initialise NIC\n");
		ok = false;
	}

	if (ok) {
		efx_init_channels(efx);

		if (falcon_xmac_set_settings(efx, ecmd))
			EFX_ERR(efx, "could not restore PHY settings\n");
	}

	mutex_unlock(&efx->spi_lock);
	mutex_unlock(&efx->mac_lock);

	if (ok) {
		efx_start_all(efx);
		efx->stats_enabled = true;
	}
	return rc;
}
/* Reset the NIC as transparently as possible. Do not reset the PHY.
 * Note that the reset may fail, in which case the card will be left
 * in a most-probably-unusable state.
 *
 * This function will sleep. You cannot reset from within an atomic
 * state; use efx_schedule_reset() instead.
 *
 * Grabs the rtnl_lock.
 */
static int efx_reset(struct efx_nic *efx)
{
	struct ethtool_cmd ecmd;
	enum reset_type method = efx->reset_pending;
	int rc;

	/* Serialise with kernel interfaces */
	rtnl_lock();

	/* If we're not RUNNING then don't reset. Leave the reset_pending
	 * flag set so that efx_pci_probe_main will be retried */
	if (efx->state != STATE_RUNNING) {
		EFX_INFO(efx, "scheduled reset quenched. NIC not RUNNING\n");
		goto unlock;
	}

	EFX_INFO(efx, "resetting (%d)\n", method);

	efx_reset_down(efx, &ecmd);

	rc = falcon_reset_hw(efx, method);
	if (rc) {
		EFX_ERR(efx, "failed to reset hardware\n");
		goto fail;
	}

	/* Allow resets to be rescheduled. */
	efx->reset_pending = RESET_TYPE_NONE;

	/* Reinitialise bus-mastering, which may have been turned off before
	 * the reset was scheduled. This is still appropriate, even in the
	 * RESET_TYPE_DISABLE since this driver generally assumes the hardware
	 * can respond to requests. */
	pci_set_master(efx->pci_dev);

	/* Leave device stopped if necessary */
	if (method == RESET_TYPE_DISABLE) {
		rc = -EIO;
		goto fail;
	}

	rc = efx_reset_up(efx, &ecmd, true);
	if (rc)
		goto disable;

	EFX_LOG(efx, "reset complete\n");
 unlock:
	rtnl_unlock();
	return 0;

 fail:
	efx_reset_up(efx, &ecmd, false);
 disable:
	EFX_ERR(efx, "has been disabled\n");
	efx->state = STATE_DISABLED;

	rtnl_unlock();
	efx_unregister_netdev(efx);
	efx_fini_port(efx);
	return rc;
}

/* The worker thread exists so that code that cannot sleep can
 * schedule a reset for later.
 */
static void efx_reset_work(struct work_struct *data)
{
	struct efx_nic *nic = container_of(data, struct efx_nic, reset_work);

	efx_reset(nic);
}
void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
{
	enum reset_type method;

	if (efx->reset_pending != RESET_TYPE_NONE) {
		EFX_INFO(efx, "quenching already scheduled reset\n");
		return;
	}

	switch (type) {
	case RESET_TYPE_INVISIBLE:
	case RESET_TYPE_ALL:
	case RESET_TYPE_WORLD:
	case RESET_TYPE_DISABLE:
		method = type;
		break;
	case RESET_TYPE_RX_RECOVERY:
	case RESET_TYPE_RX_DESC_FETCH:
	case RESET_TYPE_TX_DESC_FETCH:
	case RESET_TYPE_TX_SKIP:
		method = RESET_TYPE_INVISIBLE;
		break;
	default:
		method = RESET_TYPE_ALL;
		break;
	}

	if (method != type)
		EFX_LOG(efx, "scheduling reset (%d:%d)\n", type, method);
	else
		EFX_LOG(efx, "scheduling reset (%d)\n", method);

	efx->reset_pending = method;

	queue_work(reset_workqueue, &efx->reset_work);
}
/**************************************************************************
 *
 * List of NICs we support
 *
 **************************************************************************/

/* PCI device ID table */
static struct pci_device_id efx_pci_table[] __devinitdata = {
	{PCI_DEVICE(EFX_VENDID_SFC, FALCON_A_P_DEVID),
	 .driver_data = (unsigned long) &falcon_a_nic_type},
	{PCI_DEVICE(EFX_VENDID_SFC, FALCON_B_P_DEVID),
	 .driver_data = (unsigned long) &falcon_b_nic_type},
	{0}			/* end of list */
};

/**************************************************************************
 *
 * Dummy PHY/MAC/Board operations
 *
 * Can be used for some unimplemented operations
 * Needed so all function pointers are valid and do not have to be tested
 * before use
 *
 **************************************************************************/

int efx_port_dummy_op_int(struct efx_nic *efx)
{
	return 0;
}
void efx_port_dummy_op_void(struct efx_nic *efx) {}
void efx_port_dummy_op_blink(struct efx_nic *efx, bool blink) {}

static struct efx_phy_operations efx_dummy_phy_operations = {
	.init = efx_port_dummy_op_int,
	.reconfigure = efx_port_dummy_op_void,
	.check_hw = efx_port_dummy_op_int,
	.fini = efx_port_dummy_op_void,
	.clear_interrupt = efx_port_dummy_op_void,
};

static struct efx_board efx_dummy_board_info = {
	.init = efx_port_dummy_op_int,
	.init_leds = efx_port_dummy_op_int,
	.set_fault_led = efx_port_dummy_op_blink,
	.monitor = efx_port_dummy_op_int,
	.blink = efx_port_dummy_op_blink,
	.fini = efx_port_dummy_op_void,
};
/**************************************************************************
 *
 *************************************************************************/

/* This zeroes out and then fills in the invariants in a struct
 * efx_nic (including all sub-structures).
 */
static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
			   struct pci_dev *pci_dev, struct net_device *net_dev)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int i;

	/* Initialise common structures */
	memset(efx, 0, sizeof(*efx));
	spin_lock_init(&efx->biu_lock);
	spin_lock_init(&efx->phy_lock);
	mutex_init(&efx->spi_lock);
	INIT_WORK(&efx->reset_work, efx_reset_work);
	INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor);
	efx->pci_dev = pci_dev;
	efx->state = STATE_INIT;
	efx->reset_pending = RESET_TYPE_NONE;
	strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
	efx->board_info = efx_dummy_board_info;

	efx->net_dev = net_dev;
	efx->rx_checksum_enabled = true;
	spin_lock_init(&efx->netif_stop_lock);
	spin_lock_init(&efx->stats_lock);
	mutex_init(&efx->mac_lock);
	efx->phy_op = &efx_dummy_phy_operations;
	efx->mii.dev = net_dev;
	INIT_WORK(&efx->reconfigure_work, efx_reconfigure_work);
	atomic_set(&efx->netif_stop_count, 1);

	for (i = 0; i < EFX_MAX_CHANNELS; i++) {
		channel = &efx->channel[i];
		channel->efx = efx;
		channel->channel = i;
		channel->work_pending = false;
	}
	for (i = 0; i < EFX_TX_QUEUE_COUNT; i++) {
		tx_queue = &efx->tx_queue[i];
		tx_queue->efx = efx;
		tx_queue->queue = i;
		tx_queue->buffer = NULL;
		tx_queue->channel = &efx->channel[0]; /* for safety */
		tx_queue->tso_headers_free = NULL;
	}
	for (i = 0; i < EFX_MAX_RX_QUEUES; i++) {
		rx_queue = &efx->rx_queue[i];
		rx_queue->efx = efx;
		rx_queue->queue = i;
		rx_queue->channel = &efx->channel[0]; /* for safety */
		rx_queue->buffer = NULL;
		spin_lock_init(&rx_queue->add_lock);
		INIT_DELAYED_WORK(&rx_queue->work, efx_rx_work);
	}

	efx->type = type;

	/* Sanity-check NIC type */
	EFX_BUG_ON_PARANOID(efx->type->txd_ring_mask &
			    (efx->type->txd_ring_mask + 1));
	EFX_BUG_ON_PARANOID(efx->type->rxd_ring_mask &
			    (efx->type->rxd_ring_mask + 1));
	EFX_BUG_ON_PARANOID(efx->type->evq_size &
			    (efx->type->evq_size - 1));
	/* As close as we can get to guaranteeing that we don't overflow */
	EFX_BUG_ON_PARANOID(efx->type->evq_size <
			    (efx->type->txd_ring_mask + 1 +
			     efx->type->rxd_ring_mask + 1));
	EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS);
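
	/* Added reasoning (not from the original source): each TX or RX
	 * descriptor completes with at most one event, so one full TX ring
	 * plus one full RX ring can post at most txd_ring_mask + 1 +
	 * rxd_ring_mask + 1 events; the check above therefore guarantees
	 * the event queue cannot overflow from descriptor traffic alone.
	 */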
	/* Higher numbered interrupt modes are less capable! */
	efx->interrupt_mode = max(efx->type->max_interrupt_mode,
				  interrupt_mode);

	efx->workqueue = create_singlethread_workqueue("sfc_work");
	if (!efx->workqueue)
		return -ENOMEM;

	return 0;
}

static void efx_fini_struct(struct efx_nic *efx)
{
	if (efx->workqueue) {
		destroy_workqueue(efx->workqueue);
		efx->workqueue = NULL;
	}
}
/**************************************************************************
 *
 *************************************************************************/

/* Main body of final NIC shutdown code
 * This is called only at module unload (or hotplug removal).
 */
static void efx_pci_remove_main(struct efx_nic *efx)
{
	EFX_ASSERT_RESET_SERIALISED(efx);

	/* Skip everything if we never obtained a valid membase */
	if (!efx->membase)
		return;

	efx_fini_channels(efx);
	efx_fini_port(efx);

	/* Shutdown the board, then the NIC and board state */
	efx->board_info.fini(efx);
	falcon_fini_interrupt(efx);

	efx_fini_napi(efx);
	efx_remove_all(efx);
}

/* Final NIC shutdown
 * This is called only at module unload (or hotplug removal).
 */
static void efx_pci_remove(struct pci_dev *pci_dev)
{
	struct efx_nic *efx;

	efx = pci_get_drvdata(pci_dev);
	if (!efx)
		return;

	efx_mtd_remove(efx);

	/* Mark the NIC as fini, then stop the interface */
	rtnl_lock();
	efx->state = STATE_FINI;
	dev_close(efx->net_dev);

	/* Allow any queued efx_resets() to complete */
	rtnl_unlock();

	if (efx->membase == NULL)
		goto out;

	efx_unregister_netdev(efx);

	/* Wait for any scheduled resets to complete. No more will be
	 * scheduled from this point because efx_stop_all() has been
	 * called, we are no longer registered with driverlink, and
	 * the net_device's have been removed. */
	cancel_work_sync(&efx->reset_work);

	efx_pci_remove_main(efx);

 out:
	efx_fini_io(efx);
	EFX_LOG(efx, "shutdown successful\n");

	pci_set_drvdata(pci_dev, NULL);
	efx_fini_struct(efx);
	free_netdev(efx->net_dev);
}
/* Main body of NIC initialisation
 * This is called at module load (or hotplug insertion, theoretically).
 */
static int efx_pci_probe_main(struct efx_nic *efx)
{
	int rc;

	/* Do start-of-day initialisation */
	rc = efx_probe_all(efx);
	if (rc)
		goto fail1;

	rc = efx_init_napi(efx);
	if (rc)
		goto fail2;

	/* Initialise the board */
	rc = efx->board_info.init(efx);
	if (rc) {
		EFX_ERR(efx, "failed to initialise board\n");
		goto fail3;
	}

	rc = falcon_init_nic(efx);
	if (rc) {
		EFX_ERR(efx, "failed to initialise NIC\n");
		goto fail4;
	}

	rc = efx_init_port(efx);
	if (rc) {
		EFX_ERR(efx, "failed to initialise port\n");
		goto fail5;
	}

	efx_init_channels(efx);

	rc = falcon_init_interrupt(efx);
	if (rc)
		goto fail6;

	return 0;

 fail6:
	efx_fini_channels(efx);
	efx_fini_port(efx);
 fail5:
 fail4:
	efx->board_info.fini(efx);
 fail3:
	efx_fini_napi(efx);
 fail2:
	efx_remove_all(efx);
 fail1:
	return rc;
}
/* NIC initialisation
 *
 * This is called at module load (or hotplug insertion,
 * theoretically). It sets up PCI mappings, tests and resets the NIC,
 * sets up and registers the network devices with the kernel and hooks
 * the interrupt service routine. It does not prepare the device for
 * transmission; this is left to the first time one of the network
 * interfaces is brought up (i.e. efx_net_open).
 */
static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
				   const struct pci_device_id *entry)
{
	struct efx_nic_type *type = (struct efx_nic_type *) entry->driver_data;
	struct net_device *net_dev;
	struct efx_nic *efx;
	int i, rc;

	/* Allocate and initialise a struct net_device and struct efx_nic */
	net_dev = alloc_etherdev(sizeof(*efx));
	if (!net_dev)
		return -ENOMEM;
	net_dev->features |= (NETIF_F_IP_CSUM | NETIF_F_SG |
			      NETIF_F_HIGHDMA | NETIF_F_TSO);
	if (lro)
		net_dev->features |= NETIF_F_LRO;
	/* Mask for features that also apply to VLAN devices */
	net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG |
				   NETIF_F_HIGHDMA | NETIF_F_TSO);
	efx = netdev_priv(net_dev);
	pci_set_drvdata(pci_dev, efx);
	rc = efx_init_struct(efx, type, pci_dev, net_dev);
	if (rc)
		goto fail1;

	EFX_INFO(efx, "Solarflare Communications NIC detected\n");

	/* Set up basic I/O (BAR mappings etc) */
	rc = efx_init_io(efx);
	if (rc)
		goto fail2;

	/* No serialisation is required with the reset path because
	 * we're in STATE_INIT. */
	for (i = 0; i < 5; i++) {
		rc = efx_pci_probe_main(efx);
		if (rc == 0)
			break;

		/* Serialise against efx_reset(). No more resets will be
		 * scheduled since efx_stop_all() has been called, and we
		 * have not and never have been registered with either
		 * the rtnetlink or driverlink layers. */
		cancel_work_sync(&efx->reset_work);

		/* Retry if a recoverable reset event has been scheduled */
		if ((efx->reset_pending != RESET_TYPE_INVISIBLE) &&
		    (efx->reset_pending != RESET_TYPE_ALL))
			goto fail3;

		efx->reset_pending = RESET_TYPE_NONE;
	}
	if (rc) {
		EFX_ERR(efx, "Could not reset NIC\n");
		goto fail4;
	}

	/* Switch to the running state before we expose the device to
	 * the OS. This is to ensure that the initial gathering of
	 * MAC stats succeeds. */
	rtnl_lock();
	efx->state = STATE_RUNNING;
	rtnl_unlock();

	rc = efx_register_netdev(efx);
	if (rc)
		goto fail5;

	EFX_LOG(efx, "initialisation successful\n");

	efx_mtd_probe(efx); /* allowed to fail */
	return 0;

 fail5:
	efx_pci_remove_main(efx);
 fail4:
 fail3:
	efx_fini_io(efx);
 fail2:
	efx_fini_struct(efx);
 fail1:
	EFX_LOG(efx, "initialisation failed. rc=%d\n", rc);
	free_netdev(net_dev);
	return rc;
}
static struct pci_driver efx_pci_driver = {
	.name = EFX_DRIVER_NAME,
	.id_table = efx_pci_table,
	.probe = efx_pci_probe,
	.remove = efx_pci_remove,
};

/**************************************************************************
 *
 * Kernel module interface
 *
 *************************************************************************/

module_param(interrupt_mode, uint, 0444);
MODULE_PARM_DESC(interrupt_mode,
		 "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)");

static int __init efx_init_module(void)
{
	int rc;

	printk(KERN_INFO "Solarflare NET driver v" EFX_DRIVER_VERSION "\n");

	rc = register_netdevice_notifier(&efx_netdev_notifier);
	if (rc)
		goto err_notifier;

	refill_workqueue = create_workqueue("sfc_refill");
	if (!refill_workqueue) {
		rc = -ENOMEM;
		goto err_refill;
	}
	reset_workqueue = create_singlethread_workqueue("sfc_reset");
	if (!reset_workqueue) {
		rc = -ENOMEM;
		goto err_reset;
	}

	rc = pci_register_driver(&efx_pci_driver);
	if (rc < 0)
		goto err_pci;

	return 0;

 err_pci:
	destroy_workqueue(reset_workqueue);
 err_reset:
	destroy_workqueue(refill_workqueue);
 err_refill:
	unregister_netdevice_notifier(&efx_netdev_notifier);
 err_notifier:
	return rc;
}

static void __exit efx_exit_module(void)
{
	printk(KERN_INFO "Solarflare NET driver unloading\n");

	pci_unregister_driver(&efx_pci_driver);
	destroy_workqueue(reset_workqueue);
	destroy_workqueue(refill_workqueue);
	unregister_netdevice_notifier(&efx_netdev_notifier);
}

module_init(efx_init_module);
module_exit(efx_exit_module);

MODULE_AUTHOR("Michael Brown <mbrown@fensystems.co.uk> and "
	      "Solarflare Communications");
MODULE_DESCRIPTION("Solarflare Communications network driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, efx_pci_table);