/*
 * drivers/net/ibm_newemac/core.c
 *
 * Driver for PowerPC 4xx on-chip ethernet controller.
 *
 * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
 *                <benh@kernel.crashing.org>
 *
 * Based on the arch/ppc version of the driver:
 *
 * Copyright (c) 2004, 2005 Zultys Technologies.
 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
 *
 * Based on original work by
 * 	Matt Porter <mporter@kernel.crashing.org>
 *	(c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
 *	Armin Kuster <akuster@mvista.com>
 * 	Johnnie Peters <jpeters@mvista.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/bitops.h>
#include <linux/workqueue.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/dcr.h>
#include <asm/dcr-regs.h>

#include "core.h"
/*
 * Lack of dma_unmap_???? calls is intentional.
 *
 * API-correct usage requires additional support state information to be
 * maintained for every RX and TX buffer descriptor (BD). Unfortunately, due to
 * EMAC design (e.g. TX buffer passed from network stack can be split into
 * several BDs, dma_map_single/dma_map_page can be used to map particular BD),
 * maintaining such information will add additional overhead.
 * Current DMA API implementation for 4xx processors only ensures cache coherency
 * and dma_unmap_???? routines are empty and are likely to stay this way.
 * I decided to omit dma_unmap_??? calls because I don't want to add additional
 * complexity just for the sake of following some abstract API, when it doesn't
 * add any real benefit to the driver. I understand that this decision may be
 * controversial, but I really tried to make the code API-correct and efficient
 * at the same time and didn't come up with code I liked :(. --ebs
 */
#define DRV_NAME	"emac"
#define DRV_VERSION	"3.54"
#define DRV_DESC	"PPC 4xx OCP EMAC driver"

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR
    ("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>");
MODULE_LICENSE("GPL");
/*
 * PPC64 doesn't (yet) have a cacheable_memcpy
 */
#ifdef CONFIG_PPC64
#define cacheable_memcpy(d,s,n) memcpy((d),(s),(n))
#endif
/* minimum number of free TX descriptors required to wake up TX process */
#define EMAC_TX_WAKEUP_THRESH		(NUM_TX_BUFF / 4)

/* If packet size is less than this number, we allocate small skb and copy packet
 * contents into it instead of just sending original big skb up
 */
#define EMAC_RX_COPY_THRESH		CONFIG_IBM_NEW_EMAC_RX_COPY_THRESHOLD
/* Since multiple EMACs share MDIO lines in various ways, we need
 * to avoid re-using the same PHY ID in cases where the arch didn't
 * setup precise phy_map entries
 *
 * XXX This is something that needs to be reworked as we can have multiple
 * EMAC "sets" (multiple ASICs containing several EMACs) though we can
 * probably require in that case to have explicit PHY IDs in the device-tree
 */
static u32 busy_phy_map;
static DEFINE_MUTEX(emac_phy_map_lock);
/* This is the wait queue used to wait on any event related to probe, that
 * is discovery of MALs, other EMACs, ZMII/RGMIIs, etc...
 */
static DECLARE_WAIT_QUEUE_HEAD(emac_probe_wait);
/* Having stable interface names is a doomed idea. However, it would be nice
 * if we didn't have completely random interface names at boot too :-) It's
 * just a matter of making everybody's life easier. Since we are doing
 * threaded probing, it's a bit harder though. The base idea here is that
 * we make up a list of all emacs in the device-tree before we register the
 * driver. Every emac will then wait for the previous one in the list to
 * initialize before itself. We should also keep that list ordered by
 * cell_index.
 * That list is only 4 entries long, meaning that additional EMACs don't
 * get ordering guarantees unless EMAC_BOOT_LIST_SIZE is increased.
 */

#define EMAC_BOOT_LIST_SIZE	4
static struct device_node *emac_boot_list[EMAC_BOOT_LIST_SIZE];
/* How long should I wait for dependent devices ? */
#define EMAC_PROBE_DEP_TIMEOUT	(HZ * 5)
/* I don't want to litter system log with timeout errors
 * when we have brain-damaged PHY.
 */
static inline void emac_report_timeout_error(struct emac_instance *dev,
					     const char *error)
{
	if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX |
			     EMAC_FTR_440EP_PHY_CLK_FIX))
		DBG(dev, "%s" NL, error);
	else if (net_ratelimit())
		printk(KERN_ERR "%s: %s\n", dev->ndev->name, error);
}
/* EMAC PHY clock workaround:
 * 440EP/440GR has more sane SDR0_MFR register implementation than 440GX,
 * which allows controlling each EMAC clock
 */
static inline void emac_rx_clk_tx(struct emac_instance *dev)
{
#ifdef CONFIG_PPC_DCR_NATIVE
	if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
		dcri_clrset(SDR0, SDR0_MFR,
			    0, SDR0_MFR_ECS >> dev->cell_index);
#endif
}

static inline void emac_rx_clk_default(struct emac_instance *dev)
{
#ifdef CONFIG_PPC_DCR_NATIVE
	if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
		dcri_clrset(SDR0, SDR0_MFR,
			    SDR0_MFR_ECS >> dev->cell_index, 0);
#endif
}
/* PHY polling intervals */
#define PHY_POLL_LINK_ON	HZ
#define PHY_POLL_LINK_OFF	(HZ / 5)

/* Graceful stop timeouts in us.
 * We should allow up to 1 frame time (full-duplex, ignoring collisions)
 */
#define STOP_TIMEOUT_10		1230
#define STOP_TIMEOUT_100	124
#define STOP_TIMEOUT_1000	13
#define STOP_TIMEOUT_1000_JUMBO	73
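
/* Editor's note, not from the original source: a sanity check on the
 * numbers above. A maximum-length non-jumbo frame plus preamble and
 * inter-frame gap is roughly 1518 + 8 + 12 = 1538 bytes = 12304 bits,
 * which takes ~1230 us at 10 Mbps, ~123 us at 100 Mbps and ~12.3 us at
 * 1000 Mbps, matching the constants (rounded up for the faster speeds).
 * The jumbo value corresponds to a ~9000-byte payload: ~9038 bytes on
 * the wire = 72304 bits, i.e. ~72.3 us at 1000 Mbps, hence 73.
 */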
static unsigned char default_mcast_addr[] = {
	0x01, 0x80, 0xC2, 0x00, 0x00, 0x01
};
/* Please, keep in sync with struct ibm_emac_stats/ibm_emac_error_stats */
static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = {
	"rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum",
	"tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom",
	"rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu",
	"rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet",
	"rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error",
	"rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range",
	"rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun",
	"rx_bad_packet", "rx_runt_packet", "rx_short_event",
	"rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long",
	"rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors",
	"tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral",
	"tx_bd_excessive_collisions", "tx_bd_late_collision",
	"tx_bd_multple_collisions", "tx_bd_single_collision",
	"tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe",
	"tx_errors"
};
static irqreturn_t emac_irq(int irq, void *dev_instance);
static void emac_clean_tx_ring(struct emac_instance *dev);
static void __emac_set_multicast_list(struct emac_instance *dev);
static inline int emac_phy_supports_gige(int phy_mode)
{
	return  phy_mode == PHY_MODE_GMII ||
		phy_mode == PHY_MODE_RGMII ||
		phy_mode == PHY_MODE_TBI ||
		phy_mode == PHY_MODE_RTBI;
}

static inline int emac_phy_gpcs(int phy_mode)
{
	return  phy_mode == PHY_MODE_TBI ||
		phy_mode == PHY_MODE_RTBI;
}
static inline void emac_tx_enable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "tx_enable" NL);

	r = in_be32(&p->mr0);
	if (!(r & EMAC_MR0_TXE))
		out_be32(&p->mr0, r | EMAC_MR0_TXE);
}
static void emac_tx_disable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "tx_disable" NL);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_TXE) {
		int n = dev->stop_timeout;
		out_be32(&p->mr0, r & ~EMAC_MR0_TXE);
		while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n) {
			udelay(1);
			--n;
		}
		if (unlikely(!n))
			emac_report_timeout_error(dev, "TX disable timeout");
	}
}
static void emac_rx_enable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	if (unlikely(test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags)))
		goto out;

	DBG(dev, "rx_enable" NL);

	r = in_be32(&p->mr0);
	if (!(r & EMAC_MR0_RXE)) {
		if (unlikely(!(r & EMAC_MR0_RXI))) {
			/* Wait if previous async disable is still in progress */
			int n = dev->stop_timeout;
			while (!(r = in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
				udelay(1);
				--n;
			}
			if (unlikely(!n))
				emac_report_timeout_error(dev,
							  "RX disable timeout");
		}
		out_be32(&p->mr0, r | EMAC_MR0_RXE);
	}
 out:
	;
}
static void emac_rx_disable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "rx_disable" NL);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_RXE) {
		int n = dev->stop_timeout;
		out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
		while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
			udelay(1);
			--n;
		}
		if (unlikely(!n))
			emac_report_timeout_error(dev, "RX disable timeout");
	}
}
static inline void emac_netif_stop(struct emac_instance *dev)
{
	netif_tx_lock_bh(dev->ndev);
	dev->no_mcast = 1;
	netif_tx_unlock_bh(dev->ndev);
	dev->ndev->trans_start = jiffies;	/* prevent tx timeout */
	mal_poll_disable(dev->mal, &dev->commac);
	netif_tx_disable(dev->ndev);
}
static inline void emac_netif_start(struct emac_instance *dev)
{
	netif_tx_lock_bh(dev->ndev);
	dev->no_mcast = 0;
	if (dev->mcast_pending && netif_running(dev->ndev))
		__emac_set_multicast_list(dev);
	netif_tx_unlock_bh(dev->ndev);

	netif_wake_queue(dev->ndev);

	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (taken from tg3... though the case where that is wrong is
	 *  not terribly harmful)
	 */
	mal_poll_enable(dev->mal, &dev->commac);
}
static inline void emac_rx_disable_async(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "rx_disable_async" NL);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_RXE)
		out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
}
static int emac_reset(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	int n = 20;

	DBG(dev, "reset" NL);

	if (!dev->reset_failed) {
		/* 40x erratum suggests stopping RX channel before reset,
		 * we stop TX as well
		 */
		emac_rx_disable(dev);
		emac_tx_disable(dev);
	}

	out_be32(&p->mr0, EMAC_MR0_SRST);
	while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n)
		--n;

	if (n) {
		dev->reset_failed = 0;
		return 0;
	} else {
		emac_report_timeout_error(dev, "reset timeout");
		dev->reset_failed = 1;
		return -ETIMEDOUT;
	}
}
static void emac_hash_mc(struct emac_instance *dev)
{
	const int regs = EMAC_XAHT_REGS(dev);
	u32 *gaht_base = emac_gaht_base(dev);
	u32 gaht_temp[regs];
	struct dev_mc_list *dmi;
	int i;

	DBG(dev, "hash_mc %d" NL, dev->ndev->mc_count);

	memset(gaht_temp, 0, sizeof (gaht_temp));

	for (dmi = dev->ndev->mc_list; dmi; dmi = dmi->next) {
		int slot, reg, mask;
		DBG2(dev, "mc %02x:%02x:%02x:%02x:%02x:%02x" NL,
		     dmi->dmi_addr[0], dmi->dmi_addr[1], dmi->dmi_addr[2],
		     dmi->dmi_addr[3], dmi->dmi_addr[4], dmi->dmi_addr[5]);

		slot = EMAC_XAHT_CRC_TO_SLOT(dev, ether_crc(ETH_ALEN, dmi->dmi_addr));
		reg = EMAC_XAHT_SLOT_TO_REG(dev, slot);
		mask = EMAC_XAHT_SLOT_TO_MASK(dev, slot);

		gaht_temp[reg] |= mask;
	}

	for (i = 0; i < regs; i++)
		out_be32(gaht_base + i, gaht_temp[i]);
}
static inline u32 emac_iff2rmr(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	u32 r;

	r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE;

	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
	    r |= EMAC4_RMR_BASE;
	else
	    r |= EMAC_RMR_BASE;

	if (ndev->flags & IFF_PROMISC)
		r |= EMAC_RMR_PME;
	else if (ndev->flags & IFF_ALLMULTI ||
			 (ndev->mc_count > EMAC_XAHT_SLOTS(dev)))
		r |= EMAC_RMR_PMME;
	else if (ndev->mc_count > 0)
		r |= EMAC_RMR_MAE;

	return r;
}
static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
{
	u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC_MR1_TR0_MULT;

	DBG2(dev, "__emac_calc_base_mr1" NL);

	switch(tx_size) {
	case 2048:
		ret |= EMAC_MR1_TFS_2K;
		break;
	default:
		printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
		       dev->ndev->name, tx_size);
	}

	switch(rx_size) {
	case 16384:
		ret |= EMAC_MR1_RFS_16K;
		break;
	case 4096:
		ret |= EMAC_MR1_RFS_4K;
		break;
	default:
		printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
		       dev->ndev->name, rx_size);
	}

	return ret;
}
static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
{
	u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC4_MR1_TR |
		EMAC4_MR1_OBCI(dev->opb_bus_freq / 1000000);

	DBG2(dev, "__emac4_calc_base_mr1" NL);

	switch(tx_size) {
	case 4096:
		ret |= EMAC4_MR1_TFS_4K;
		break;
	case 2048:
		ret |= EMAC4_MR1_TFS_2K;
		break;
	default:
		printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
		       dev->ndev->name, tx_size);
	}

	switch(rx_size) {
	case 16384:
		ret |= EMAC4_MR1_RFS_16K;
		break;
	case 4096:
		ret |= EMAC4_MR1_RFS_4K;
		break;
	case 2048:
		ret |= EMAC4_MR1_RFS_2K;
		break;
	default:
		printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
		       dev->ndev->name, rx_size);
	}

	return ret;
}
static u32 emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
{
	return emac_has_feature(dev, EMAC_FTR_EMAC4) ?
		__emac4_calc_base_mr1(dev, tx_size, rx_size) :
		__emac_calc_base_mr1(dev, tx_size, rx_size);
}
static inline u32 emac_calc_trtr(struct emac_instance *dev, unsigned int size)
{
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		return ((size >> 6) - 1) << EMAC_TRTR_SHIFT_EMAC4;
	else
		return ((size >> 6) - 1) << EMAC_TRTR_SHIFT;
}
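
/* Worked example (editor's note, not from the original source): the TRTR
 * threshold field counts 64-byte blocks, minus one. emac_configure() below
 * requests tx_size / 2; with a 2048-byte TX FIFO that is 1024 bytes, i.e.
 * ((1024 >> 6) - 1) = 15, placed at a chip-dependent shift (EMAC4 uses a
 * different bit position than the older core). The 2048-byte figure is
 * illustrative only; the actual FIFO size comes from the device tree.
 */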
static inline u32 emac_calc_rwmr(struct emac_instance *dev,
				 unsigned int low, unsigned int high)
{
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		return (low << 22) | ( (high & 0x3ff) << 6);
	else
		return (low << 23) | ( (high & 0x1ff) << 7);
}
static int emac_configure(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	struct net_device *ndev = dev->ndev;
	int tx_size, rx_size, link = netif_carrier_ok(dev->ndev);
	u32 r, mr1 = 0;

	DBG(dev, "configure" NL);

	if (!link) {
		out_be32(&p->mr1, in_be32(&p->mr1)
			 | EMAC_MR1_FDE | EMAC_MR1_ILE);
		udelay(100);
	} else if (emac_reset(dev) < 0)
		return -ETIMEDOUT;

	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		tah_reset(dev->tah_dev);

	DBG(dev, " link = %d duplex = %d, pause = %d, asym_pause = %d\n",
	    link, dev->phy.duplex, dev->phy.pause, dev->phy.asym_pause);

	/* Default fifo sizes */
	tx_size = dev->tx_fifo_size;
	rx_size = dev->rx_fifo_size;

	/* No link, force loopback */
	if (!link)
		mr1 = EMAC_MR1_FDE | EMAC_MR1_ILE;

	/* Check for full duplex */
	else if (dev->phy.duplex == DUPLEX_FULL)
		mr1 |= EMAC_MR1_FDE | EMAC_MR1_MWSW_001;

	/* Adjust fifo sizes, mr1 and timeouts based on link speed */
	dev->stop_timeout = STOP_TIMEOUT_10;
	switch (dev->phy.speed) {
	case SPEED_1000:
		if (emac_phy_gpcs(dev->phy.mode)) {
			mr1 |= EMAC_MR1_MF_1000GPCS |
				EMAC_MR1_MF_IPPA(dev->phy.address);

			/* Put some arbitrary OUI, Manuf & Rev IDs so we can
			 * identify this GPCS PHY later.
			 */
			out_be32(&p->u1.emac4.ipcr, 0xdeadbeef);
		} else
			mr1 |= EMAC_MR1_MF_1000;

		/* Extended fifo sizes */
		tx_size = dev->tx_fifo_size_gige;
		rx_size = dev->rx_fifo_size_gige;

		if (dev->ndev->mtu > ETH_DATA_LEN) {
			if (emac_has_feature(dev, EMAC_FTR_EMAC4))
				mr1 |= EMAC4_MR1_JPSM;
			else
				mr1 |= EMAC_MR1_JPSM;
			dev->stop_timeout = STOP_TIMEOUT_1000_JUMBO;
		} else
			dev->stop_timeout = STOP_TIMEOUT_1000;
		break;
	case SPEED_100:
		mr1 |= EMAC_MR1_MF_100;
		dev->stop_timeout = STOP_TIMEOUT_100;
		break;
	default: /* make gcc happy */
		break;
	}

	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_set_speed(dev->rgmii_dev, dev->rgmii_port,
				dev->phy.speed);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_set_speed(dev->zmii_dev, dev->zmii_port, dev->phy.speed);

	/* on 40x erratum forces us to NOT use integrated flow control,
	 * let's hope it works on 44x ;)
	 */
	if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x) &&
	    dev->phy.duplex == DUPLEX_FULL) {
		if (dev->phy.pause)
			mr1 |= EMAC_MR1_EIFC | EMAC_MR1_APP;
		else if (dev->phy.asym_pause)
			mr1 |= EMAC_MR1_APP;
	}

	/* Add base settings & fifo sizes & program MR1 */
	mr1 |= emac_calc_base_mr1(dev, tx_size, rx_size);
	out_be32(&p->mr1, mr1);

	/* Set individual MAC address */
	out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
	out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
		 (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
		 ndev->dev_addr[5]);

	/* VLAN Tag Protocol ID */
	out_be32(&p->vtpid, 0x8100);

	/* Receive mode register */
	r = emac_iff2rmr(ndev);
	if (r & EMAC_RMR_MAE)
		emac_hash_mc(dev);
	out_be32(&p->rmr, r);

	/* FIFOs thresholds */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
			       tx_size / 2 / dev->fifo_entry_size);
	else
		r = EMAC_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
			      tx_size / 2 / dev->fifo_entry_size);
	out_be32(&p->tmr1, r);
	out_be32(&p->trtr, emac_calc_trtr(dev, tx_size / 2));
	/* PAUSE frame is sent when RX FIFO reaches its high-water mark,
	   there should be still enough space in FIFO to allow our link
	   partner time to process this frame and also time to send PAUSE
	   frame itself.

	   Here is the worst case scenario for the RX FIFO "headroom"
	   (from "The Switch Book") (100Mbps, without preamble, inter-frame gap):

	   1) One maximum-length frame on TX                    1522 bytes
	   2) One PAUSE frame time                                64 bytes
	   3) PAUSE frame decode time allowance                   64 bytes
	   4) One maximum-length frame on RX                    1522 bytes
	   5) Round-trip propagation delay of the link (100Mb)    15 bytes
	   ----------
	   3187 bytes

	   I chose to set high-water mark to RX_FIFO_SIZE / 4 (1024 bytes)
	   low-water mark  to  RX_FIFO_SIZE / 8 (512 bytes)
	 */
	r = emac_calc_rwmr(dev, rx_size / 8 / dev->fifo_entry_size,
			   rx_size / 4 / dev->fifo_entry_size);
	out_be32(&p->rwmr, r);

	/* Set PAUSE timer to the maximum */
	out_be32(&p->ptr, 0xffff);
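
	/* Worked example (editor's note, not from the original source):
	 * emac_calc_rwmr() counts FIFO entries, not bytes. Assuming a
	 * 4096-byte RX FIFO and 16-byte FIFO entries (illustrative values
	 * only), the low-water mark is 4096 / 8 / 16 = 32 entries (512
	 * bytes) and the high-water mark is 4096 / 4 / 16 = 64 entries
	 * (1024 bytes), which is where the 512/1024 figures in the comment
	 * above come from.
	 */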
	/* Enable interrupts */
	r = EMAC_ISR_OVR | EMAC_ISR_BP | EMAC_ISR_SE |
		EMAC_ISR_ALE | EMAC_ISR_BFCS | EMAC_ISR_PTLE | EMAC_ISR_ORE |
		EMAC_ISR_IRE | EMAC_ISR_TE;
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
	    r |= EMAC4_ISR_TXPE | EMAC4_ISR_RXPE /* | EMAC4_ISR_TXUE |
						    EMAC4_ISR_RXOE | */;
	out_be32(&p->iser,  r);

	/* We need to take GPCS PHY out of isolate mode after EMAC reset */
	if (emac_phy_gpcs(dev->phy.mode))
		emac_mii_reset_phy(&dev->phy);

	/* Required for Pause packet support in EMAC */
	dev_mc_add(ndev, default_mcast_addr, sizeof(default_mcast_addr), 1);

	return 0;
}
static void emac_reinitialize(struct emac_instance *dev)
{
	DBG(dev, "reinitialize" NL);

	emac_netif_stop(dev);
	if (!emac_configure(dev)) {
		emac_rx_enable(dev);
		emac_tx_enable(dev);
	}
	emac_netif_start(dev);
}
static void emac_full_tx_reset(struct emac_instance *dev)
{
	DBG(dev, "full_tx_reset" NL);

	emac_tx_disable(dev);
	mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
	emac_clean_tx_ring(dev);
	dev->tx_cnt = dev->tx_slot = dev->ack_slot = 0;

	emac_configure(dev);

	mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
	emac_tx_enable(dev);
	emac_rx_enable(dev);
}
static void emac_reset_work(struct work_struct *work)
{
	struct emac_instance *dev = container_of(work, struct emac_instance, reset_work);

	DBG(dev, "reset_work" NL);

	mutex_lock(&dev->link_lock);
	if (dev->opened) {
		emac_netif_stop(dev);
		emac_full_tx_reset(dev);
		emac_netif_start(dev);
	}
	mutex_unlock(&dev->link_lock);
}
static void emac_tx_timeout(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	DBG(dev, "tx_timeout" NL);

	schedule_work(&dev->reset_work);
}
static inline int emac_phy_done(struct emac_instance *dev, u32 stacr)
{
	int done = !!(stacr & EMAC_STACR_OC);

	if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
		done = !done;

	return done;
}
static int __emac_mdio_read(struct emac_instance *dev, u8 id, u8 reg)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r = 0;
	int n, err = -ETIMEDOUT;

	mutex_lock(&dev->mdio_lock);

	DBG2(dev, "mdio_read(%02x,%02x)" NL, id, reg);

	/* Enable proper MDIO port */
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);

	/* Wait for management interface to become idle */
	n = 20;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait idle\n");
			goto bail;
		}
	}

	/* Issue read command */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_STACR_BASE(dev->opb_bus_freq);
	else
		r = EMAC_STACR_BASE(dev->opb_bus_freq);
	if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
		r |= EMAC_STACR_OC;
	if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
		r |= EMACX_STACR_STAC_READ;
	else
		r |= EMAC_STACR_STAC_READ;
	r |= (reg & EMAC_STACR_PRA_MASK)
		| ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT);
	out_be32(&p->stacr, r);

	/* Wait for read to complete */
	n = 200;
	while (!emac_phy_done(dev, (r = in_be32(&p->stacr)))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait complete\n");
			goto bail;
		}
	}

	if (unlikely(r & EMAC_STACR_PHYE)) {
		DBG(dev, "mdio_read(%02x, %02x) failed" NL, id, reg);
		err = -EREMOTEIO;
		goto bail;
	}

	r = ((r >> EMAC_STACR_PHYD_SHIFT) & EMAC_STACR_PHYD_MASK);

	DBG2(dev, "mdio_read -> %04x" NL, r);
	err = 0;
 bail:
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
	mutex_unlock(&dev->mdio_lock);

	return err == 0 ? r : err;
}
static void __emac_mdio_write(struct emac_instance *dev, u8 id, u8 reg,
			      u16 val)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r = 0;
	int n, err = -ETIMEDOUT;

	mutex_lock(&dev->mdio_lock);

	DBG2(dev, "mdio_write(%02x,%02x,%04x)" NL, id, reg, val);

	/* Enable proper MDIO port */
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);

	/* Wait for management interface to be idle */
	n = 20;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait idle\n");
			goto bail;
		}
	}

	/* Issue write command */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_STACR_BASE(dev->opb_bus_freq);
	else
		r = EMAC_STACR_BASE(dev->opb_bus_freq);
	if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
		r |= EMAC_STACR_OC;
	if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
		r |= EMACX_STACR_STAC_WRITE;
	else
		r |= EMAC_STACR_STAC_WRITE;
	r |= (reg & EMAC_STACR_PRA_MASK) |
		((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT) |
		(val << EMAC_STACR_PHYD_SHIFT);
	out_be32(&p->stacr, r);

	/* Wait for write to complete */
	n = 200;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait complete\n");
			goto bail;
		}
	}
	err = 0;
 bail:
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
	mutex_unlock(&dev->mdio_lock);
}
static int emac_mdio_read(struct net_device *ndev, int id, int reg)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int res;

	res = __emac_mdio_read(dev->mdio_instance ? dev->mdio_instance : dev,
			       (u8) id, (u8) reg);
	return res;
}

static void emac_mdio_write(struct net_device *ndev, int id, int reg, int val)
{
	struct emac_instance *dev = netdev_priv(ndev);

	__emac_mdio_write(dev->mdio_instance ? dev->mdio_instance : dev,
			  (u8) id, (u8) reg, (u16) val);
}
/* Tx lock BH */
static void __emac_set_multicast_list(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 rmr = emac_iff2rmr(dev->ndev);

	DBG(dev, "__multicast %08x" NL, rmr);

	/* I decided to relax register access rules here to avoid
	 * full EMAC reset.
	 *
	 * There is a real problem with EMAC4 core if we use MWSW_001 bit
	 * in MR1 register and do a full EMAC reset.
	 * One TX BD status update is delayed and, after EMAC reset, it
	 * never happens, resulting in TX hung (it'll be recovered by TX
	 * timeout handler eventually, but this is just gross).
	 * So we either have to do full TX reset or try to cheat here :)
	 *
	 * The only required change is to RX mode register, so I *think* all
	 * we need is just to stop RX channel. This seems to work on all
	 * tested SoCs.  --ebs
	 *
	 * If we need the full reset, we might just trigger the workqueue
	 * and do it async... a bit nasty but should work --BenH
	 */
	dev->mcast_pending = 0;
	emac_rx_disable(dev);
	if (rmr & EMAC_RMR_MAE)
		emac_hash_mc(dev);
	out_be32(&p->rmr, rmr);
	emac_rx_enable(dev);
}
/* Tx lock BH */
static void emac_set_multicast_list(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	DBG(dev, "multicast" NL);

	BUG_ON(!netif_running(dev->ndev));

	if (dev->no_mcast) {
		dev->mcast_pending = 1;
		return;
	}
	__emac_set_multicast_list(dev);
}
static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu)
{
	int rx_sync_size = emac_rx_sync_size(new_mtu);
	int rx_skb_size = emac_rx_skb_size(new_mtu);
	int i, ret = 0;

	mutex_lock(&dev->link_lock);
	emac_netif_stop(dev);
	emac_rx_disable(dev);
	mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);

	if (dev->rx_sg_skb) {
		++dev->estats.rx_dropped_resize;
		dev_kfree_skb(dev->rx_sg_skb);
		dev->rx_sg_skb = NULL;
	}

	/* Make a first pass over RX ring and mark BDs ready, dropping
	 * non-processed packets on the way. We need this as a separate pass
	 * to simplify error recovery in the case of allocation failure later.
	 */
	for (i = 0; i < NUM_RX_BUFF; ++i) {
		if (dev->rx_desc[i].ctrl & MAL_RX_CTRL_FIRST)
			++dev->estats.rx_dropped_resize;

		dev->rx_desc[i].data_len = 0;
		dev->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY |
		    (i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
	}

	/* Reallocate RX ring only if bigger skb buffers are required */
	if (rx_skb_size <= dev->rx_skb_size)
		goto skip;

	/* Second pass, allocate new skbs */
	for (i = 0; i < NUM_RX_BUFF; ++i) {
		struct sk_buff *skb = alloc_skb(rx_skb_size, GFP_ATOMIC);
		if (!skb) {
			ret = -ENOMEM;
			goto oom;
		}

		BUG_ON(!dev->rx_skb[i]);
		dev_kfree_skb(dev->rx_skb[i]);

		skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
		dev->rx_desc[i].data_ptr =
		    dma_map_single(&dev->ofdev->dev, skb->data - 2, rx_sync_size,
				   DMA_FROM_DEVICE) + 2;
		dev->rx_skb[i] = skb;
	}
 skip:
	/* Check if we need to change "Jumbo" bit in MR1 */
	if ((new_mtu > ETH_DATA_LEN) ^ (dev->ndev->mtu > ETH_DATA_LEN)) {
		/* This is to prevent starting RX channel in emac_rx_enable() */
		set_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);

		dev->ndev->mtu = new_mtu;
		emac_full_tx_reset(dev);
	}

	mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(new_mtu));
 oom:
	/* Restart RX */
	clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
	dev->rx_slot = 0;
	mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
	emac_rx_enable(dev);
	emac_netif_start(dev);
	mutex_unlock(&dev->link_lock);

	return ret;
}
/* Process ctx, rtnl_lock semaphore */
static int emac_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int ret = 0;

	if (new_mtu < EMAC_MIN_MTU || new_mtu > dev->max_mtu)
		return -EINVAL;

	DBG(dev, "change_mtu(%d)" NL, new_mtu);

	if (netif_running(ndev)) {
		/* Check if we really need to reinitialize RX ring */
		if (emac_rx_skb_size(ndev->mtu) != emac_rx_skb_size(new_mtu))
			ret = emac_resize_rx_ring(dev, new_mtu);
	}

	if (!ret) {
		ndev->mtu = new_mtu;
		dev->rx_skb_size = emac_rx_skb_size(new_mtu);
		dev->rx_sync_size = emac_rx_sync_size(new_mtu);
	}

	return ret;
}
static void emac_clean_tx_ring(struct emac_instance *dev)
{
	int i;

	for (i = 0; i < NUM_TX_BUFF; ++i) {
		if (dev->tx_skb[i]) {
			dev_kfree_skb(dev->tx_skb[i]);
			dev->tx_skb[i] = NULL;
			if (dev->tx_desc[i].ctrl & MAL_TX_CTRL_READY)
				++dev->estats.tx_dropped;
		}
		dev->tx_desc[i].ctrl = 0;
		dev->tx_desc[i].data_ptr = 0;
	}
}
static void emac_clean_rx_ring(struct emac_instance *dev)
{
	int i;

	for (i = 0; i < NUM_RX_BUFF; ++i)
		if (dev->rx_skb[i]) {
			dev->rx_desc[i].ctrl = 0;
			dev_kfree_skb(dev->rx_skb[i]);
			dev->rx_skb[i] = NULL;
			dev->rx_desc[i].data_ptr = 0;
		}

	if (dev->rx_sg_skb) {
		dev_kfree_skb(dev->rx_sg_skb);
		dev->rx_sg_skb = NULL;
	}
}
static inline int emac_alloc_rx_skb(struct emac_instance *dev, int slot,
				    gfp_t flags)
{
	struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags);
	if (unlikely(!skb))
		return -ENOMEM;

	dev->rx_skb[slot] = skb;
	dev->rx_desc[slot].data_len = 0;

	skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
	dev->rx_desc[slot].data_ptr =
	    dma_map_single(&dev->ofdev->dev, skb->data - 2, dev->rx_sync_size,
			   DMA_FROM_DEVICE) + 2;
	wmb();
	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);

	return 0;
}
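
/* Editor's note, not from the original source: the +2/-2 arithmetic above
 * is the usual IP-alignment trick. The frame is received at skb->data,
 * which sits 2 bytes into a word so that the 14-byte Ethernet header
 * leaves the IP header word-aligned. The DMA mapping is taken from
 * skb->data - 2 (a word-aligned address) and the +2 bias makes data_ptr
 * point at the frame itself; emac_poll_rx()'s copy path appears to rely
 * on that word alignment when it cacheable_memcpy()s from skb->data - 2,
 * and emac_recycle_rx_skb() repeats the same offsets when re-arming a
 * descriptor.
 */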
static void emac_print_link_status(struct emac_instance *dev)
{
	if (netif_carrier_ok(dev->ndev))
		printk(KERN_INFO "%s: link is up, %d %s%s\n",
		       dev->ndev->name, dev->phy.speed,
		       dev->phy.duplex == DUPLEX_FULL ? "FDX" : "HDX",
		       dev->phy.pause ? ", pause enabled" :
		       dev->phy.asym_pause ? ", asymmetric pause enabled" : "");
	else
		printk(KERN_INFO "%s: link is down\n", dev->ndev->name);
}
/* Process ctx, rtnl_lock semaphore */
static int emac_open(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int err, i;

	DBG(dev, "open" NL);

	/* Setup error IRQ handler */
	err = request_irq(dev->emac_irq, emac_irq, 0, "EMAC", dev);
	if (err) {
		printk(KERN_ERR "%s: failed to request IRQ %d\n",
		       ndev->name, dev->emac_irq);
		return err;
	}

	/* Allocate RX ring */
	for (i = 0; i < NUM_RX_BUFF; ++i)
		if (emac_alloc_rx_skb(dev, i, GFP_KERNEL)) {
			printk(KERN_ERR "%s: failed to allocate RX ring\n",
			       ndev->name);
			goto oom;
		}

	dev->tx_cnt = dev->tx_slot = dev->ack_slot = dev->rx_slot = 0;
	clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
	dev->rx_sg_skb = NULL;

	mutex_lock(&dev->link_lock);
	dev->opened = 1;

	/* Start PHY polling now.
	 */
	if (dev->phy.address >= 0) {
		int link_poll_interval;
		if (dev->phy.def->ops->poll_link(&dev->phy)) {
			dev->phy.def->ops->read_link(&dev->phy);
			emac_rx_clk_default(dev);
			netif_carrier_on(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_ON;
		} else {
			emac_rx_clk_tx(dev);
			netif_carrier_off(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_OFF;
		}
		dev->link_polling = 1;
		wmb();
		schedule_delayed_work(&dev->link_work, link_poll_interval);
		emac_print_link_status(dev);
	} else
		netif_carrier_on(dev->ndev);

	emac_configure(dev);
	mal_poll_add(dev->mal, &dev->commac);
	mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
	mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(ndev->mtu));
	mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
	emac_tx_enable(dev);
	emac_rx_enable(dev);
	emac_netif_start(dev);

	mutex_unlock(&dev->link_lock);

	return 0;
 oom:
	emac_clean_rx_ring(dev);
	free_irq(dev->emac_irq, dev);

	return -ENOMEM;
}
static int emac_link_differs(struct emac_instance *dev)
{
	u32 r = in_be32(&dev->emacp->mr1);

	int duplex = r & EMAC_MR1_FDE ? DUPLEX_FULL : DUPLEX_HALF;
	int speed, pause, asym_pause;

	if (r & EMAC_MR1_MF_1000)
		speed = SPEED_1000;
	else if (r & EMAC_MR1_MF_100)
		speed = SPEED_100;
	else
		speed = SPEED_10;

	switch (r & (EMAC_MR1_EIFC | EMAC_MR1_APP)) {
	case (EMAC_MR1_EIFC | EMAC_MR1_APP):
		pause = 1;
		asym_pause = 0;
		break;
	case EMAC_MR1_APP:
		pause = 0;
		asym_pause = 1;
		break;
	default:
		pause = asym_pause = 0;
	}
	return speed != dev->phy.speed || duplex != dev->phy.duplex ||
	    pause != dev->phy.pause || asym_pause != dev->phy.asym_pause;
}
static void emac_link_timer(struct work_struct *work)
{
	struct emac_instance *dev =
		container_of((struct delayed_work *)work,
			     struct emac_instance, link_work);
	int link_poll_interval;

	mutex_lock(&dev->link_lock);
	DBG2(dev, "link timer" NL);

	if (!dev->opened)
		goto bail;

	if (dev->phy.def->ops->poll_link(&dev->phy)) {
		if (!netif_carrier_ok(dev->ndev)) {
			emac_rx_clk_default(dev);
			/* Get new link parameters */
			dev->phy.def->ops->read_link(&dev->phy);

			netif_carrier_on(dev->ndev);
			emac_netif_stop(dev);
			emac_full_tx_reset(dev);
			emac_netif_start(dev);
			emac_print_link_status(dev);
		}
		link_poll_interval = PHY_POLL_LINK_ON;
	} else {
		if (netif_carrier_ok(dev->ndev)) {
			emac_rx_clk_tx(dev);
			netif_carrier_off(dev->ndev);
			netif_tx_disable(dev->ndev);
			emac_reinitialize(dev);
			emac_print_link_status(dev);
		}
		link_poll_interval = PHY_POLL_LINK_OFF;
	}
	schedule_delayed_work(&dev->link_work, link_poll_interval);
 bail:
	mutex_unlock(&dev->link_lock);
}
static void emac_force_link_update(struct emac_instance *dev)
{
	netif_carrier_off(dev->ndev);
	smp_rmb();
	if (dev->link_polling) {
		cancel_rearming_delayed_work(&dev->link_work);
		if (dev->link_polling)
			schedule_delayed_work(&dev->link_work, PHY_POLL_LINK_OFF);
	}
}
/* Process ctx, rtnl_lock semaphore */
static int emac_close(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	DBG(dev, "close" NL);

	if (dev->phy.address >= 0) {
		dev->link_polling = 0;
		cancel_rearming_delayed_work(&dev->link_work);
	}
	mutex_lock(&dev->link_lock);
	emac_netif_stop(dev);
	dev->opened = 0;
	mutex_unlock(&dev->link_lock);

	emac_rx_disable(dev);
	emac_tx_disable(dev);
	mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);
	mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
	mal_poll_del(dev->mal, &dev->commac);

	emac_clean_tx_ring(dev);
	emac_clean_rx_ring(dev);

	free_irq(dev->emac_irq, dev);

	return 0;
}
static inline u16 emac_tx_csum(struct emac_instance *dev,
			       struct sk_buff *skb)
{
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
		(skb->ip_summed == CHECKSUM_PARTIAL)) {
		++dev->stats.tx_packets_csum;
		return EMAC_TX_CTRL_TAH_CSUM;
	}
	return 0;
}
static inline int emac_xmit_finish(struct emac_instance *dev, int len)
{
	struct emac_regs __iomem *p = dev->emacp;
	struct net_device *ndev = dev->ndev;

	/* Send the packet out. If the if makes a significant perf
	 * difference, then we can store the TMR0 value in "dev"
	 * instead
	 */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		out_be32(&p->tmr0, EMAC4_TMR0_XMIT);
	else
		out_be32(&p->tmr0, EMAC_TMR0_XMIT);

	if (unlikely(++dev->tx_cnt == NUM_TX_BUFF)) {
		netif_stop_queue(ndev);
		DBG2(dev, "stopped TX queue" NL);
	}

	ndev->trans_start = jiffies;
	++dev->stats.tx_packets;
	dev->stats.tx_bytes += len;

	return 0;
}
/* Tx lock BH */
static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	unsigned int len = skb->len;
	int slot;

	u16 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
	    MAL_TX_CTRL_LAST | emac_tx_csum(dev, skb);

	slot = dev->tx_slot++;
	if (dev->tx_slot == NUM_TX_BUFF) {
		dev->tx_slot = 0;
		ctrl |= MAL_TX_CTRL_WRAP;
	}

	DBG2(dev, "xmit(%u) %d" NL, len, slot);

	dev->tx_skb[slot] = skb;
	dev->tx_desc[slot].data_ptr = dma_map_single(&dev->ofdev->dev,
						     skb->data, len,
						     DMA_TO_DEVICE);
	dev->tx_desc[slot].data_len = (u16) len;
	wmb();
	dev->tx_desc[slot].ctrl = ctrl;

	return emac_xmit_finish(dev, len);
}
#ifdef CONFIG_IBM_NEW_EMAC_TAH
static inline int emac_xmit_split(struct emac_instance *dev, int slot,
				  u32 pd, int len, int last, u16 base_ctrl)
{
	while (1) {
		u16 ctrl = base_ctrl;
		int chunk = min(len, MAL_MAX_TX_SIZE);
		len -= chunk;

		slot = (slot + 1) % NUM_TX_BUFF;

		if (last && !len)
			ctrl |= MAL_TX_CTRL_LAST;
		if (slot == NUM_TX_BUFF - 1)
			ctrl |= MAL_TX_CTRL_WRAP;

		dev->tx_skb[slot] = NULL;
		dev->tx_desc[slot].data_ptr = pd;
		dev->tx_desc[slot].data_len = (u16) chunk;
		dev->tx_desc[slot].ctrl = ctrl;
		++dev->tx_cnt;

		if (!len)
			break;

		pd += chunk;
	}
	return slot;
}
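
/* Worked example (editor's note, not from the original source): with a
 * MAL_MAX_TX_SIZE of 4096 (an illustrative value; the real one comes from
 * the MAL code), a 5000-byte linear chunk handed to emac_xmit_split() is
 * emitted as two descriptors of 4096 and 904 bytes. Only the final
 * descriptor of the final fragment gets MAL_TX_CTRL_LAST, and
 * MAL_TX_CTRL_WRAP is set whenever the ring's last slot is used.
 */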
/* Tx lock BH disabled (SG version for TAH equipped EMACs) */
static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int len = skb->len, chunk;
	int slot, i;
	u16 ctrl;
	u32 pd;

	/* This is common "fast" path */
	if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE))
		return emac_start_xmit(skb, ndev);

	len -= skb->data_len;

	/* Note, this is only an *estimation*, we can still run out of empty
	 * slots because of the additional fragmentation into
	 * MAL_MAX_TX_SIZE-sized chunks
	 */
	if (unlikely(dev->tx_cnt + nr_frags + mal_tx_chunks(len) > NUM_TX_BUFF))
		goto stop_queue;

	ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
	    emac_tx_csum(dev, skb);
	slot = dev->tx_slot;

	/* skb data */
	dev->tx_skb[slot] = NULL;
	chunk = min(len, MAL_MAX_TX_SIZE);
	dev->tx_desc[slot].data_ptr = pd =
	    dma_map_single(&dev->ofdev->dev, skb->data, len, DMA_TO_DEVICE);
	dev->tx_desc[slot].data_len = (u16) chunk;
	len -= chunk;
	if (unlikely(len))
		slot = emac_xmit_split(dev, slot, pd + chunk, len, !nr_frags,
				       ctrl);
	/* skb fragments */
	for (i = 0; i < nr_frags; ++i) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		len = frag->size;

		if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
			goto undo_frame;

		pd = dma_map_page(&dev->ofdev->dev, frag->page, frag->page_offset, len,
				  DMA_TO_DEVICE);

		slot = emac_xmit_split(dev, slot, pd, len, i == nr_frags - 1,
				       ctrl);
	}

	DBG2(dev, "xmit_sg(%u) %d - %d" NL, skb->len, dev->tx_slot, slot);

	/* Attach skb to the last slot so we don't release it too early */
	dev->tx_skb[slot] = skb;

	/* Send the packet out */
	if (dev->tx_slot == NUM_TX_BUFF - 1)
		ctrl |= MAL_TX_CTRL_WRAP;
	wmb();
	dev->tx_desc[dev->tx_slot].ctrl = ctrl;
	dev->tx_slot = (slot + 1) % NUM_TX_BUFF;

	return emac_xmit_finish(dev, skb->len);

 undo_frame:
	/* Well, too bad. Our previous estimation was overly optimistic.
	 * Undo everything.
	 */
	while (slot != dev->tx_slot) {
		dev->tx_desc[slot].ctrl = 0;
		--dev->tx_cnt;
		if (--slot < 0)
			slot = NUM_TX_BUFF - 1;
	}
	++dev->estats.tx_undo;

 stop_queue:
	netif_stop_queue(ndev);
	DBG2(dev, "stopped TX queue" NL);
	return 1;
}
#else
# define emac_start_xmit_sg	emac_start_xmit
#endif	/* !defined(CONFIG_IBM_NEW_EMAC_TAH) */
static void emac_parse_tx_error(struct emac_instance *dev, u16 ctrl)
{
	struct emac_error_stats *st = &dev->estats;

	DBG(dev, "BD TX error %04x" NL, ctrl);

	++st->tx_bd_errors;
	if (ctrl & EMAC_TX_ST_BFCS)
		++st->tx_bd_bad_fcs;
	if (ctrl & EMAC_TX_ST_LCS)
		++st->tx_bd_carrier_loss;
	if (ctrl & EMAC_TX_ST_ED)
		++st->tx_bd_excessive_deferral;
	if (ctrl & EMAC_TX_ST_EC)
		++st->tx_bd_excessive_collisions;
	if (ctrl & EMAC_TX_ST_LC)
		++st->tx_bd_late_collision;
	if (ctrl & EMAC_TX_ST_MC)
		++st->tx_bd_multple_collisions;
	if (ctrl & EMAC_TX_ST_SC)
		++st->tx_bd_single_collision;
	if (ctrl & EMAC_TX_ST_UR)
		++st->tx_bd_underrun;
	if (ctrl & EMAC_TX_ST_SQE)
		++st->tx_bd_sqe;
}
static void emac_poll_tx(void *param)
{
	struct emac_instance *dev = param;
	u32 bad_mask;

	DBG2(dev, "poll_tx, %d %d" NL, dev->tx_cnt, dev->ack_slot);

	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		bad_mask = EMAC_IS_BAD_TX_TAH;
	else
		bad_mask = EMAC_IS_BAD_TX;

	netif_tx_lock_bh(dev->ndev);
	if (dev->tx_cnt) {
		u16 ctrl;
		int slot = dev->ack_slot, n = 0;
	again:
		ctrl = dev->tx_desc[slot].ctrl;
		if (!(ctrl & MAL_TX_CTRL_READY)) {
			struct sk_buff *skb = dev->tx_skb[slot];
			++n;

			if (skb) {
				dev_kfree_skb(skb);
				dev->tx_skb[slot] = NULL;
			}
			slot = (slot + 1) % NUM_TX_BUFF;

			if (unlikely(ctrl & bad_mask))
				emac_parse_tx_error(dev, ctrl);

			if (--dev->tx_cnt)
				goto again;
		}
		if (n) {
			dev->ack_slot = slot;
			if (netif_queue_stopped(dev->ndev) &&
			    dev->tx_cnt < EMAC_TX_WAKEUP_THRESH)
				netif_wake_queue(dev->ndev);

			DBG2(dev, "tx %d pkts" NL, n);
		}
	}
	netif_tx_unlock_bh(dev->ndev);
}
static inline void emac_recycle_rx_skb(struct emac_instance *dev, int slot,
				       int len)
{
	struct sk_buff *skb = dev->rx_skb[slot];

	DBG2(dev, "recycle %d %d" NL, slot, len);

	if (len)
		dma_map_single(&dev->ofdev->dev, skb->data - 2,
			       EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE);

	dev->rx_desc[slot].data_len = 0;
	wmb();
	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
}
static void emac_parse_rx_error(struct emac_instance *dev, u16 ctrl)
{
	struct emac_error_stats *st = &dev->estats;

	DBG(dev, "BD RX error %04x" NL, ctrl);

	++st->rx_bd_errors;
	if (ctrl & EMAC_RX_ST_OE)
		++st->rx_bd_overrun;
	if (ctrl & EMAC_RX_ST_BP)
		++st->rx_bd_bad_packet;
	if (ctrl & EMAC_RX_ST_RP)
		++st->rx_bd_runt_packet;
	if (ctrl & EMAC_RX_ST_SE)
		++st->rx_bd_short_event;
	if (ctrl & EMAC_RX_ST_AE)
		++st->rx_bd_alignment_error;
	if (ctrl & EMAC_RX_ST_BFCS)
		++st->rx_bd_bad_fcs;
	if (ctrl & EMAC_RX_ST_PTL)
		++st->rx_bd_packet_too_long;
	if (ctrl & EMAC_RX_ST_ORE)
		++st->rx_bd_out_of_range;
	if (ctrl & EMAC_RX_ST_IRE)
		++st->rx_bd_in_range;
}
static inline void emac_rx_csum(struct emac_instance *dev,
				struct sk_buff *skb, u16 ctrl)
{
#ifdef CONFIG_IBM_NEW_EMAC_TAH
	if (!ctrl && dev->tah_dev) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		++dev->stats.rx_packets_csum;
	}
#endif
}
static inline int emac_rx_sg_append(struct emac_instance *dev, int slot)
{
	if (likely(dev->rx_sg_skb != NULL)) {
		int len = dev->rx_desc[slot].data_len;
		int tot_len = dev->rx_sg_skb->len + len;

		if (unlikely(tot_len + 2 > dev->rx_skb_size)) {
			++dev->estats.rx_dropped_mtu;
			dev_kfree_skb(dev->rx_sg_skb);
			dev->rx_sg_skb = NULL;
		} else {
			cacheable_memcpy(skb_tail_pointer(dev->rx_sg_skb),
					 dev->rx_skb[slot]->data, len);
			skb_put(dev->rx_sg_skb, len);
			emac_recycle_rx_skb(dev, slot, len);
			return 0;
		}
	}
	emac_recycle_rx_skb(dev, slot, 0);
	return -1;
}
/* NAPI poll context */
static int emac_poll_rx(void *param, int budget)
{
	struct emac_instance *dev = param;
	int slot = dev->rx_slot, received = 0;

	DBG2(dev, "poll_rx(%d)" NL, budget);

 again:
	while (budget > 0) {
		int len;
		struct sk_buff *skb;
		u16 ctrl = dev->rx_desc[slot].ctrl;

		if (ctrl & MAL_RX_CTRL_EMPTY)
			break;

		skb = dev->rx_skb[slot];
		mb();
		len = dev->rx_desc[slot].data_len;

		if (unlikely(!MAL_IS_SINGLE_RX(ctrl)))
			goto sg;

		ctrl &= EMAC_BAD_RX_MASK;
		if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
			emac_parse_rx_error(dev, ctrl);
			++dev->estats.rx_dropped_error;
			emac_recycle_rx_skb(dev, slot, 0);
			len = 0;
			goto next;
		}

		if (len < ETH_HLEN) {
			++dev->estats.rx_dropped_stack;
			emac_recycle_rx_skb(dev, slot, len);
			goto next;
		}

		if (len && len < EMAC_RX_COPY_THRESH) {
			struct sk_buff *copy_skb =
			    alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2, GFP_ATOMIC);
			if (unlikely(!copy_skb))
				goto oom;

			skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2);
			cacheable_memcpy(copy_skb->data - 2, skb->data - 2,
					 len + 2);
			emac_recycle_rx_skb(dev, slot, len);
			skb = copy_skb;
		} else if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC)))
			goto oom;

		skb_put(skb, len);
	push_packet:
		skb->dev = dev->ndev;
		skb->protocol = eth_type_trans(skb, dev->ndev);
		emac_rx_csum(dev, skb, ctrl);

		if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
			++dev->estats.rx_dropped_stack;
	next:
		++dev->stats.rx_packets;
	skip:
		dev->stats.rx_bytes += len;
		slot = (slot + 1) % NUM_RX_BUFF;
		--budget;
		++received;
		continue;
	sg:
		if (ctrl & MAL_RX_CTRL_FIRST) {
			BUG_ON(dev->rx_sg_skb);
			if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) {
				DBG(dev, "rx OOM %d" NL, slot);
				++dev->estats.rx_dropped_oom;
				emac_recycle_rx_skb(dev, slot, 0);
			} else {
				dev->rx_sg_skb = skb;
				skb_put(skb, len);
			}
		} else if (!emac_rx_sg_append(dev, slot) &&
			   (ctrl & MAL_RX_CTRL_LAST)) {

			skb = dev->rx_sg_skb;
			dev->rx_sg_skb = NULL;

			ctrl &= EMAC_BAD_RX_MASK;
			if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
				emac_parse_rx_error(dev, ctrl);
				++dev->estats.rx_dropped_error;
				dev_kfree_skb(skb);
				len = 0;
			} else
				goto push_packet;
		}
		goto skip;
	oom:
		DBG(dev, "rx OOM %d" NL, slot);
		/* Drop the packet and recycle skb */
		++dev->estats.rx_dropped_oom;
		emac_recycle_rx_skb(dev, slot, 0);
		goto next;
	}

	if (received) {
		DBG2(dev, "rx %d BDs" NL, received);
		dev->rx_slot = slot;
	}

	if (unlikely(budget && test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags))) {
		mb();
		if (!(dev->rx_desc[slot].ctrl & MAL_RX_CTRL_EMPTY)) {
			DBG2(dev, "rx restart" NL);
			received = 0;
			goto again;
		}

		if (dev->rx_sg_skb) {
			DBG2(dev, "dropping partial rx packet" NL);
			++dev->estats.rx_dropped_error;
			dev_kfree_skb(dev->rx_sg_skb);
			dev->rx_sg_skb = NULL;
		}

		clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
		mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
		emac_rx_enable(dev);
		dev->rx_slot = 0;
	}
	return received;
}
/* NAPI poll context */
static int emac_peek_rx(void *param)
{
	struct emac_instance *dev = param;

	return !(dev->rx_desc[dev->rx_slot].ctrl & MAL_RX_CTRL_EMPTY);
}
/* NAPI poll context */
static int emac_peek_rx_sg(void *param)
{
	struct emac_instance *dev = param;

	int slot = dev->rx_slot;
	while (1) {
		u16 ctrl = dev->rx_desc[slot].ctrl;
		if (ctrl & MAL_RX_CTRL_EMPTY)
			return 0;
		else if (ctrl & MAL_RX_CTRL_LAST)
			return 1;

		slot = (slot + 1) % NUM_RX_BUFF;

		/* I'm just being paranoid here :) */
		if (unlikely(slot == dev->rx_slot))
			return 0;
	}
}
/* Hard IRQ */
static void emac_rxde(void *param)
{
	struct emac_instance *dev = param;

	++dev->estats.rx_stopped;
	emac_rx_disable_async(dev);
}
/* Hard IRQ */
static irqreturn_t emac_irq(int irq, void *dev_instance)
{
	struct emac_instance *dev = dev_instance;
	struct emac_regs __iomem *p = dev->emacp;
	struct emac_error_stats *st = &dev->estats;
	u32 isr;

	spin_lock(&dev->lock);

	isr = in_be32(&p->isr);
	out_be32(&p->isr, isr);

	DBG(dev, "isr = %08x" NL, isr);

	if (isr & EMAC4_ISR_TXPE)
		++st->tx_parity;
	if (isr & EMAC4_ISR_RXPE)
		++st->rx_parity;
	if (isr & EMAC4_ISR_TXUE)
		++st->tx_underrun;
	if (isr & EMAC4_ISR_RXOE)
		++st->rx_fifo_overrun;
	if (isr & EMAC_ISR_OVR)
		++st->rx_overrun;
	if (isr & EMAC_ISR_BP)
		++st->rx_bad_packet;
	if (isr & EMAC_ISR_RP)
		++st->rx_runt_packet;
	if (isr & EMAC_ISR_SE)
		++st->rx_short_event;
	if (isr & EMAC_ISR_ALE)
		++st->rx_alignment_error;
	if (isr & EMAC_ISR_BFCS)
		++st->rx_bad_fcs;
	if (isr & EMAC_ISR_PTLE)
		++st->rx_packet_too_long;
	if (isr & EMAC_ISR_ORE)
		++st->rx_out_of_range;
	if (isr & EMAC_ISR_IRE)
		++st->rx_in_range;
	if (isr & EMAC_ISR_SQE)
		++st->tx_sqe;
	if (isr & EMAC_ISR_TE)
		++st->tx_errors;

	spin_unlock(&dev->lock);

	return IRQ_HANDLED;
}
static struct net_device_stats *emac_stats(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	struct emac_stats *st = &dev->stats;
	struct emac_error_stats *est = &dev->estats;
	struct net_device_stats *nst = &dev->nstats;
	unsigned long flags;

	DBG2(dev, "stats" NL);

	/* Compute "legacy" statistics */
	spin_lock_irqsave(&dev->lock, flags);
	nst->rx_packets = (unsigned long)st->rx_packets;
	nst->rx_bytes = (unsigned long)st->rx_bytes;
	nst->tx_packets = (unsigned long)st->tx_packets;
	nst->tx_bytes = (unsigned long)st->tx_bytes;
	nst->rx_dropped = (unsigned long)(est->rx_dropped_oom +
					  est->rx_dropped_error +
					  est->rx_dropped_resize +
					  est->rx_dropped_mtu);
	nst->tx_dropped = (unsigned long)est->tx_dropped;

	nst->rx_errors = (unsigned long)est->rx_bd_errors;
	nst->rx_fifo_errors = (unsigned long)(est->rx_bd_overrun +
					      est->rx_fifo_overrun +
					      est->rx_overrun);
	nst->rx_frame_errors = (unsigned long)(est->rx_bd_alignment_error +
					       est->rx_alignment_error);
	nst->rx_crc_errors = (unsigned long)(est->rx_bd_bad_fcs +
					     est->rx_bad_fcs);
	nst->rx_length_errors = (unsigned long)(est->rx_bd_runt_packet +
						est->rx_bd_short_event +
						est->rx_bd_packet_too_long +
						est->rx_bd_out_of_range +
						est->rx_bd_in_range +
						est->rx_runt_packet +
						est->rx_short_event +
						est->rx_packet_too_long +
						est->rx_out_of_range +
						est->rx_in_range);

	nst->tx_errors = (unsigned long)(est->tx_bd_errors + est->tx_errors);
	nst->tx_fifo_errors = (unsigned long)(est->tx_bd_underrun +
					      est->tx_underrun);
	nst->tx_carrier_errors = (unsigned long)est->tx_bd_carrier_loss;
	nst->collisions = (unsigned long)(est->tx_bd_excessive_deferral +
					  est->tx_bd_excessive_collisions +
					  est->tx_bd_late_collision +
					  est->tx_bd_multple_collisions);
	spin_unlock_irqrestore(&dev->lock, flags);
	return nst;
}
static struct mal_commac_ops emac_commac_ops = {
	.poll_tx = &emac_poll_tx,
	.poll_rx = &emac_poll_rx,
	.peek_rx = &emac_peek_rx,
	.rxde = &emac_rxde,
};

static struct mal_commac_ops emac_commac_sg_ops = {
	.poll_tx = &emac_poll_tx,
	.poll_rx = &emac_poll_rx,
	.peek_rx = &emac_peek_rx_sg,
	.rxde = &emac_rxde,
};
/* Ethtool support */
static int emac_ethtool_get_settings(struct net_device *ndev,
				     struct ethtool_cmd *cmd)
{
	struct emac_instance *dev = netdev_priv(ndev);

	cmd->supported = dev->phy.features;
	cmd->port = PORT_MII;
	cmd->phy_address = dev->phy.address;
	cmd->transceiver =
	    dev->phy.address >= 0 ? XCVR_EXTERNAL : XCVR_INTERNAL;

	mutex_lock(&dev->link_lock);
	cmd->advertising = dev->phy.advertising;
	cmd->autoneg = dev->phy.autoneg;
	cmd->speed = dev->phy.speed;
	cmd->duplex = dev->phy.duplex;
	mutex_unlock(&dev->link_lock);

	return 0;
}
static int emac_ethtool_set_settings(struct net_device *ndev,
				     struct ethtool_cmd *cmd)
{
	struct emac_instance *dev = netdev_priv(ndev);
	u32 f = dev->phy.features;

	DBG(dev, "set_settings(%d, %d, %d, 0x%08x)" NL,
	    cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising);

	/* Basic sanity checks */
	if (dev->phy.address < 0)
		return -EOPNOTSUPP;
	if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;
	if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
		return -EINVAL;
	if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_DISABLE) {
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_HALF
			    && !(f & SUPPORTED_10baseT_Half))
				return -EINVAL;
			if (cmd->duplex == DUPLEX_FULL
			    && !(f & SUPPORTED_10baseT_Full))
				return -EINVAL;
			break;
		case SPEED_100:
			if (cmd->duplex == DUPLEX_HALF
			    && !(f & SUPPORTED_100baseT_Half))
				return -EINVAL;
			if (cmd->duplex == DUPLEX_FULL
			    && !(f & SUPPORTED_100baseT_Full))
				return -EINVAL;
			break;
		case SPEED_1000:
			if (cmd->duplex == DUPLEX_HALF
			    && !(f & SUPPORTED_1000baseT_Half))
				return -EINVAL;
			if (cmd->duplex == DUPLEX_FULL
			    && !(f & SUPPORTED_1000baseT_Full))
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}

		mutex_lock(&dev->link_lock);
		dev->phy.def->ops->setup_forced(&dev->phy, cmd->speed,
						cmd->duplex);
		mutex_unlock(&dev->link_lock);

	} else {
		if (!(f & SUPPORTED_Autoneg))
			return -EINVAL;

		mutex_lock(&dev->link_lock);
		dev->phy.def->ops->setup_aneg(&dev->phy,
					      (cmd->advertising & f) |
					      (dev->phy.advertising &
					       (ADVERTISED_Pause |
						ADVERTISED_Asym_Pause)));
		mutex_unlock(&dev->link_lock);
	}
	emac_force_link_update(dev);

	return 0;
}
static void emac_ethtool_get_ringparam(struct net_device *ndev,
				       struct ethtool_ringparam *rp)
{
	rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF;
	rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF;
}
static void emac_ethtool_get_pauseparam(struct net_device *ndev,
					struct ethtool_pauseparam *pp)
{
	struct emac_instance *dev = netdev_priv(ndev);

	mutex_lock(&dev->link_lock);
	if ((dev->phy.features & SUPPORTED_Autoneg) &&
	    (dev->phy.advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause)))
		pp->autoneg = 1;

	if (dev->phy.duplex == DUPLEX_FULL) {
		if (dev->phy.pause)
			pp->rx_pause = pp->tx_pause = 1;
		else if (dev->phy.asym_pause)
			pp->tx_pause = 1;
	}
	mutex_unlock(&dev->link_lock);
}
static u32 emac_ethtool_get_rx_csum(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	return dev->tah_dev != NULL;
}
static int emac_get_regs_len(struct emac_instance *dev)
{
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		return sizeof(struct emac_ethtool_regs_subhdr) +
			EMAC4_ETHTOOL_REGS_SIZE(dev);
	else
		return sizeof(struct emac_ethtool_regs_subhdr) +
			EMAC_ETHTOOL_REGS_SIZE(dev);
}
static int emac_ethtool_get_regs_len(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int size;

	size = sizeof(struct emac_ethtool_regs_hdr) +
		emac_get_regs_len(dev) + mal_get_regs_len(dev->mal);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		size += zmii_get_regs_len(dev->zmii_dev);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		size += rgmii_get_regs_len(dev->rgmii_dev);
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		size += tah_get_regs_len(dev->tah_dev);

	return size;
}
static void *emac_dump_regs(struct emac_instance *dev, void *buf)
{
	struct emac_ethtool_regs_subhdr *hdr = buf;

	hdr->index = dev->cell_index;
	if (emac_has_feature(dev, EMAC_FTR_EMAC4)) {
		hdr->version = EMAC4_ETHTOOL_REGS_VER;
		memcpy_fromio(hdr + 1, dev->emacp, EMAC4_ETHTOOL_REGS_SIZE(dev));
		return ((void *)(hdr + 1) + EMAC4_ETHTOOL_REGS_SIZE(dev));
	} else {
		hdr->version = EMAC_ETHTOOL_REGS_VER;
		memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE(dev));
		return ((void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE(dev));
	}
}
static void emac_ethtool_get_regs(struct net_device *ndev,
				  struct ethtool_regs *regs, void *buf)
{
	struct emac_instance *dev = netdev_priv(ndev);
	struct emac_ethtool_regs_hdr *hdr = buf;

	hdr->components = 0;
	buf = hdr + 1;

	buf = mal_dump_regs(dev->mal, buf);
	buf = emac_dump_regs(dev, buf);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) {
		hdr->components |= EMAC_ETHTOOL_REGS_ZMII;
		buf = zmii_dump_regs(dev->zmii_dev, buf);
	}
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) {
		hdr->components |= EMAC_ETHTOOL_REGS_RGMII;
		buf = rgmii_dump_regs(dev->rgmii_dev, buf);
	}
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH)) {
		hdr->components |= EMAC_ETHTOOL_REGS_TAH;
		buf = tah_dump_regs(dev->tah_dev, buf);
	}
}
static int emac_ethtool_nway_reset(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int res = 0;

	DBG(dev, "nway_reset" NL);

	if (dev->phy.address < 0)
		return -EOPNOTSUPP;

	mutex_lock(&dev->link_lock);
	if (!dev->phy.autoneg) {
		res = -EINVAL;
		goto out;
	}

	dev->phy.def->ops->setup_aneg(&dev->phy, dev->phy.advertising);
 out:
	mutex_unlock(&dev->link_lock);
	emac_force_link_update(dev);
	return res;
}
static int emac_ethtool_get_stats_count(struct net_device *ndev)
{
	return EMAC_ETHTOOL_STATS_COUNT;
}

static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset,
				     u8 * buf)
{
	if (stringset == ETH_SS_STATS)
		memcpy(buf, &emac_stats_keys, sizeof(emac_stats_keys));
}
static void emac_ethtool_get_ethtool_stats(struct net_device *ndev,
					   struct ethtool_stats *estats,
					   u64 * tmp_stats)
{
	struct emac_instance *dev = netdev_priv(ndev);

	memcpy(tmp_stats, &dev->stats, sizeof(dev->stats));
	tmp_stats += sizeof(dev->stats) / sizeof(u64);
	memcpy(tmp_stats, &dev->estats, sizeof(dev->estats));
}
static void emac_ethtool_get_drvinfo(struct net_device *ndev,
				     struct ethtool_drvinfo *info)
{
	struct emac_instance *dev = netdev_priv(ndev);

	strcpy(info->driver, "ibm_emac");
	strcpy(info->version, DRV_VERSION);
	info->fw_version[0] = '\0';
	sprintf(info->bus_info, "PPC 4xx EMAC-%d %s",
		dev->cell_index, dev->ofdev->node->full_name);
	info->n_stats = emac_ethtool_get_stats_count(ndev);
	info->regdump_len = emac_ethtool_get_regs_len(ndev);
}
static const struct ethtool_ops emac_ethtool_ops = {
	.get_settings = emac_ethtool_get_settings,
	.set_settings = emac_ethtool_set_settings,
	.get_drvinfo = emac_ethtool_get_drvinfo,

	.get_regs_len = emac_ethtool_get_regs_len,
	.get_regs = emac_ethtool_get_regs,

	.nway_reset = emac_ethtool_nway_reset,

	.get_ringparam = emac_ethtool_get_ringparam,
	.get_pauseparam = emac_ethtool_get_pauseparam,

	.get_rx_csum = emac_ethtool_get_rx_csum,

	.get_strings = emac_ethtool_get_strings,
	.get_stats_count = emac_ethtool_get_stats_count,
	.get_ethtool_stats = emac_ethtool_get_ethtool_stats,

	.get_link = ethtool_op_get_link,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.get_sg = ethtool_op_get_sg,
};
static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
	struct emac_instance *dev = netdev_priv(ndev);
	uint16_t *data = (uint16_t *) & rq->ifr_ifru;

	DBG(dev, "ioctl %08x" NL, cmd);

	if (dev->phy.address < 0)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCDEVPRIVATE:
		data[0] = dev->phy.address;
		/* Fall through */
	case SIOCGMIIREG:
	case SIOCDEVPRIVATE + 1:
		data[3] = emac_mdio_read(ndev, dev->phy.address, data[1]);
		return 0;

	case SIOCSMIIREG:
	case SIOCDEVPRIVATE + 2:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		emac_mdio_write(ndev, dev->phy.address, data[1], data[2]);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
struct emac_depentry {
	u32			phandle;
	struct device_node	*node;
	struct of_device	*ofdev;
	void			*drvdata;
};

#define	EMAC_DEP_MAL_IDX	0
#define	EMAC_DEP_ZMII_IDX	1
#define	EMAC_DEP_RGMII_IDX	2
#define	EMAC_DEP_TAH_IDX	3
#define	EMAC_DEP_MDIO_IDX	4
#define	EMAC_DEP_PREV_IDX	5
#define	EMAC_DEP_COUNT		6
static int __devinit emac_check_deps(struct emac_instance *dev,
				     struct emac_depentry *deps)
{
	int i, there = 0;
	struct device_node *np;

	for (i = 0; i < EMAC_DEP_COUNT; i++) {
		/* no dependency on that item, all right */
		if (deps[i].phandle == 0) {
			there++;
			continue;
		}
		/* special case for blist as the dependency might go away */
		if (i == EMAC_DEP_PREV_IDX) {
			np = *(dev->blist - 1);
			if (np == NULL) {
				deps[i].phandle = 0;
				there++;
				continue;
			}
			if (deps[i].node == NULL)
				deps[i].node = of_node_get(np);
		}
		if (deps[i].node == NULL)
			deps[i].node = of_find_node_by_phandle(deps[i].phandle);
		if (deps[i].node == NULL)
			continue;
		if (deps[i].ofdev == NULL)
			deps[i].ofdev = of_find_device_by_node(deps[i].node);
		if (deps[i].ofdev == NULL)
			continue;
		if (deps[i].drvdata == NULL)
			deps[i].drvdata = dev_get_drvdata(&deps[i].ofdev->dev);
		if (deps[i].drvdata != NULL)
			there++;
	}
	return (there == EMAC_DEP_COUNT);
}
static void emac_put_deps(struct emac_instance *dev)
{
	if (dev->mal_dev)
		of_dev_put(dev->mal_dev);
	if (dev->zmii_dev)
		of_dev_put(dev->zmii_dev);
	if (dev->rgmii_dev)
		of_dev_put(dev->rgmii_dev);
	if (dev->mdio_dev)
		of_dev_put(dev->mdio_dev);
	if (dev->tah_dev)
		of_dev_put(dev->tah_dev);
}
static int __devinit emac_of_bus_notify(struct notifier_block *nb,
					unsigned long action, void *data)
{
	/* We are only interested in device addition */
	if (action == BUS_NOTIFY_BOUND_DRIVER)
		wake_up_all(&emac_probe_wait);
	return 0;
}

static struct notifier_block emac_of_bus_notifier __devinitdata = {
	.notifier_call = emac_of_bus_notify
};
static int __devinit emac_wait_deps(struct emac_instance *dev)
{
	struct emac_depentry deps[EMAC_DEP_COUNT];
	int i, err;

	memset(&deps, 0, sizeof(deps));

	deps[EMAC_DEP_MAL_IDX].phandle = dev->mal_ph;
	deps[EMAC_DEP_ZMII_IDX].phandle = dev->zmii_ph;
	deps[EMAC_DEP_RGMII_IDX].phandle = dev->rgmii_ph;
	if (dev->tah_ph)
		deps[EMAC_DEP_TAH_IDX].phandle = dev->tah_ph;
	if (dev->mdio_ph)
		deps[EMAC_DEP_MDIO_IDX].phandle = dev->mdio_ph;
	if (dev->blist && dev->blist > emac_boot_list)
		deps[EMAC_DEP_PREV_IDX].phandle = 0xffffffffu;
	bus_register_notifier(&of_platform_bus_type, &emac_of_bus_notifier);
	wait_event_timeout(emac_probe_wait,
			   emac_check_deps(dev, deps),
			   EMAC_PROBE_DEP_TIMEOUT);
	bus_unregister_notifier(&of_platform_bus_type, &emac_of_bus_notifier);
	err = emac_check_deps(dev, deps) ? 0 : -ENODEV;
	for (i = 0; i < EMAC_DEP_COUNT; i++) {
		if (deps[i].node)
			of_node_put(deps[i].node);
		if (err && deps[i].ofdev)
			of_dev_put(deps[i].ofdev);
	}
	if (err == 0) {
		dev->mal_dev = deps[EMAC_DEP_MAL_IDX].ofdev;
		dev->zmii_dev = deps[EMAC_DEP_ZMII_IDX].ofdev;
		dev->rgmii_dev = deps[EMAC_DEP_RGMII_IDX].ofdev;
		dev->tah_dev = deps[EMAC_DEP_TAH_IDX].ofdev;
		dev->mdio_dev = deps[EMAC_DEP_MDIO_IDX].ofdev;
	}
	if (deps[EMAC_DEP_PREV_IDX].ofdev)
		of_dev_put(deps[EMAC_DEP_PREV_IDX].ofdev);
	return err;
}
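/*
 * Worked example (hypothetical wiring): an EMAC that references only a
 * MAL and an RGMII bridge leaves the other phandles at 0, and
 * emac_check_deps() counts a zero phandle as already satisfied. The
 * wait above therefore only blocks until the MAL and RGMII drivers
 * bind, at which point the bus notifier kicks emac_probe_wait and the
 * re-check succeeds.
 */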
static int __devinit emac_read_uint_prop(struct device_node *np,
					 const char *name, u32 *val, int fatal)
{
	int len;
	const u32 *prop = of_get_property(np, name, &len);
	if (prop == NULL || len < sizeof(u32)) {
		if (fatal)
			printk(KERN_ERR "%s: missing %s property\n",
			       np->full_name, name);
		return -ENODEV;
	}
	*val = *prop;
	return 0;
}
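/*
 * Usage sketch for the helper above, as used throughout
 * emac_init_config() below: fatal=1 callers treat a missing property
 * as an error worth logging, fatal=0 callers silently fall back to a
 * default.
 *
 *	u32 idx;
 *	if (emac_read_uint_prop(np, "cell-index", &idx, 1))
 *		return -ENXIO;	-- mandatory property
 */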
static int __devinit emac_init_phy(struct emac_instance *dev)
{
	struct device_node *np = dev->ofdev->node;
	struct net_device *ndev = dev->ndev;
	u32 phy_map, adv;
	int i;

	dev->phy.dev = ndev;
	dev->phy.mode = dev->phy_mode;

	/* PHY-less configuration.
	 * XXX I probably should move these settings to the dev tree
	 */
	if (dev->phy_address == 0xffffffff && dev->phy_map == 0xffffffff) {
		emac_reset(dev);

		dev->phy.address = -1;
		dev->phy.features = SUPPORTED_100baseT_Full | SUPPORTED_MII;
		dev->phy.pause = 1;

		return 0;
	}
	mutex_lock(&emac_phy_map_lock);
	phy_map = dev->phy_map | busy_phy_map;

	DBG(dev, "PHY maps %08x %08x" NL, dev->phy_map, busy_phy_map);

	dev->phy.mdio_read = emac_mdio_read;
	dev->phy.mdio_write = emac_mdio_write;

	/* Enable internal clock source on 440GX */
#ifdef CONFIG_PPC_DCR_NATIVE
	if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
		dcri_clrset(SDR0, SDR0_MFR, 0, SDR0_MFR_ECS);
#endif
	/* PHY clock workaround */
	emac_rx_clk_tx(dev);
	/* Configure EMAC with defaults so we can at least use MDIO
	 * This is needed mostly for 440GX
	 */
	if (emac_phy_gpcs(dev->phy.mode)) {
		/* XXX
		 * Make GPCS PHY address equal to EMAC index.
		 * We probably should take into account busy_phy_map
		 * and/or phy_map here.
		 *
		 * Note that the busy_phy_map is currently global
		 * while it should probably be per-ASIC...
		 */
		dev->phy.address = dev->cell_index;
	}

	emac_configure(dev);

	if (dev->phy_address != 0xffffffff)
		phy_map = ~(1 << dev->phy_address);

	for (i = 0; i < 0x20; phy_map >>= 1, ++i)
		if (!(phy_map & 1)) {
			int r;
			busy_phy_map |= 1 << i;

			/* Quick check if there is a PHY at the address */
			r = emac_mdio_read(dev->ndev, i, MII_BMCR);
			if (r == 0xffff || r < 0)
				continue;
			if (!emac_mii_phy_probe(&dev->phy, i))
				break;
		}
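	/*
	 * Worked example of the scan above: a set bit in phy_map means
	 * "skip this MDIO address". With phy-address = 4 the mask
	 * becomes ~(1 << 4) == 0xffffffef, so only address 4 is probed;
	 * a phy-map of 0xfffffffe would restrict the scan to address 0.
	 */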
	/* Enable external clock source */
#ifdef CONFIG_PPC_DCR_NATIVE
	if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
		dcri_clrset(SDR0, SDR0_MFR, SDR0_MFR_ECS, 0);
#endif
	mutex_unlock(&emac_phy_map_lock);
	if (i == 0x20) {
		printk(KERN_WARNING "%s: can't find PHY!\n", np->full_name);
		return -ENXIO;
	}

	/* Init PHY */
	if (dev->phy.def->ops->init)
		dev->phy.def->ops->init(&dev->phy);
	/* Disable any PHY features not supported by the platform */
	dev->phy.def->features &= ~dev->phy_feat_exc;

	/* Setup initial link parameters */
	if (dev->phy.features & SUPPORTED_Autoneg) {
		adv = dev->phy.features;
		if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x))
			adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
		/* Restart autonegotiation */
		dev->phy.def->ops->setup_aneg(&dev->phy, adv);
	} else {
		u32 f = dev->phy.def->features;
		int speed = SPEED_10, fd = DUPLEX_HALF;

		/* Select highest supported speed/duplex */
		if (f & SUPPORTED_1000baseT_Full) {
			speed = SPEED_1000;
			fd = DUPLEX_FULL;
		} else if (f & SUPPORTED_1000baseT_Half)
			speed = SPEED_1000;
		else if (f & SUPPORTED_100baseT_Full) {
			speed = SPEED_100;
			fd = DUPLEX_FULL;
		} else if (f & SUPPORTED_100baseT_Half)
			speed = SPEED_100;
		else if (f & SUPPORTED_10baseT_Full)
			fd = DUPLEX_FULL;

		/* Force link parameters */
		dev->phy.def->ops->setup_forced(&dev->phy, speed, fd);
	}
	return 0;
}
static int __devinit emac_init_config(struct emac_instance *dev)
{
	struct device_node *np = dev->ofdev->node;
	const void *p;
	unsigned int plen;
	const char *pm, *phy_modes[] = {
		[PHY_MODE_NA] = "",
		[PHY_MODE_MII] = "mii",
		[PHY_MODE_RMII] = "rmii",
		[PHY_MODE_SMII] = "smii",
		[PHY_MODE_RGMII] = "rgmii",
		[PHY_MODE_TBI] = "tbi",
		[PHY_MODE_GMII] = "gmii",
		[PHY_MODE_RTBI] = "rtbi",
		[PHY_MODE_SGMII] = "sgmii",
	};
	/* Read config from device-tree */
	if (emac_read_uint_prop(np, "mal-device", &dev->mal_ph, 1))
		return -ENXIO;
	if (emac_read_uint_prop(np, "mal-tx-channel", &dev->mal_tx_chan, 1))
		return -ENXIO;
	if (emac_read_uint_prop(np, "mal-rx-channel", &dev->mal_rx_chan, 1))
		return -ENXIO;
	if (emac_read_uint_prop(np, "cell-index", &dev->cell_index, 1))
		return -ENXIO;
	if (emac_read_uint_prop(np, "max-frame-size", &dev->max_mtu, 0))
		dev->max_mtu = 1500;
	if (emac_read_uint_prop(np, "rx-fifo-size", &dev->rx_fifo_size, 0))
		dev->rx_fifo_size = 2048;
	if (emac_read_uint_prop(np, "tx-fifo-size", &dev->tx_fifo_size, 0))
		dev->tx_fifo_size = 2048;
	if (emac_read_uint_prop(np, "rx-fifo-size-gige", &dev->rx_fifo_size_gige, 0))
		dev->rx_fifo_size_gige = dev->rx_fifo_size;
	if (emac_read_uint_prop(np, "tx-fifo-size-gige", &dev->tx_fifo_size_gige, 0))
		dev->tx_fifo_size_gige = dev->tx_fifo_size;
	if (emac_read_uint_prop(np, "phy-address", &dev->phy_address, 0))
		dev->phy_address = 0xffffffff;
	if (emac_read_uint_prop(np, "phy-map", &dev->phy_map, 0))
		dev->phy_map = 0xffffffff;
	if (emac_read_uint_prop(np->parent, "clock-frequency", &dev->opb_bus_freq, 1))
		return -ENXIO;
	if (emac_read_uint_prop(np, "tah-device", &dev->tah_ph, 0))
		dev->tah_ph = 0;
	if (emac_read_uint_prop(np, "tah-channel", &dev->tah_port, 0))
		dev->tah_port = 0;
	if (emac_read_uint_prop(np, "mdio-device", &dev->mdio_ph, 0))
		dev->mdio_ph = 0;
	if (emac_read_uint_prop(np, "zmii-device", &dev->zmii_ph, 0))
		dev->zmii_ph = 0;
	if (emac_read_uint_prop(np, "zmii-channel", &dev->zmii_port, 0))
		dev->zmii_port = 0xffffffff;
	if (emac_read_uint_prop(np, "rgmii-device", &dev->rgmii_ph, 0))
		dev->rgmii_ph = 0;
	if (emac_read_uint_prop(np, "rgmii-channel", &dev->rgmii_port, 0))
		dev->rgmii_port = 0xffffffff;
	if (emac_read_uint_prop(np, "fifo-entry-size", &dev->fifo_entry_size, 0))
		dev->fifo_entry_size = 16;
	if (emac_read_uint_prop(np, "mal-burst-size", &dev->mal_burst_size, 0))
		dev->mal_burst_size = 256;
	/* PHY mode needs some decoding */
	dev->phy_mode = PHY_MODE_NA;
	pm = of_get_property(np, "phy-mode", &plen);
	if (pm != NULL) {
		int i;
		for (i = 0; i < ARRAY_SIZE(phy_modes); i++)
			if (!strcasecmp(pm, phy_modes[i])) {
				dev->phy_mode = i;
				break;
			}
	}

	/* Backward compat with non-final DT */
	if (dev->phy_mode == PHY_MODE_NA && pm != NULL && plen == 4) {
		u32 nmode = *(const u32 *)pm;
		if (nmode > PHY_MODE_NA && nmode <= PHY_MODE_SGMII)
			dev->phy_mode = nmode;
	}
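	/*
	 * Illustrative encodings for the two paths above (values are
	 * hypothetical): a current tree uses the string form, an older
	 * one stored a raw PHY_MODE_* number in a 4-byte cell:
	 *
	 *	phy-mode = "rgmii";	-- matched by name above
	 *	phy-mode = <4>;		-- legacy form, range-checked here
	 */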
	/* Check EMAC version */
	if (of_device_is_compatible(np, "ibm,emac4sync")) {
		dev->features |= (EMAC_FTR_EMAC4 | EMAC_FTR_EMAC4SYNC);
	} else if (of_device_is_compatible(np, "ibm,emac4")) {
		dev->features |= EMAC_FTR_EMAC4;
		if (of_device_is_compatible(np, "ibm,emac-440gx"))
			dev->features |= EMAC_FTR_440GX_PHY_CLK_FIX;
	} else {
		if (of_device_is_compatible(np, "ibm,emac-440ep") ||
		    of_device_is_compatible(np, "ibm,emac-440gr"))
			dev->features |= EMAC_FTR_440EP_PHY_CLK_FIX;
	}
	/* Fixup some feature bits based on the device tree */
	if (of_get_property(np, "has-inverted-stacr-oc", NULL))
		dev->features |= EMAC_FTR_STACR_OC_INVERT;
	if (of_get_property(np, "has-new-stacr-staopc", NULL))
		dev->features |= EMAC_FTR_HAS_NEW_STACR;

	/* CAB lacks the appropriate properties */
	if (of_device_is_compatible(np, "ibm,emac-axon"))
		dev->features |= EMAC_FTR_HAS_NEW_STACR |
			EMAC_FTR_STACR_OC_INVERT;
	/* Enable TAH/ZMII/RGMII features as found */
	if (dev->tah_ph != 0) {
#ifdef CONFIG_IBM_NEW_EMAC_TAH
		dev->features |= EMAC_FTR_HAS_TAH;
#else
		printk(KERN_ERR "%s: TAH support not enabled !\n", np->full_name);
		return -ENXIO;
#endif
	}
	if (dev->zmii_ph != 0) {
#ifdef CONFIG_IBM_NEW_EMAC_ZMII
		dev->features |= EMAC_FTR_HAS_ZMII;
#else
		printk(KERN_ERR "%s: ZMII support not enabled !\n", np->full_name);
		return -ENXIO;
#endif
	}
	if (dev->rgmii_ph != 0) {
#ifdef CONFIG_IBM_NEW_EMAC_RGMII
		dev->features |= EMAC_FTR_HAS_RGMII;
#else
		printk(KERN_ERR "%s: RGMII support not enabled !\n", np->full_name);
		return -ENXIO;
#endif
	}
	/* Read MAC-address */
	p = of_get_property(np, "local-mac-address", NULL);
	if (p == NULL) {
		printk(KERN_ERR "%s: Can't find local-mac-address property\n",
		       np->full_name);
		return -ENXIO;
	}
	memcpy(dev->ndev->dev_addr, p, 6);

	/* IAHT and GAHT filter parameterization */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) {
		dev->xaht_slots_shift = EMAC4SYNC_XAHT_SLOTS_SHIFT;
		dev->xaht_width_shift = EMAC4SYNC_XAHT_WIDTH_SHIFT;
	} else {
		dev->xaht_slots_shift = EMAC4_XAHT_SLOTS_SHIFT;
		dev->xaht_width_shift = EMAC4_XAHT_WIDTH_SHIFT;
	}
	DBG(dev, "features     : 0x%08x / 0x%08x\n", dev->features, EMAC_FTRS_POSSIBLE);
	DBG(dev, "tx_fifo_size : %d (%d gige)\n", dev->tx_fifo_size, dev->tx_fifo_size_gige);
	DBG(dev, "rx_fifo_size : %d (%d gige)\n", dev->rx_fifo_size, dev->rx_fifo_size_gige);
	DBG(dev, "max_mtu      : %d\n", dev->max_mtu);
	DBG(dev, "OPB freq     : %d\n", dev->opb_bus_freq);

	return 0;
}
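/*
 * Putting emac_init_config() together, a hypothetical device-tree node
 * covering the properties read above might look like this (unit
 * address, phandles and sizes are illustrative only; reg/interrupts
 * omitted for brevity):
 *
 *	EMAC0: ethernet@ef600e00 {
 *		compatible = "ibm,emac4";
 *		cell-index = <0>;
 *		mal-device = <&MAL0>;
 *		mal-tx-channel = <0>;
 *		mal-rx-channel = <0>;
 *		max-frame-size = <1500>;
 *		rx-fifo-size = <4096>;
 *		tx-fifo-size = <2048>;
 *		phy-mode = "rgmii";
 *		phy-map = <0>;
 *		rgmii-device = <&RGMII0>;
 *		rgmii-channel = <0>;
 *		local-mac-address = [000000000000]; -- filled by firmware
 *	};
 */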
static int __devinit emac_probe(struct of_device *ofdev,
				const struct of_device_id *match)
{
	struct net_device *ndev;
	struct emac_instance *dev;
	struct device_node *np = ofdev->node;
	struct device_node **blist = NULL;
	int err, i;

	/* Skip unused/unwired EMACS. We leave the check for an unused
	 * property here for now, but new flat device trees should set a
	 * status property to "disabled" instead.
	 */
	if (of_get_property(np, "unused", NULL) || !of_device_is_available(np))
		return -ENODEV;

	/* Find ourselves in the bootlist if we are there */
	for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
		if (emac_boot_list[i] == np)
			blist = &emac_boot_list[i];

	/* Allocate our net_device structure */
	err = -ENOMEM;
	ndev = alloc_etherdev(sizeof(struct emac_instance));
	if (!ndev) {
		printk(KERN_ERR "%s: could not allocate ethernet device!\n",
		       np->full_name);
		goto err_gone;
	}
	dev = netdev_priv(ndev);
	dev->ndev = ndev;
	dev->ofdev = ofdev;
	dev->blist = blist;
	SET_NETDEV_DEV(ndev, &ofdev->dev);
	/* Initialize some embedded data structures */
	mutex_init(&dev->mdio_lock);
	mutex_init(&dev->link_lock);
	spin_lock_init(&dev->lock);
	INIT_WORK(&dev->reset_work, emac_reset_work);

	/* Init various config data based on device-tree */
	err = emac_init_config(dev);
	if (err != 0)
		goto err_free;

	/* Get interrupts. EMAC irq is mandatory, WOL irq is optional */
	dev->emac_irq = irq_of_parse_and_map(np, 0);
	dev->wol_irq = irq_of_parse_and_map(np, 1);
	if (dev->emac_irq == NO_IRQ) {
		printk(KERN_ERR "%s: Can't map main interrupt\n", np->full_name);
		goto err_free;
	}
	ndev->irq = dev->emac_irq;

	/* Map EMAC regs */
	if (of_address_to_resource(np, 0, &dev->rsrc_regs)) {
		printk(KERN_ERR "%s: Can't get registers address\n",
		       np->full_name);
		goto err_irq_unmap;
	}
	// TODO : request_mem_region
	dev->emacp = ioremap(dev->rsrc_regs.start,
			     dev->rsrc_regs.end - dev->rsrc_regs.start + 1);
	if (dev->emacp == NULL) {
		printk(KERN_ERR "%s: Can't map device registers!\n",
		       np->full_name);
		err = -ENOMEM;
		goto err_irq_unmap;
	}
	/* Wait for dependent devices */
	err = emac_wait_deps(dev);
	if (err) {
		printk(KERN_ERR
		       "%s: Timeout waiting for dependent devices\n",
		       np->full_name);
		/* display more info about what's missing ? */
		goto err_reg_unmap;
	}
	dev->mal = dev_get_drvdata(&dev->mal_dev->dev);
	if (dev->mdio_dev != NULL)
		dev->mdio_instance = dev_get_drvdata(&dev->mdio_dev->dev);

	/* Register with MAL */
	dev->commac.ops = &emac_commac_ops;
	dev->commac.dev = dev;
	dev->commac.tx_chan_mask = MAL_CHAN_MASK(dev->mal_tx_chan);
	dev->commac.rx_chan_mask = MAL_CHAN_MASK(dev->mal_rx_chan);
	err = mal_register_commac(dev->mal, &dev->commac);
	if (err) {
		printk(KERN_ERR "%s: failed to register with mal %s!\n",
		       np->full_name, dev->mal_dev->node->full_name);
		goto err_rel_deps;
	}
	dev->rx_skb_size = emac_rx_skb_size(ndev->mtu);
	dev->rx_sync_size = emac_rx_sync_size(ndev->mtu);

	/* Get pointers to BD rings */
	dev->tx_desc =
	    dev->mal->bd_virt + mal_tx_bd_offset(dev->mal, dev->mal_tx_chan);
	dev->rx_desc =
	    dev->mal->bd_virt + mal_rx_bd_offset(dev->mal, dev->mal_rx_chan);

	DBG(dev, "tx_desc %p" NL, dev->tx_desc);
	DBG(dev, "rx_desc %p" NL, dev->rx_desc);

	/* Clean rings */
	memset(dev->tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor));
	memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor));
	memset(dev->tx_skb, 0, NUM_TX_BUFF * sizeof(struct sk_buff *));
	memset(dev->rx_skb, 0, NUM_RX_BUFF * sizeof(struct sk_buff *));
	/* Attach to ZMII, if needed */
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII) &&
	    (err = zmii_attach(dev->zmii_dev, dev->zmii_port, &dev->phy_mode)) != 0)
		goto err_unreg_commac;

	/* Attach to RGMII, if needed */
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII) &&
	    (err = rgmii_attach(dev->rgmii_dev, dev->rgmii_port, dev->phy_mode)) != 0)
		goto err_detach_zmii;

	/* Attach to TAH, if needed */
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
	    (err = tah_attach(dev->tah_dev, dev->tah_port)) != 0)
		goto err_detach_rgmii;

	/* Set some link defaults before we can find out real parameters */
	dev->phy.speed = SPEED_100;
	dev->phy.duplex = DUPLEX_FULL;
	dev->phy.autoneg = AUTONEG_DISABLE;
	dev->phy.pause = dev->phy.asym_pause = 0;
	dev->stop_timeout = STOP_TIMEOUT_100;
	INIT_DELAYED_WORK(&dev->link_work, emac_link_timer);

	/* Find PHY if any */
	err = emac_init_phy(dev);
	if (err != 0)
		goto err_detach_tah;
	/* Fill in the driver function table */
	ndev->open = &emac_open;
	if (dev->tah_dev)
		ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
	ndev->tx_timeout = &emac_tx_timeout;
	ndev->watchdog_timeo = 5 * HZ;
	ndev->stop = &emac_close;
	ndev->get_stats = &emac_stats;
	ndev->set_multicast_list = &emac_set_multicast_list;
	ndev->do_ioctl = &emac_ioctl;
	if (emac_phy_supports_gige(dev->phy_mode)) {
		ndev->hard_start_xmit = &emac_start_xmit_sg;
		ndev->change_mtu = &emac_change_mtu;
		dev->commac.ops = &emac_commac_sg_ops;
	} else
		ndev->hard_start_xmit = &emac_start_xmit;
	SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);

	netif_carrier_off(ndev);
	netif_stop_queue(ndev);

	err = register_netdev(ndev);
	if (err) {
		printk(KERN_ERR "%s: failed to register net device (%d)!\n",
		       np->full_name, err);
		goto err_detach_tah;
	}

	/* Set our drvdata last as we don't want them visible until we are
	 * fully initialized
	 */
	wmb();
	dev_set_drvdata(&ofdev->dev, dev);

	/* There's a new kid in town ! Let's tell everybody */
	wake_up_all(&emac_probe_wait);

	printk(KERN_INFO
	       "%s: EMAC-%d %s, MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
	       ndev->name, dev->cell_index, np->full_name,
	       ndev->dev_addr[0], ndev->dev_addr[1], ndev->dev_addr[2],
	       ndev->dev_addr[3], ndev->dev_addr[4], ndev->dev_addr[5]);

	if (dev->phy.address >= 0)
		printk("%s: found %s PHY (0x%02x)\n", ndev->name,
		       dev->phy.def->name, dev->phy.address);

	emac_dbg_register(dev);

	/* Life is good */
	return 0;
	/* I have a bad feeling about this ... */

 err_detach_tah:
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		tah_detach(dev->tah_dev, dev->tah_port);
 err_detach_rgmii:
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
 err_detach_zmii:
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_detach(dev->zmii_dev, dev->zmii_port);
 err_unreg_commac:
	mal_unregister_commac(dev->mal, &dev->commac);
 err_rel_deps:
	emac_put_deps(dev);
 err_reg_unmap:
	iounmap(dev->emacp);
 err_irq_unmap:
	if (dev->wol_irq != NO_IRQ)
		irq_dispose_mapping(dev->wol_irq);
	if (dev->emac_irq != NO_IRQ)
		irq_dispose_mapping(dev->emac_irq);
 err_free:
	kfree(ndev);
 err_gone:
	/* if we were on the bootlist, remove us as we won't show up and
	 * wake up all waiters to notify them in case they were waiting
	 * on us
	 */
	if (blist) {
		*blist = NULL;
		wake_up_all(&emac_probe_wait);
	}
	return err;
}
static int __devexit emac_remove(struct of_device *ofdev)
{
	struct emac_instance *dev = dev_get_drvdata(&ofdev->dev);

	DBG(dev, "remove" NL);

	dev_set_drvdata(&ofdev->dev, NULL);

	unregister_netdev(dev->ndev);

	flush_scheduled_work();

	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		tah_detach(dev->tah_dev, dev->tah_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_detach(dev->zmii_dev, dev->zmii_port);

	mal_unregister_commac(dev->mal, &dev->commac);
	emac_put_deps(dev);

	emac_dbg_unregister(dev);
	iounmap(dev->emacp);

	if (dev->wol_irq != NO_IRQ)
		irq_dispose_mapping(dev->wol_irq);
	if (dev->emac_irq != NO_IRQ)
		irq_dispose_mapping(dev->emac_irq);

	kfree(dev->ndev);

	return 0;
}
/* XXX Features in here should be replaced by properties... */
static struct of_device_id emac_match[] =
{
	{ .type = "network", .compatible = "ibm,emac", },
	{ .type = "network", .compatible = "ibm,emac4", },
	{ .type = "network", .compatible = "ibm,emac4sync", },
	{},
};
MODULE_DEVICE_TABLE(of, emac_match);

static struct of_platform_driver emac_driver = {
	.name = "emac",
	.match_table = emac_match,
	.probe = emac_probe,
	.remove = emac_remove,
};
static void __init emac_make_bootlist(void)
{
	struct device_node *np = NULL;
	int j, max, i = 0, k;
	int cell_indices[EMAC_BOOT_LIST_SIZE];

	/* Collect EMACs */
	while ((np = of_find_all_nodes(np)) != NULL) {
		const u32 *idx;

		if (of_match_node(emac_match, np) == NULL)
			continue;
		if (of_get_property(np, "unused", NULL))
			continue;
		idx = of_get_property(np, "cell-index", NULL);
		if (idx == NULL)
			continue;
		cell_indices[i] = *idx;
		emac_boot_list[i++] = of_node_get(np);
		if (i >= EMAC_BOOT_LIST_SIZE) {
			of_node_put(np);
			break;
		}
	}
	max = i;

	/* Bubble sort them (doh, what a creative algorithm :-) */
	for (i = 0; max > 1 && (i < (max - 1)); i++)
		for (j = i; j < max; j++) {
			if (cell_indices[i] > cell_indices[j]) {
				np = emac_boot_list[i];
				emac_boot_list[i] = emac_boot_list[j];
				emac_boot_list[j] = np;
				k = cell_indices[i];
				cell_indices[i] = cell_indices[j];
				cell_indices[j] = k;
			}
		}
}
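/*
 * Worked example of the sort above: three matching nodes discovered
 * with cell-index 2, 0, 1 end up ordered 0, 1, 2 in emac_boot_list.
 * Since emac_wait_deps() makes each instance with a bootlist
 * predecessor wait on EMAC_DEP_PREV_IDX, the EMACs then come up in
 * cell-index order.
 */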
static int __init emac_init(void)
{
	int rc;

	printk(KERN_INFO DRV_DESC ", version " DRV_VERSION "\n");

	/* Init debug stuff */
	emac_init_debug();

	/* Build EMAC boot list */
	emac_make_bootlist();

	/* Init submodules */
	rc = of_register_platform_driver(&emac_driver);

	return rc;
}
static void __exit emac_exit(void)
{
	int i;

	of_unregister_platform_driver(&emac_driver);

	/* Destroy EMAC boot list */
	for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
		if (emac_boot_list[i])
			of_node_put(emac_boot_list[i]);
}
module_init(emac_init);
module_exit(emac_exit);