2 * drivers/net/ibm_newemac/core.c
4 * Driver for PowerPC 4xx on-chip ethernet controller.
6 * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
7 * <benh@kernel.crashing.org>
9 * Based on the arch/ppc version of the driver:
11 * Copyright (c) 2004, 2005 Zultys Technologies.
12 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
14 * Based on original work by
15 * Matt Porter <mporter@kernel.crashing.org>
16 * (c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
17 * Armin Kuster <akuster@mvista.com>
18 * Johnnie Peters <jpeters@mvista.com>
20 * This program is free software; you can redistribute it and/or modify it
21 * under the terms of the GNU General Public License as published by the
22 * Free Software Foundation; either version 2 of the License, or (at your
23 * option) any later version.
27 #include <linux/sched.h>
28 #include <linux/string.h>
29 #include <linux/errno.h>
30 #include <linux/delay.h>
31 #include <linux/types.h>
32 #include <linux/pci.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/crc32.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #include <linux/bitops.h>
39 #include <linux/workqueue.h>
42 #include <asm/processor.h>
45 #include <asm/uaccess.h>
47 #include <asm/dcr-regs.h>
52 * Lack of dma_unmap_???? calls is intentional.
 * API-correct usage requires additional support state information to be
 * maintained for every RX and TX buffer descriptor (BD). Unfortunately, due to
 * the EMAC design (e.g. a TX buffer passed in from the network stack can be
 * split into several BDs, and either dma_map_single or dma_map_page may be
 * used to map a particular BD), maintaining such information would add
 * additional overhead.
 * The current DMA API implementation for 4xx processors only ensures cache
 * coherency; the dma_unmap_???? routines are empty and are likely to stay
 * this way.
 * I decided to omit the dma_unmap_??? calls because I don't want to add
 * additional complexity just for the sake of following some abstract API,
 * when it doesn't add any real benefit to the driver. I understand that this
 * decision may be controversial, but I really tried to make the code
 * API-correct and efficient at the same time and didn't come up with code
 * I liked :(. --ebs
 */
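
/* For reference, API-correct unmapping would require roughly the following
 * per-BD bookkeeping (an illustrative sketch only -- nothing like this
 * exists in this driver):
 *
 *	struct emac_bd_dma_state {
 *		dma_addr_t	addr;
 *		u16		len;
 *		int		is_page;	// dma_unmap_single vs. dma_unmap_page
 *	};
 *
 * filled in at map time and consumed by the matching dma_unmap_single()/
 * dma_unmap_page() call in the TX and RX cleanup paths.
 */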
68 #define DRV_NAME "emac"
69 #define DRV_VERSION "3.54"
70 #define DRV_DESC "PPC 4xx OCP EMAC driver"
72 MODULE_DESCRIPTION(DRV_DESC);
74 ("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>");
75 MODULE_LICENSE("GPL");
78 * PPC64 doesn't (yet) have a cacheable_memcpy
81 #define cacheable_memcpy(d,s,n) memcpy((d),(s),(n))
84 /* minimum number of free TX descriptors required to wake up TX process */
85 #define EMAC_TX_WAKEUP_THRESH (NUM_TX_BUFF / 4)
/* If the packet size is less than this number, we allocate a small skb and
 * copy the packet contents into it instead of just sending the original big
 * skb up.
 */
90 #define EMAC_RX_COPY_THRESH CONFIG_IBM_NEW_EMAC_RX_COPY_THRESHOLD
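
/* This is the classic "copybreak" optimization: for small packets it is
 * cheaper to memcpy the data into a freshly allocated small skb and hand the
 * original, already DMA-mapped buffer straight back to the RX ring; see the
 * EMAC_RX_COPY_THRESH test in emac_poll_rx().
 */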
/* Since multiple EMACs share MDIO lines in various ways, we need
 * to avoid re-using the same PHY ID in cases where the arch didn't
 * set up precise phy_map entries.
 *
 * XXX This is something that needs to be reworked as we can have multiple
 * EMAC "sets" (multiple ASICs containing several EMACs), though in that
 * case we can probably require explicit PHY IDs in the device-tree.
 */
100 static u32 busy_phy_map;
101 static DEFINE_MUTEX(emac_phy_map_lock);
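
/* busy_phy_map is a plain bitmask of claimed MDIO addresses: bit N set means
 * PHY address N is already used by some EMAC. For example (illustrative
 * value), busy_phy_map == 0x11 means addresses 0 and 4 are taken.
 */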
/* This is the wait queue used to wait on any event related to probe, that
 * is, discovery of MALs, other EMACs, ZMII/RGMIIs, etc...
 */
106 static DECLARE_WAIT_QUEUE_HEAD(emac_probe_wait);
108 /* Having stable interface names is a doomed idea. However, it would be nice
109 * if we didn't have completely random interface names at boot too :-) It's
110 * just a matter of making everybody's life easier. Since we are doing
111 * threaded probing, it's a bit harder though. The base idea here is that
112 * we make up a list of all emacs in the device-tree before we register the
113 * driver. Every emac will then wait for the previous one in the list to
 * initialize before itself. We should also keep that list ordered by
 * cell index.
 * That list is only 4 entries long, meaning that additional EMACs don't
 * get ordering guarantees unless EMAC_BOOT_LIST_SIZE is increased.
 */
120 #define EMAC_BOOT_LIST_SIZE 4
121 static struct device_node *emac_boot_list[EMAC_BOOT_LIST_SIZE];
/* How long should I wait for dependent devices? */
124 #define EMAC_PROBE_DEP_TIMEOUT (HZ * 5)
/* I don't want to litter the system log with timeout errors
 * when we have a brain-damaged PHY.
 */
129 static inline void emac_report_timeout_error(struct emac_instance *dev,
132 if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX |
133 EMAC_FTR_440EP_PHY_CLK_FIX))
134 DBG(dev, "%s" NL, error);
135 else if (net_ratelimit())
136 printk(KERN_ERR "%s: %s\n", dev->ndev->name, error);
/* EMAC PHY clock workaround:
 * 440EP/440GR has a more sane SDR0_MFR register implementation than 440GX,
 * which allows controlling each EMAC clock individually.
 */
143 static inline void emac_rx_clk_tx(struct emac_instance *dev)
145 #ifdef CONFIG_PPC_DCR_NATIVE
146 if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
147 dcri_clrset(SDR0, SDR0_MFR,
148 0, SDR0_MFR_ECS >> dev->cell_index);
152 static inline void emac_rx_clk_default(struct emac_instance *dev)
154 #ifdef CONFIG_PPC_DCR_NATIVE
155 if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
156 dcri_clrset(SDR0, SDR0_MFR,
157 SDR0_MFR_ECS >> dev->cell_index, 0);
161 /* PHY polling intervals */
162 #define PHY_POLL_LINK_ON HZ
163 #define PHY_POLL_LINK_OFF (HZ / 5)
/* Graceful stop timeouts in us.
 * We should allow up to 1 frame time (full-duplex, ignoring collisions).
 */
168 #define STOP_TIMEOUT_10 1230
169 #define STOP_TIMEOUT_100 124
170 #define STOP_TIMEOUT_1000 13
171 #define STOP_TIMEOUT_1000_JUMBO 73
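
/* Sanity check of the values above (illustrative arithmetic, not from the
 * original sources): a maximum-size standard frame occupies about 1538 byte
 * times on the wire (1500 payload + 18 header/FCS + 8 preamble/SFD + 12
 * inter-frame gap), i.e. 12304 bit times: ~1230us at 10Mbps, ~123us at
 * 100Mbps, ~12.3us at 1000Mbps; a ~9000-byte jumbo frame at 1000Mbps takes
 * ~72us. Each constant above covers roughly one such frame time.
 */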
static unsigned char default_mcast_addr[] = {
	0x01, 0x80, 0xC2, 0x00, 0x00, 0x01
};
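
/* 01:80:C2:00:00:01 is the reserved IEEE 802.3x destination address for MAC
 * control (PAUSE) frames; emac_configure() adds it to the multicast filter
 * so that incoming flow control frames are not filtered out.
 */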
177 /* Please, keep in sync with struct ibm_emac_stats/ibm_emac_error_stats */
178 static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = {
179 "rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum",
180 "tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom",
181 "rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu",
182 "rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet",
183 "rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error",
184 "rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range",
185 "rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun",
186 "rx_bad_packet", "rx_runt_packet", "rx_short_event",
187 "rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long",
188 "rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors",
189 "tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral",
190 "tx_bd_excessive_collisions", "tx_bd_late_collision",
191 "tx_bd_multple_collisions", "tx_bd_single_collision",
192 "tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe",
196 static irqreturn_t emac_irq(int irq, void *dev_instance);
197 static void emac_clean_tx_ring(struct emac_instance *dev);
198 static void __emac_set_multicast_list(struct emac_instance *dev);
200 static inline int emac_phy_supports_gige(int phy_mode)
202 return phy_mode == PHY_MODE_GMII ||
203 phy_mode == PHY_MODE_RGMII ||
204 phy_mode == PHY_MODE_TBI ||
205 phy_mode == PHY_MODE_RTBI;
208 static inline int emac_phy_gpcs(int phy_mode)
210 return phy_mode == PHY_MODE_TBI ||
211 phy_mode == PHY_MODE_RTBI;
214 static inline void emac_tx_enable(struct emac_instance *dev)
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;
219 DBG(dev, "tx_enable" NL);
221 r = in_be32(&p->mr0);
222 if (!(r & EMAC_MR0_TXE))
223 out_be32(&p->mr0, r | EMAC_MR0_TXE);
226 static void emac_tx_disable(struct emac_instance *dev)
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;
231 DBG(dev, "tx_disable" NL);
233 r = in_be32(&p->mr0);
234 if (r & EMAC_MR0_TXE) {
235 int n = dev->stop_timeout;
236 out_be32(&p->mr0, r & ~EMAC_MR0_TXE);
		while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n) {
			udelay(1);
			--n;
		}
		if (unlikely(!n))
			emac_report_timeout_error(dev, "TX disable timeout");
246 static void emac_rx_enable(struct emac_instance *dev)
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;
251 if (unlikely(test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags)))
254 DBG(dev, "rx_enable" NL);
256 r = in_be32(&p->mr0);
257 if (!(r & EMAC_MR0_RXE)) {
258 if (unlikely(!(r & EMAC_MR0_RXI))) {
259 /* Wait if previous async disable is still in progress */
260 int n = dev->stop_timeout;
			while (!(r = in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
				udelay(1);
				--n;
			}
			if (unlikely(!n))
				emac_report_timeout_error(dev,
							  "RX disable timeout");
		}
		out_be32(&p->mr0, r | EMAC_MR0_RXE);
275 static void emac_rx_disable(struct emac_instance *dev)
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;
280 DBG(dev, "rx_disable" NL);
282 r = in_be32(&p->mr0);
283 if (r & EMAC_MR0_RXE) {
284 int n = dev->stop_timeout;
285 out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
		while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
			udelay(1);
			--n;
		}
		if (unlikely(!n))
			emac_report_timeout_error(dev, "RX disable timeout");
295 static inline void emac_netif_stop(struct emac_instance *dev)
297 netif_tx_lock_bh(dev->ndev);
298 netif_addr_lock(dev->ndev);
300 netif_addr_unlock(dev->ndev);
301 netif_tx_unlock_bh(dev->ndev);
302 dev->ndev->trans_start = jiffies; /* prevent tx timeout */
303 mal_poll_disable(dev->mal, &dev->commac);
304 netif_tx_disable(dev->ndev);
307 static inline void emac_netif_start(struct emac_instance *dev)
309 netif_tx_lock_bh(dev->ndev);
310 netif_addr_lock(dev->ndev);
312 if (dev->mcast_pending && netif_running(dev->ndev))
313 __emac_set_multicast_list(dev);
314 netif_addr_unlock(dev->ndev);
315 netif_tx_unlock_bh(dev->ndev);
317 netif_wake_queue(dev->ndev);
319 /* NOTE: unconditional netif_wake_queue is only appropriate
320 * so long as all callers are assured to have free tx slots
321 * (taken from tg3... though the case where that is wrong is
322 * not terribly harmful)
324 mal_poll_enable(dev->mal, &dev->commac);
327 static inline void emac_rx_disable_async(struct emac_instance *dev)
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;
332 DBG(dev, "rx_disable_async" NL);
334 r = in_be32(&p->mr0);
335 if (r & EMAC_MR0_RXE)
336 out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
339 static int emac_reset(struct emac_instance *dev)
	struct emac_regs __iomem *p = dev->emacp;
	int n = 20;
344 DBG(dev, "reset" NL);
346 if (!dev->reset_failed) {
		/* 40x erratum suggests stopping RX channel before reset,
		 * we also want to stop the TX channel
		 */
350 emac_rx_disable(dev);
351 emac_tx_disable(dev);
354 out_be32(&p->mr0, EMAC_MR0_SRST);
	while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n)
		--n;

	if (n) {
		dev->reset_failed = 0;
		return 0;
	} else {
		emac_report_timeout_error(dev, "reset timeout");
		dev->reset_failed = 1;
		return -ETIMEDOUT;
	}
368 static void emac_hash_mc(struct emac_instance *dev)
370 const int regs = EMAC_XAHT_REGS(dev);
	u32 *gaht_base = emac_gaht_base(dev);
	u32 gaht_temp[regs];
	struct dev_mc_list *dmi;
	int i;
376 DBG(dev, "hash_mc %d" NL, dev->ndev->mc_count);
378 memset(gaht_temp, 0, sizeof (gaht_temp));
	for (dmi = dev->ndev->mc_list; dmi; dmi = dmi->next) {
		int slot, reg, mask;
382 DBG2(dev, "mc %02x:%02x:%02x:%02x:%02x:%02x" NL,
383 dmi->dmi_addr[0], dmi->dmi_addr[1], dmi->dmi_addr[2],
384 dmi->dmi_addr[3], dmi->dmi_addr[4], dmi->dmi_addr[5]);
386 slot = EMAC_XAHT_CRC_TO_SLOT(dev, ether_crc(ETH_ALEN, dmi->dmi_addr));
387 reg = EMAC_XAHT_SLOT_TO_REG(dev, slot);
388 mask = EMAC_XAHT_SLOT_TO_MASK(dev, slot);
390 gaht_temp[reg] |= mask;
393 for (i = 0; i < regs; i++)
394 out_be32(gaht_base + i, gaht_temp[i]);
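
/* Illustrative example of the hashing above, assuming a classic EMAC with a
 * 64-bit GAHT spread over four 16-bit registers: the slot is derived from
 * the top six bits of ether_crc(), then split into a register index
 * (slot / 16) and a bit position within that register (slot % 16). The
 * EMAC_XAHT_* macros abstract this so the same code also drives the wider
 * hash tables of newer EMAC variants.
 */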
397 static inline u32 emac_iff2rmr(struct net_device *ndev)
399 struct emac_instance *dev = netdev_priv(ndev);
402 r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE;
404 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
	if (ndev->flags & IFF_PROMISC)
		r |= EMAC_RMR_PME;
	else if (ndev->flags & IFF_ALLMULTI ||
		 (ndev->mc_count > EMAC_XAHT_SLOTS(dev)))
		r |= EMAC_RMR_PMME;
	else if (ndev->mc_count > 0)
		r |= EMAC_RMR_MAE;

	return r;
}
420 static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
422 u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC_MR1_TR0_MULT;
424 DBG2(dev, "__emac_calc_base_mr1" NL);
428 ret |= EMAC_MR1_TFS_2K;
		printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
		       dev->ndev->name, tx_size);
437 ret |= EMAC_MR1_RFS_16K;
440 ret |= EMAC_MR1_RFS_4K;
443 printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
444 dev->ndev->name, rx_size);
450 static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
452 u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC4_MR1_TR |
453 EMAC4_MR1_OBCI(dev->opb_bus_freq / 1000000);
455 DBG2(dev, "__emac4_calc_base_mr1" NL);
459 ret |= EMAC4_MR1_TFS_4K;
462 ret |= EMAC4_MR1_TFS_2K;
		printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
		       dev->ndev->name, tx_size);
471 ret |= EMAC4_MR1_RFS_16K;
474 ret |= EMAC4_MR1_RFS_4K;
477 ret |= EMAC4_MR1_RFS_2K;
480 printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
481 dev->ndev->name, rx_size);
487 static u32 emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
489 return emac_has_feature(dev, EMAC_FTR_EMAC4) ?
490 __emac4_calc_base_mr1(dev, tx_size, rx_size) :
491 __emac_calc_base_mr1(dev, tx_size, rx_size);
494 static inline u32 emac_calc_trtr(struct emac_instance *dev, unsigned int size)
496 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		return ((size >> 6) - 1) << EMAC_TRTR_SHIFT_EMAC4;
	else
		return ((size >> 6) - 1) << EMAC_TRTR_SHIFT;
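
/* Example (illustrative): emac_calc_trtr(dev, 1024) encodes a 1024-byte
 * transmit request threshold as (1024 >> 6) - 1 = 15; only the field
 * position differs between EMAC4 and the older core.
 */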
502 static inline u32 emac_calc_rwmr(struct emac_instance *dev,
503 unsigned int low, unsigned int high)
505 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
506 return (low << 22) | ( (high & 0x3ff) << 6);
508 return (low << 23) | ( (high & 0x1ff) << 7);
511 static int emac_configure(struct emac_instance *dev)
513 struct emac_regs __iomem *p = dev->emacp;
514 struct net_device *ndev = dev->ndev;
	int tx_size, rx_size, link = netif_carrier_ok(dev->ndev);
	u32 r, mr1 = 0;
518 DBG(dev, "configure" NL);
521 out_be32(&p->mr1, in_be32(&p->mr1)
522 | EMAC_MR1_FDE | EMAC_MR1_ILE);
524 } else if (emac_reset(dev) < 0)
527 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
528 tah_reset(dev->tah_dev);
530 DBG(dev, " link = %d duplex = %d, pause = %d, asym_pause = %d\n",
531 link, dev->phy.duplex, dev->phy.pause, dev->phy.asym_pause);
533 /* Default fifo sizes */
534 tx_size = dev->tx_fifo_size;
535 rx_size = dev->rx_fifo_size;
	/* No link, force loopback */
	if (!link)
		mr1 = EMAC_MR1_FDE | EMAC_MR1_ILE;
541 /* Check for full duplex */
542 else if (dev->phy.duplex == DUPLEX_FULL)
543 mr1 |= EMAC_MR1_FDE | EMAC_MR1_MWSW_001;
545 /* Adjust fifo sizes, mr1 and timeouts based on link speed */
546 dev->stop_timeout = STOP_TIMEOUT_10;
	switch (dev->phy.speed) {
	case SPEED_1000:
		if (emac_phy_gpcs(dev->phy.mode)) {
550 mr1 |= EMAC_MR1_MF_1000GPCS |
551 EMAC_MR1_MF_IPPA(dev->phy.address);
553 /* Put some arbitrary OUI, Manuf & Rev IDs so we can
554 * identify this GPCS PHY later.
556 out_be32(&p->u1.emac4.ipcr, 0xdeadbeef);
		} else
			mr1 |= EMAC_MR1_MF_1000;
560 /* Extended fifo sizes */
561 tx_size = dev->tx_fifo_size_gige;
562 rx_size = dev->rx_fifo_size_gige;
564 if (dev->ndev->mtu > ETH_DATA_LEN) {
565 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
				mr1 |= EMAC4_MR1_JPSM;
			else
				mr1 |= EMAC_MR1_JPSM;
			dev->stop_timeout = STOP_TIMEOUT_1000_JUMBO;
		} else
			dev->stop_timeout = STOP_TIMEOUT_1000;
		break;
	case SPEED_100:
		mr1 |= EMAC_MR1_MF_100;
		dev->stop_timeout = STOP_TIMEOUT_100;
		break;
	default:		/* make gcc happy */
		break;
	}
581 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_set_speed(dev->rgmii_dev, dev->rgmii_port,
				dev->phy.speed);
584 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
585 zmii_set_speed(dev->zmii_dev, dev->zmii_port, dev->phy.speed);
	/* An erratum on 40x forces us to NOT use integrated flow control;
	 * let's hope it works on 44x ;)
	 */
590 if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x) &&
591 dev->phy.duplex == DUPLEX_FULL) {
		if (dev->phy.pause)
			mr1 |= EMAC_MR1_EIFC | EMAC_MR1_APP;
		else if (dev->phy.asym_pause)
			mr1 |= EMAC_MR1_APP;
	}
598 /* Add base settings & fifo sizes & program MR1 */
599 mr1 |= emac_calc_base_mr1(dev, tx_size, rx_size);
600 out_be32(&p->mr1, mr1);
602 /* Set individual MAC address */
603 out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
604 out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
605 (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
608 /* VLAN Tag Protocol ID */
609 out_be32(&p->vtpid, 0x8100);
611 /* Receive mode register */
612 r = emac_iff2rmr(ndev);
613 if (r & EMAC_RMR_MAE)
615 out_be32(&p->rmr, r);
617 /* FIFOs thresholds */
618 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
619 r = EMAC4_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
620 tx_size / 2 / dev->fifo_entry_size);
	else
		r = EMAC_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
			      tx_size / 2 / dev->fifo_entry_size);
624 out_be32(&p->tmr1, r);
625 out_be32(&p->trtr, emac_calc_trtr(dev, tx_size / 2));
	/* PAUSE frame is sent when RX FIFO reaches its high-water mark,
	   there should still be enough space in the FIFO to allow our link
	   partner time to process this frame and also time to send a PAUSE
	   frame itself.

	   Here is the worst case scenario for the RX FIFO "headroom"
	   (from "The Switch Book") (100Mbps, without preamble, inter-frame gap):

	   1) One maximum-length frame on TX                    1522 bytes
	   2) One PAUSE frame time                                64 bytes
	   3) PAUSE frame decode time allowance                   64 bytes
	   4) One maximum-length frame on RX                    1522 bytes
	   5) Round-trip propagation delay of the link (100Mb)    15 bytes
	   ----------
	   3187 bytes

	   I chose to set the high-water mark to RX_FIFO_SIZE / 4 (1024 bytes)
	   and the low-water mark to RX_FIFO_SIZE / 8 (512 bytes).
	 */
646 r = emac_calc_rwmr(dev, rx_size / 8 / dev->fifo_entry_size,
647 rx_size / 4 / dev->fifo_entry_size);
648 out_be32(&p->rwmr, r);
650 /* Set PAUSE timer to the maximum */
651 out_be32(&p->ptr, 0xffff);
654 r = EMAC_ISR_OVR | EMAC_ISR_BP | EMAC_ISR_SE |
655 EMAC_ISR_ALE | EMAC_ISR_BFCS | EMAC_ISR_PTLE | EMAC_ISR_ORE |
656 EMAC_ISR_IRE | EMAC_ISR_TE;
657 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r |= EMAC4_ISR_TXPE | EMAC4_ISR_RXPE /* | EMAC4_ISR_TXUE |
			 EMAC4_ISR_RXOE | */;
660 out_be32(&p->iser, r);
662 /* We need to take GPCS PHY out of isolate mode after EMAC reset */
663 if (emac_phy_gpcs(dev->phy.mode))
664 emac_mii_reset_phy(&dev->phy);
666 /* Required for Pause packet support in EMAC */
667 dev_mc_add(ndev, default_mcast_addr, sizeof(default_mcast_addr), 1);
672 static void emac_reinitialize(struct emac_instance *dev)
674 DBG(dev, "reinitialize" NL);
676 emac_netif_stop(dev);
	if (!emac_configure(dev)) {
		emac_tx_enable(dev);
		emac_rx_enable(dev);
	}
	emac_netif_start(dev);
684 static void emac_full_tx_reset(struct emac_instance *dev)
686 DBG(dev, "full_tx_reset" NL);
688 emac_tx_disable(dev);
689 mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
690 emac_clean_tx_ring(dev);
691 dev->tx_cnt = dev->tx_slot = dev->ack_slot = 0;
695 mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
700 static void emac_reset_work(struct work_struct *work)
702 struct emac_instance *dev = container_of(work, struct emac_instance, reset_work);
704 DBG(dev, "reset_work" NL);
706 mutex_lock(&dev->link_lock);
708 emac_netif_stop(dev);
709 emac_full_tx_reset(dev);
710 emac_netif_start(dev);
712 mutex_unlock(&dev->link_lock);
715 static void emac_tx_timeout(struct net_device *ndev)
717 struct emac_instance *dev = netdev_priv(ndev);
719 DBG(dev, "tx_timeout" NL);
721 schedule_work(&dev->reset_work);
725 static inline int emac_phy_done(struct emac_instance *dev, u32 stacr)
727 int done = !!(stacr & EMAC_STACR_OC);
729 if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
735 static int __emac_mdio_read(struct emac_instance *dev, u8 id, u8 reg)
737 struct emac_regs __iomem *p = dev->emacp;
	u32 r;
	int n, err = -ETIMEDOUT;
741 mutex_lock(&dev->mdio_lock);
743 DBG2(dev, "mdio_read(%02x,%02x)" NL, id, reg);
745 /* Enable proper MDIO port */
746 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
747 zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
748 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
749 rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);
751 /* Wait for management interface to become idle */
753 while (!emac_phy_done(dev, in_be32(&p->stacr))) {
756 DBG2(dev, " -> timeout wait idle\n");
761 /* Issue read command */
762 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_STACR_BASE(dev->opb_bus_freq);
	else
		r = EMAC_STACR_BASE(dev->opb_bus_freq);
766 if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
768 if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
		r |= EMACX_STACR_STAC_READ;
	else
		r |= EMAC_STACR_STAC_READ;
772 r |= (reg & EMAC_STACR_PRA_MASK)
773 | ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT);
774 out_be32(&p->stacr, r);
776 /* Wait for read to complete */
778 while (!emac_phy_done(dev, (r = in_be32(&p->stacr)))) {
781 DBG2(dev, " -> timeout wait complete\n");
786 if (unlikely(r & EMAC_STACR_PHYE)) {
787 DBG(dev, "mdio_read(%02x, %02x) failed" NL, id, reg);
792 r = ((r >> EMAC_STACR_PHYD_SHIFT) & EMAC_STACR_PHYD_MASK);
794 DBG2(dev, "mdio_read -> %04x" NL, r);
797 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
798 rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
799 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
800 zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
801 mutex_unlock(&dev->mdio_lock);
803 return err == 0 ? r : err;
806 static void __emac_mdio_write(struct emac_instance *dev, u8 id, u8 reg,
809 struct emac_regs __iomem *p = dev->emacp;
	u32 r;
	int n, err = -ETIMEDOUT;
813 mutex_lock(&dev->mdio_lock);
815 DBG2(dev, "mdio_write(%02x,%02x,%04x)" NL, id, reg, val);
817 /* Enable proper MDIO port */
818 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
819 zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
820 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
821 rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);
823 /* Wait for management interface to be idle */
825 while (!emac_phy_done(dev, in_be32(&p->stacr))) {
828 DBG2(dev, " -> timeout wait idle\n");
833 /* Issue write command */
834 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_STACR_BASE(dev->opb_bus_freq);
	else
		r = EMAC_STACR_BASE(dev->opb_bus_freq);
838 if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
840 if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
		r |= EMACX_STACR_STAC_WRITE;
	else
		r |= EMAC_STACR_STAC_WRITE;
844 r |= (reg & EMAC_STACR_PRA_MASK) |
845 ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT) |
846 (val << EMAC_STACR_PHYD_SHIFT);
847 out_be32(&p->stacr, r);
849 /* Wait for write to complete */
851 while (!emac_phy_done(dev, in_be32(&p->stacr))) {
854 DBG2(dev, " -> timeout wait complete\n");
860 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
861 rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
862 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
863 zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
864 mutex_unlock(&dev->mdio_lock);
867 static int emac_mdio_read(struct net_device *ndev, int id, int reg)
869 struct emac_instance *dev = netdev_priv(ndev);
872 res = __emac_mdio_read(dev->mdio_instance ? dev->mdio_instance : dev,
877 static void emac_mdio_write(struct net_device *ndev, int id, int reg, int val)
879 struct emac_instance *dev = netdev_priv(ndev);
881 __emac_mdio_write(dev->mdio_instance ? dev->mdio_instance : dev,
882 (u8) id, (u8) reg, (u16) val);
886 static void __emac_set_multicast_list(struct emac_instance *dev)
888 struct emac_regs __iomem *p = dev->emacp;
889 u32 rmr = emac_iff2rmr(dev->ndev);
891 DBG(dev, "__multicast %08x" NL, rmr);
	/* I decided to relax register access rules here to avoid
	 * a full EMAC reset.
	 *
	 * There is a real problem with the EMAC4 core if we use the MWSW_001
	 * bit in the MR1 register and do a full EMAC reset.
	 * One TX BD status update is delayed and, after EMAC reset, it
	 * never happens, resulting in a hung TX (it'll be recovered by the
	 * TX timeout handler eventually, but this is just gross).
	 * So we either have to do a full TX reset or try to cheat here :)
	 *
	 * The only required change is to the RX mode register, so I *think*
	 * all we need is just to stop the RX channel. This seems to work on
	 * all tested SoCs. --ebs
	 *
	 * If we need the full reset, we might just trigger the workqueue
	 * and do it async... a bit nasty but should work --BenH
	 */
910 dev->mcast_pending = 0;
911 emac_rx_disable(dev);
	if (rmr & EMAC_RMR_MAE)
		emac_hash_mc(dev);
	out_be32(&p->rmr, rmr);
919 static void emac_set_multicast_list(struct net_device *ndev)
921 struct emac_instance *dev = netdev_priv(ndev);
923 DBG(dev, "multicast" NL);
925 BUG_ON(!netif_running(dev->ndev));
928 dev->mcast_pending = 1;
931 __emac_set_multicast_list(dev);
934 static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu)
936 int rx_sync_size = emac_rx_sync_size(new_mtu);
	int rx_skb_size = emac_rx_skb_size(new_mtu);
	int i, ret = 0;
940 mutex_lock(&dev->link_lock);
941 emac_netif_stop(dev);
942 emac_rx_disable(dev);
943 mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);
945 if (dev->rx_sg_skb) {
946 ++dev->estats.rx_dropped_resize;
947 dev_kfree_skb(dev->rx_sg_skb);
948 dev->rx_sg_skb = NULL;
951 /* Make a first pass over RX ring and mark BDs ready, dropping
952 * non-processed packets on the way. We need this as a separate pass
953 * to simplify error recovery in the case of allocation failure later.
955 for (i = 0; i < NUM_RX_BUFF; ++i) {
956 if (dev->rx_desc[i].ctrl & MAL_RX_CTRL_FIRST)
957 ++dev->estats.rx_dropped_resize;
959 dev->rx_desc[i].data_len = 0;
960 dev->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY |
961 (i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
964 /* Reallocate RX ring only if bigger skb buffers are required */
965 if (rx_skb_size <= dev->rx_skb_size)
968 /* Second pass, allocate new skbs */
969 for (i = 0; i < NUM_RX_BUFF; ++i) {
970 struct sk_buff *skb = alloc_skb(rx_skb_size, GFP_ATOMIC);
976 BUG_ON(!dev->rx_skb[i]);
977 dev_kfree_skb(dev->rx_skb[i]);
979 skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
980 dev->rx_desc[i].data_ptr =
981 dma_map_single(&dev->ofdev->dev, skb->data - 2, rx_sync_size,
982 DMA_FROM_DEVICE) + 2;
983 dev->rx_skb[i] = skb;
986 /* Check if we need to change "Jumbo" bit in MR1 */
987 if ((new_mtu > ETH_DATA_LEN) ^ (dev->ndev->mtu > ETH_DATA_LEN)) {
988 /* This is to prevent starting RX channel in emac_rx_enable() */
989 set_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
991 dev->ndev->mtu = new_mtu;
992 emac_full_tx_reset(dev);
995 mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(new_mtu));
998 clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
1000 mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
1001 emac_rx_enable(dev);
1002 emac_netif_start(dev);
	mutex_unlock(&dev->link_lock);

	return ret;
1008 /* Process ctx, rtnl_lock semaphore */
1009 static int emac_change_mtu(struct net_device *ndev, int new_mtu)
	struct emac_instance *dev = netdev_priv(ndev);
	int ret = 0;

	if (new_mtu < EMAC_MIN_MTU || new_mtu > dev->max_mtu)
		return -EINVAL;
1017 DBG(dev, "change_mtu(%d)" NL, new_mtu);
1019 if (netif_running(ndev)) {
		/* Check if we really need to reinitialize the RX ring */
1021 if (emac_rx_skb_size(ndev->mtu) != emac_rx_skb_size(new_mtu))
1022 ret = emac_resize_rx_ring(dev, new_mtu);
	}

	if (!ret) {
		ndev->mtu = new_mtu;
		dev->rx_skb_size = emac_rx_skb_size(new_mtu);
		dev->rx_sync_size = emac_rx_sync_size(new_mtu);
	}

	return ret;
1034 static void emac_clean_tx_ring(struct emac_instance *dev)
1038 for (i = 0; i < NUM_TX_BUFF; ++i) {
1039 if (dev->tx_skb[i]) {
1040 dev_kfree_skb(dev->tx_skb[i]);
1041 dev->tx_skb[i] = NULL;
1042 if (dev->tx_desc[i].ctrl & MAL_TX_CTRL_READY)
1043 ++dev->estats.tx_dropped;
1045 dev->tx_desc[i].ctrl = 0;
1046 dev->tx_desc[i].data_ptr = 0;
1050 static void emac_clean_rx_ring(struct emac_instance *dev)
1054 for (i = 0; i < NUM_RX_BUFF; ++i)
1055 if (dev->rx_skb[i]) {
1056 dev->rx_desc[i].ctrl = 0;
1057 dev_kfree_skb(dev->rx_skb[i]);
1058 dev->rx_skb[i] = NULL;
1059 dev->rx_desc[i].data_ptr = 0;
1062 if (dev->rx_sg_skb) {
1063 dev_kfree_skb(dev->rx_sg_skb);
1064 dev->rx_sg_skb = NULL;
1068 static inline int emac_alloc_rx_skb(struct emac_instance *dev, int slot,
1071 struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags);
1075 dev->rx_skb[slot] = skb;
1076 dev->rx_desc[slot].data_len = 0;
1078 skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
1079 dev->rx_desc[slot].data_ptr =
1080 dma_map_single(&dev->ofdev->dev, skb->data - 2, dev->rx_sync_size,
1081 DMA_FROM_DEVICE) + 2;
1083 dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
1084 (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
1089 static void emac_print_link_status(struct emac_instance *dev)
1091 if (netif_carrier_ok(dev->ndev))
1092 printk(KERN_INFO "%s: link is up, %d %s%s\n",
1093 dev->ndev->name, dev->phy.speed,
1094 dev->phy.duplex == DUPLEX_FULL ? "FDX" : "HDX",
1095 dev->phy.pause ? ", pause enabled" :
1096 dev->phy.asym_pause ? ", asymmetric pause enabled" : "");
1098 printk(KERN_INFO "%s: link is down\n", dev->ndev->name);
1101 /* Process ctx, rtnl_lock semaphore */
1102 static int emac_open(struct net_device *ndev)
1104 struct emac_instance *dev = netdev_priv(ndev);
1107 DBG(dev, "open" NL);
1109 /* Setup error IRQ handler */
1110 err = request_irq(dev->emac_irq, emac_irq, 0, "EMAC", dev);
1112 printk(KERN_ERR "%s: failed to request IRQ %d\n",
1113 ndev->name, dev->emac_irq);
1117 /* Allocate RX ring */
1118 for (i = 0; i < NUM_RX_BUFF; ++i)
1119 if (emac_alloc_rx_skb(dev, i, GFP_KERNEL)) {
1120 printk(KERN_ERR "%s: failed to allocate RX ring\n",
1125 dev->tx_cnt = dev->tx_slot = dev->ack_slot = dev->rx_slot = 0;
1126 clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
1127 dev->rx_sg_skb = NULL;
1129 mutex_lock(&dev->link_lock);
1132 /* Start PHY polling now.
1134 if (dev->phy.address >= 0) {
1135 int link_poll_interval;
1136 if (dev->phy.def->ops->poll_link(&dev->phy)) {
1137 dev->phy.def->ops->read_link(&dev->phy);
1138 emac_rx_clk_default(dev);
1139 netif_carrier_on(dev->ndev);
1140 link_poll_interval = PHY_POLL_LINK_ON;
1142 emac_rx_clk_tx(dev);
1143 netif_carrier_off(dev->ndev);
1144 link_poll_interval = PHY_POLL_LINK_OFF;
1146 dev->link_polling = 1;
1148 schedule_delayed_work(&dev->link_work, link_poll_interval);
1149 emac_print_link_status(dev);
1151 netif_carrier_on(dev->ndev);
1153 emac_configure(dev);
1154 mal_poll_add(dev->mal, &dev->commac);
1155 mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
1156 mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(ndev->mtu));
1157 mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
1158 emac_tx_enable(dev);
1159 emac_rx_enable(dev);
1160 emac_netif_start(dev);
1162 mutex_unlock(&dev->link_lock);
1166 emac_clean_rx_ring(dev);
1167 free_irq(dev->emac_irq, dev);
1174 static int emac_link_differs(struct emac_instance *dev)
1176 u32 r = in_be32(&dev->emacp->mr1);
1178 int duplex = r & EMAC_MR1_FDE ? DUPLEX_FULL : DUPLEX_HALF;
1179 int speed, pause, asym_pause;
1181 if (r & EMAC_MR1_MF_1000)
1183 else if (r & EMAC_MR1_MF_100)
1188 switch (r & (EMAC_MR1_EIFC | EMAC_MR1_APP)) {
1189 case (EMAC_MR1_EIFC | EMAC_MR1_APP):
1198 pause = asym_pause = 0;
1200 return speed != dev->phy.speed || duplex != dev->phy.duplex ||
1201 pause != dev->phy.pause || asym_pause != dev->phy.asym_pause;
1205 static void emac_link_timer(struct work_struct *work)
1207 struct emac_instance *dev =
1208 container_of((struct delayed_work *)work,
1209 struct emac_instance, link_work);
1210 int link_poll_interval;
1212 mutex_lock(&dev->link_lock);
1213 DBG2(dev, "link timer" NL);
1218 if (dev->phy.def->ops->poll_link(&dev->phy)) {
1219 if (!netif_carrier_ok(dev->ndev)) {
1220 emac_rx_clk_default(dev);
1221 /* Get new link parameters */
1222 dev->phy.def->ops->read_link(&dev->phy);
1224 netif_carrier_on(dev->ndev);
1225 emac_netif_stop(dev);
1226 emac_full_tx_reset(dev);
1227 emac_netif_start(dev);
1228 emac_print_link_status(dev);
1230 link_poll_interval = PHY_POLL_LINK_ON;
1232 if (netif_carrier_ok(dev->ndev)) {
1233 emac_rx_clk_tx(dev);
1234 netif_carrier_off(dev->ndev);
1235 netif_tx_disable(dev->ndev);
1236 emac_reinitialize(dev);
1237 emac_print_link_status(dev);
1239 link_poll_interval = PHY_POLL_LINK_OFF;
1241 schedule_delayed_work(&dev->link_work, link_poll_interval);
1243 mutex_unlock(&dev->link_lock);
1246 static void emac_force_link_update(struct emac_instance *dev)
1248 netif_carrier_off(dev->ndev);
1250 if (dev->link_polling) {
1251 cancel_rearming_delayed_work(&dev->link_work);
1252 if (dev->link_polling)
1253 schedule_delayed_work(&dev->link_work, PHY_POLL_LINK_OFF);
1257 /* Process ctx, rtnl_lock semaphore */
1258 static int emac_close(struct net_device *ndev)
1260 struct emac_instance *dev = netdev_priv(ndev);
1262 DBG(dev, "close" NL);
1264 if (dev->phy.address >= 0) {
1265 dev->link_polling = 0;
1266 cancel_rearming_delayed_work(&dev->link_work);
1268 mutex_lock(&dev->link_lock);
1269 emac_netif_stop(dev);
1271 mutex_unlock(&dev->link_lock);
1273 emac_rx_disable(dev);
1274 emac_tx_disable(dev);
1275 mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);
1276 mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
1277 mal_poll_del(dev->mal, &dev->commac);
1279 emac_clean_tx_ring(dev);
1280 emac_clean_rx_ring(dev);
1282 free_irq(dev->emac_irq, dev);
1287 static inline u16 emac_tx_csum(struct emac_instance *dev,
1288 struct sk_buff *skb)
1290 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
1291 (skb->ip_summed == CHECKSUM_PARTIAL)) {
1292 ++dev->stats.tx_packets_csum;
1293 return EMAC_TX_CTRL_TAH_CSUM;
1298 static inline int emac_xmit_finish(struct emac_instance *dev, int len)
1300 struct emac_regs __iomem *p = dev->emacp;
1301 struct net_device *ndev = dev->ndev;
	/* Send the packet out. If the if makes a significant perf
	 * difference, then we can store the TMR0 value in "dev" instead
	 */
1307 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		out_be32(&p->tmr0, EMAC4_TMR0_XMIT);
	else
		out_be32(&p->tmr0, EMAC_TMR0_XMIT);
1312 if (unlikely(++dev->tx_cnt == NUM_TX_BUFF)) {
1313 netif_stop_queue(ndev);
1314 DBG2(dev, "stopped TX queue" NL);
1317 ndev->trans_start = jiffies;
1318 ++dev->stats.tx_packets;
1319 dev->stats.tx_bytes += len;
1325 static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1327 struct emac_instance *dev = netdev_priv(ndev);
1328 unsigned int len = skb->len;
1331 u16 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
1332 MAL_TX_CTRL_LAST | emac_tx_csum(dev, skb);
1334 slot = dev->tx_slot++;
1335 if (dev->tx_slot == NUM_TX_BUFF) {
1337 ctrl |= MAL_TX_CTRL_WRAP;
1340 DBG2(dev, "xmit(%u) %d" NL, len, slot);
1342 dev->tx_skb[slot] = skb;
1343 dev->tx_desc[slot].data_ptr = dma_map_single(&dev->ofdev->dev,
1346 dev->tx_desc[slot].data_len = (u16) len;
1348 dev->tx_desc[slot].ctrl = ctrl;
1350 return emac_xmit_finish(dev, len);
1353 static inline int emac_xmit_split(struct emac_instance *dev, int slot,
1354 u32 pd, int len, int last, u16 base_ctrl)
1357 u16 ctrl = base_ctrl;
1358 int chunk = min(len, MAL_MAX_TX_SIZE);
1361 slot = (slot + 1) % NUM_TX_BUFF;
1364 ctrl |= MAL_TX_CTRL_LAST;
1365 if (slot == NUM_TX_BUFF - 1)
1366 ctrl |= MAL_TX_CTRL_WRAP;
1368 dev->tx_skb[slot] = NULL;
1369 dev->tx_desc[slot].data_ptr = pd;
1370 dev->tx_desc[slot].data_len = (u16) chunk;
1371 dev->tx_desc[slot].ctrl = ctrl;
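
/* Illustrative example (MAL_MAX_TX_SIZE value assumed): with 4096-byte MAL
 * chunks, a 10000-byte area fed through emac_xmit_split() is emitted as
 * three BDs of 4096 + 4096 + 1808 bytes, with MAL_TX_CTRL_LAST set only on
 * the final BD when "last" is true.
 */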
1382 /* Tx lock BH disabled (SG version for TAH equipped EMACs) */
1383 static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
1385 struct emac_instance *dev = netdev_priv(ndev);
1386 int nr_frags = skb_shinfo(skb)->nr_frags;
1387 int len = skb->len, chunk;
	/* This is the common "fast" path */
1393 if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE))
1394 return emac_start_xmit(skb, ndev);
1396 len -= skb->data_len;
	/* Note, this is only an *estimate*; we can still run out of empty
	 * slots because of the additional fragmentation into
	 * MAL_MAX_TX_SIZE-sized chunks
	 */
1402 if (unlikely(dev->tx_cnt + nr_frags + mal_tx_chunks(len) > NUM_TX_BUFF))
1405 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
1406 emac_tx_csum(dev, skb);
1407 slot = dev->tx_slot;
1410 dev->tx_skb[slot] = NULL;
1411 chunk = min(len, MAL_MAX_TX_SIZE);
1412 dev->tx_desc[slot].data_ptr = pd =
1413 dma_map_single(&dev->ofdev->dev, skb->data, len, DMA_TO_DEVICE);
1414 dev->tx_desc[slot].data_len = (u16) chunk;
1417 slot = emac_xmit_split(dev, slot, pd + chunk, len, !nr_frags,
1420 for (i = 0; i < nr_frags; ++i) {
1421 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
1424 if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
1427 pd = dma_map_page(&dev->ofdev->dev, frag->page, frag->page_offset, len,
1430 slot = emac_xmit_split(dev, slot, pd, len, i == nr_frags - 1,
1434 DBG2(dev, "xmit_sg(%u) %d - %d" NL, skb->len, dev->tx_slot, slot);
1436 /* Attach skb to the last slot so we don't release it too early */
1437 dev->tx_skb[slot] = skb;
1439 /* Send the packet out */
1440 if (dev->tx_slot == NUM_TX_BUFF - 1)
1441 ctrl |= MAL_TX_CTRL_WRAP;
1443 dev->tx_desc[dev->tx_slot].ctrl = ctrl;
1444 dev->tx_slot = (slot + 1) % NUM_TX_BUFF;
1446 return emac_xmit_finish(dev, skb->len);
	/* Well, too bad. Our previous estimate was overly optimistic.
	 * Undo everything.
	 */
1452 while (slot != dev->tx_slot) {
1453 dev->tx_desc[slot].ctrl = 0;
1456 slot = NUM_TX_BUFF - 1;
1458 ++dev->estats.tx_undo;
1461 netif_stop_queue(ndev);
1462 DBG2(dev, "stopped TX queue" NL);
1467 static void emac_parse_tx_error(struct emac_instance *dev, u16 ctrl)
1469 struct emac_error_stats *st = &dev->estats;
1471 DBG(dev, "BD TX error %04x" NL, ctrl);
1474 if (ctrl & EMAC_TX_ST_BFCS)
1475 ++st->tx_bd_bad_fcs;
1476 if (ctrl & EMAC_TX_ST_LCS)
1477 ++st->tx_bd_carrier_loss;
1478 if (ctrl & EMAC_TX_ST_ED)
1479 ++st->tx_bd_excessive_deferral;
1480 if (ctrl & EMAC_TX_ST_EC)
1481 ++st->tx_bd_excessive_collisions;
1482 if (ctrl & EMAC_TX_ST_LC)
1483 ++st->tx_bd_late_collision;
1484 if (ctrl & EMAC_TX_ST_MC)
1485 ++st->tx_bd_multple_collisions;
1486 if (ctrl & EMAC_TX_ST_SC)
1487 ++st->tx_bd_single_collision;
1488 if (ctrl & EMAC_TX_ST_UR)
1489 ++st->tx_bd_underrun;
1490 if (ctrl & EMAC_TX_ST_SQE)
1494 static void emac_poll_tx(void *param)
1496 struct emac_instance *dev = param;
1499 DBG2(dev, "poll_tx, %d %d" NL, dev->tx_cnt, dev->ack_slot);
1501 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
1502 bad_mask = EMAC_IS_BAD_TX_TAH;
1504 bad_mask = EMAC_IS_BAD_TX;
1506 netif_tx_lock_bh(dev->ndev);
1509 int slot = dev->ack_slot, n = 0;
1511 ctrl = dev->tx_desc[slot].ctrl;
1512 if (!(ctrl & MAL_TX_CTRL_READY)) {
1513 struct sk_buff *skb = dev->tx_skb[slot];
1518 dev->tx_skb[slot] = NULL;
1520 slot = (slot + 1) % NUM_TX_BUFF;
1522 if (unlikely(ctrl & bad_mask))
1523 emac_parse_tx_error(dev, ctrl);
1529 dev->ack_slot = slot;
1530 if (netif_queue_stopped(dev->ndev) &&
1531 dev->tx_cnt < EMAC_TX_WAKEUP_THRESH)
1532 netif_wake_queue(dev->ndev);
1534 DBG2(dev, "tx %d pkts" NL, n);
1537 netif_tx_unlock_bh(dev->ndev);
1540 static inline void emac_recycle_rx_skb(struct emac_instance *dev, int slot,
1543 struct sk_buff *skb = dev->rx_skb[slot];
1545 DBG2(dev, "recycle %d %d" NL, slot, len);
1548 dma_map_single(&dev->ofdev->dev, skb->data - 2,
1549 EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE);
1551 dev->rx_desc[slot].data_len = 0;
1553 dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
1554 (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
1557 static void emac_parse_rx_error(struct emac_instance *dev, u16 ctrl)
1559 struct emac_error_stats *st = &dev->estats;
1561 DBG(dev, "BD RX error %04x" NL, ctrl);
1564 if (ctrl & EMAC_RX_ST_OE)
1565 ++st->rx_bd_overrun;
1566 if (ctrl & EMAC_RX_ST_BP)
1567 ++st->rx_bd_bad_packet;
1568 if (ctrl & EMAC_RX_ST_RP)
1569 ++st->rx_bd_runt_packet;
1570 if (ctrl & EMAC_RX_ST_SE)
1571 ++st->rx_bd_short_event;
1572 if (ctrl & EMAC_RX_ST_AE)
1573 ++st->rx_bd_alignment_error;
1574 if (ctrl & EMAC_RX_ST_BFCS)
1575 ++st->rx_bd_bad_fcs;
1576 if (ctrl & EMAC_RX_ST_PTL)
1577 ++st->rx_bd_packet_too_long;
1578 if (ctrl & EMAC_RX_ST_ORE)
1579 ++st->rx_bd_out_of_range;
1580 if (ctrl & EMAC_RX_ST_IRE)
1581 ++st->rx_bd_in_range;
1584 static inline void emac_rx_csum(struct emac_instance *dev,
1585 struct sk_buff *skb, u16 ctrl)
1587 #ifdef CONFIG_IBM_NEW_EMAC_TAH
1588 if (!ctrl && dev->tah_dev) {
1589 skb->ip_summed = CHECKSUM_UNNECESSARY;
1590 ++dev->stats.rx_packets_csum;
1595 static inline int emac_rx_sg_append(struct emac_instance *dev, int slot)
1597 if (likely(dev->rx_sg_skb != NULL)) {
1598 int len = dev->rx_desc[slot].data_len;
1599 int tot_len = dev->rx_sg_skb->len + len;
1601 if (unlikely(tot_len + 2 > dev->rx_skb_size)) {
1602 ++dev->estats.rx_dropped_mtu;
1603 dev_kfree_skb(dev->rx_sg_skb);
1604 dev->rx_sg_skb = NULL;
1606 cacheable_memcpy(skb_tail_pointer(dev->rx_sg_skb),
1607 dev->rx_skb[slot]->data, len);
1608 skb_put(dev->rx_sg_skb, len);
1609 emac_recycle_rx_skb(dev, slot, len);
1613 emac_recycle_rx_skb(dev, slot, 0);
1617 /* NAPI poll context */
1618 static int emac_poll_rx(void *param, int budget)
1620 struct emac_instance *dev = param;
1621 int slot = dev->rx_slot, received = 0;
1623 DBG2(dev, "poll_rx(%d)" NL, budget);
1626 while (budget > 0) {
1628 struct sk_buff *skb;
1629 u16 ctrl = dev->rx_desc[slot].ctrl;
1631 if (ctrl & MAL_RX_CTRL_EMPTY)
1634 skb = dev->rx_skb[slot];
1636 len = dev->rx_desc[slot].data_len;
1638 if (unlikely(!MAL_IS_SINGLE_RX(ctrl)))
1641 ctrl &= EMAC_BAD_RX_MASK;
1642 if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
1643 emac_parse_rx_error(dev, ctrl);
1644 ++dev->estats.rx_dropped_error;
1645 emac_recycle_rx_skb(dev, slot, 0);
1650 if (len < ETH_HLEN) {
1651 ++dev->estats.rx_dropped_stack;
1652 emac_recycle_rx_skb(dev, slot, len);
1656 if (len && len < EMAC_RX_COPY_THRESH) {
1657 struct sk_buff *copy_skb =
1658 alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2, GFP_ATOMIC);
1659 if (unlikely(!copy_skb))
1662 skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2);
1663 cacheable_memcpy(copy_skb->data - 2, skb->data - 2,
1665 emac_recycle_rx_skb(dev, slot, len);
1667 } else if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC)))
1672 skb->dev = dev->ndev;
1673 skb->protocol = eth_type_trans(skb, dev->ndev);
1674 emac_rx_csum(dev, skb, ctrl);
1676 if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
1677 ++dev->estats.rx_dropped_stack;
1679 ++dev->stats.rx_packets;
1681 dev->stats.rx_bytes += len;
1682 slot = (slot + 1) % NUM_RX_BUFF;
1687 if (ctrl & MAL_RX_CTRL_FIRST) {
1688 BUG_ON(dev->rx_sg_skb);
1689 if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) {
1690 DBG(dev, "rx OOM %d" NL, slot);
1691 ++dev->estats.rx_dropped_oom;
1692 emac_recycle_rx_skb(dev, slot, 0);
1694 dev->rx_sg_skb = skb;
1697 } else if (!emac_rx_sg_append(dev, slot) &&
1698 (ctrl & MAL_RX_CTRL_LAST)) {
1700 skb = dev->rx_sg_skb;
1701 dev->rx_sg_skb = NULL;
1703 ctrl &= EMAC_BAD_RX_MASK;
1704 if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
1705 emac_parse_rx_error(dev, ctrl);
1706 ++dev->estats.rx_dropped_error;
1714 DBG(dev, "rx OOM %d" NL, slot);
1715 /* Drop the packet and recycle skb */
1716 ++dev->estats.rx_dropped_oom;
1717 emac_recycle_rx_skb(dev, slot, 0);
1722 DBG2(dev, "rx %d BDs" NL, received);
1723 dev->rx_slot = slot;
1726 if (unlikely(budget && test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags))) {
1728 if (!(dev->rx_desc[slot].ctrl & MAL_RX_CTRL_EMPTY)) {
1729 DBG2(dev, "rx restart" NL);
1734 if (dev->rx_sg_skb) {
1735 DBG2(dev, "dropping partial rx packet" NL);
1736 ++dev->estats.rx_dropped_error;
1737 dev_kfree_skb(dev->rx_sg_skb);
1738 dev->rx_sg_skb = NULL;
1741 clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
1742 mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
1743 emac_rx_enable(dev);
1749 /* NAPI poll context */
1750 static int emac_peek_rx(void *param)
1752 struct emac_instance *dev = param;
1754 return !(dev->rx_desc[dev->rx_slot].ctrl & MAL_RX_CTRL_EMPTY);
1757 /* NAPI poll context */
1758 static int emac_peek_rx_sg(void *param)
1760 struct emac_instance *dev = param;
1762 int slot = dev->rx_slot;
1764 u16 ctrl = dev->rx_desc[slot].ctrl;
1765 if (ctrl & MAL_RX_CTRL_EMPTY)
1767 else if (ctrl & MAL_RX_CTRL_LAST)
1770 slot = (slot + 1) % NUM_RX_BUFF;
1772 /* I'm just being paranoid here :) */
1773 if (unlikely(slot == dev->rx_slot))
1779 static void emac_rxde(void *param)
1781 struct emac_instance *dev = param;
1783 ++dev->estats.rx_stopped;
1784 emac_rx_disable_async(dev);
1788 static irqreturn_t emac_irq(int irq, void *dev_instance)
1790 struct emac_instance *dev = dev_instance;
1791 struct emac_regs __iomem *p = dev->emacp;
	struct emac_error_stats *st = &dev->estats;
	u32 isr;
1795 spin_lock(&dev->lock);
1797 isr = in_be32(&p->isr);
1798 out_be32(&p->isr, isr);
1800 DBG(dev, "isr = %08x" NL, isr);
1802 if (isr & EMAC4_ISR_TXPE)
1804 if (isr & EMAC4_ISR_RXPE)
1806 if (isr & EMAC4_ISR_TXUE)
1808 if (isr & EMAC4_ISR_RXOE)
1809 ++st->rx_fifo_overrun;
1810 if (isr & EMAC_ISR_OVR)
1812 if (isr & EMAC_ISR_BP)
1813 ++st->rx_bad_packet;
1814 if (isr & EMAC_ISR_RP)
1815 ++st->rx_runt_packet;
1816 if (isr & EMAC_ISR_SE)
1817 ++st->rx_short_event;
1818 if (isr & EMAC_ISR_ALE)
1819 ++st->rx_alignment_error;
1820 if (isr & EMAC_ISR_BFCS)
1822 if (isr & EMAC_ISR_PTLE)
1823 ++st->rx_packet_too_long;
1824 if (isr & EMAC_ISR_ORE)
1825 ++st->rx_out_of_range;
1826 if (isr & EMAC_ISR_IRE)
1828 if (isr & EMAC_ISR_SQE)
1830 if (isr & EMAC_ISR_TE)
1833 spin_unlock(&dev->lock);
1838 static struct net_device_stats *emac_stats(struct net_device *ndev)
1840 struct emac_instance *dev = netdev_priv(ndev);
1841 struct emac_stats *st = &dev->stats;
1842 struct emac_error_stats *est = &dev->estats;
1843 struct net_device_stats *nst = &dev->nstats;
1844 unsigned long flags;
1846 DBG2(dev, "stats" NL);
1848 /* Compute "legacy" statistics */
1849 spin_lock_irqsave(&dev->lock, flags);
1850 nst->rx_packets = (unsigned long)st->rx_packets;
1851 nst->rx_bytes = (unsigned long)st->rx_bytes;
1852 nst->tx_packets = (unsigned long)st->tx_packets;
1853 nst->tx_bytes = (unsigned long)st->tx_bytes;
1854 nst->rx_dropped = (unsigned long)(est->rx_dropped_oom +
1855 est->rx_dropped_error +
1856 est->rx_dropped_resize +
1857 est->rx_dropped_mtu);
1858 nst->tx_dropped = (unsigned long)est->tx_dropped;
1860 nst->rx_errors = (unsigned long)est->rx_bd_errors;
1861 nst->rx_fifo_errors = (unsigned long)(est->rx_bd_overrun +
1862 est->rx_fifo_overrun +
1864 nst->rx_frame_errors = (unsigned long)(est->rx_bd_alignment_error +
1865 est->rx_alignment_error);
1866 nst->rx_crc_errors = (unsigned long)(est->rx_bd_bad_fcs +
1868 nst->rx_length_errors = (unsigned long)(est->rx_bd_runt_packet +
1869 est->rx_bd_short_event +
1870 est->rx_bd_packet_too_long +
1871 est->rx_bd_out_of_range +
1872 est->rx_bd_in_range +
1873 est->rx_runt_packet +
1874 est->rx_short_event +
1875 est->rx_packet_too_long +
1876 est->rx_out_of_range +
1879 nst->tx_errors = (unsigned long)(est->tx_bd_errors + est->tx_errors);
1880 nst->tx_fifo_errors = (unsigned long)(est->tx_bd_underrun +
1882 nst->tx_carrier_errors = (unsigned long)est->tx_bd_carrier_loss;
1883 nst->collisions = (unsigned long)(est->tx_bd_excessive_deferral +
1884 est->tx_bd_excessive_collisions +
1885 est->tx_bd_late_collision +
1886 est->tx_bd_multple_collisions);
1887 spin_unlock_irqrestore(&dev->lock, flags);
1891 static struct mal_commac_ops emac_commac_ops = {
1892 .poll_tx = &emac_poll_tx,
1893 .poll_rx = &emac_poll_rx,
1894 .peek_rx = &emac_peek_rx,
1898 static struct mal_commac_ops emac_commac_sg_ops = {
1899 .poll_tx = &emac_poll_tx,
1900 .poll_rx = &emac_poll_rx,
1901 .peek_rx = &emac_peek_rx_sg,
1905 /* Ethtool support */
1906 static int emac_ethtool_get_settings(struct net_device *ndev,
1907 struct ethtool_cmd *cmd)
1909 struct emac_instance *dev = netdev_priv(ndev);
1911 cmd->supported = dev->phy.features;
1912 cmd->port = PORT_MII;
1913 cmd->phy_address = dev->phy.address;
	cmd->transceiver =
	    dev->phy.address >= 0 ? XCVR_EXTERNAL : XCVR_INTERNAL;
1917 mutex_lock(&dev->link_lock);
1918 cmd->advertising = dev->phy.advertising;
1919 cmd->autoneg = dev->phy.autoneg;
1920 cmd->speed = dev->phy.speed;
1921 cmd->duplex = dev->phy.duplex;
1922 mutex_unlock(&dev->link_lock);
1927 static int emac_ethtool_set_settings(struct net_device *ndev,
1928 struct ethtool_cmd *cmd)
1930 struct emac_instance *dev = netdev_priv(ndev);
1931 u32 f = dev->phy.features;
1933 DBG(dev, "set_settings(%d, %d, %d, 0x%08x)" NL,
1934 cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising);
1936 /* Basic sanity checks */
1937 if (dev->phy.address < 0)
1939 if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
1941 if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
1943 if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL)
1946 if (cmd->autoneg == AUTONEG_DISABLE) {
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_HALF &&
			    !(f & SUPPORTED_10baseT_Half))
				return -EINVAL;
			if (cmd->duplex == DUPLEX_FULL &&
			    !(f & SUPPORTED_10baseT_Full))
				return -EINVAL;
			break;
		case SPEED_100:
			if (cmd->duplex == DUPLEX_HALF &&
			    !(f & SUPPORTED_100baseT_Half))
				return -EINVAL;
			if (cmd->duplex == DUPLEX_FULL &&
			    !(f & SUPPORTED_100baseT_Full))
				return -EINVAL;
			break;
		case SPEED_1000:
			if (cmd->duplex == DUPLEX_HALF &&
			    !(f & SUPPORTED_1000baseT_Half))
				return -EINVAL;
			if (cmd->duplex == DUPLEX_FULL &&
			    !(f & SUPPORTED_1000baseT_Full))
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}
1976 mutex_lock(&dev->link_lock);
1977 dev->phy.def->ops->setup_forced(&dev->phy, cmd->speed,
1979 mutex_unlock(&dev->link_lock);
1982 if (!(f & SUPPORTED_Autoneg))
1985 mutex_lock(&dev->link_lock);
1986 dev->phy.def->ops->setup_aneg(&dev->phy,
1987 (cmd->advertising & f) |
1988 (dev->phy.advertising &
1990 ADVERTISED_Asym_Pause)));
1991 mutex_unlock(&dev->link_lock);
1993 emac_force_link_update(dev);
1998 static void emac_ethtool_get_ringparam(struct net_device *ndev,
1999 struct ethtool_ringparam *rp)
2001 rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF;
2002 rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF;
2005 static void emac_ethtool_get_pauseparam(struct net_device *ndev,
2006 struct ethtool_pauseparam *pp)
2008 struct emac_instance *dev = netdev_priv(ndev);
2010 mutex_lock(&dev->link_lock);
2011 if ((dev->phy.features & SUPPORTED_Autoneg) &&
	    (dev->phy.advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause)))
		pp->autoneg = 1;

	if (dev->phy.duplex == DUPLEX_FULL) {
		if (dev->phy.pause)
			pp->rx_pause = pp->tx_pause = 1;
		else if (dev->phy.asym_pause)
			pp->tx_pause = 1;
	}
2021 mutex_unlock(&dev->link_lock);
2024 static u32 emac_ethtool_get_rx_csum(struct net_device *ndev)
2026 struct emac_instance *dev = netdev_priv(ndev);
2028 return dev->tah_dev != NULL;
2031 static int emac_get_regs_len(struct emac_instance *dev)
2033 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
2034 return sizeof(struct emac_ethtool_regs_subhdr) +
2035 EMAC4_ETHTOOL_REGS_SIZE(dev);
2037 return sizeof(struct emac_ethtool_regs_subhdr) +
2038 EMAC_ETHTOOL_REGS_SIZE(dev);
2041 static int emac_ethtool_get_regs_len(struct net_device *ndev)
2043 struct emac_instance *dev = netdev_priv(ndev);
2046 size = sizeof(struct emac_ethtool_regs_hdr) +
2047 emac_get_regs_len(dev) + mal_get_regs_len(dev->mal);
2048 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2049 size += zmii_get_regs_len(dev->zmii_dev);
2050 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2051 size += rgmii_get_regs_len(dev->rgmii_dev);
2052 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2053 size += tah_get_regs_len(dev->tah_dev);
2058 static void *emac_dump_regs(struct emac_instance *dev, void *buf)
2060 struct emac_ethtool_regs_subhdr *hdr = buf;
2062 hdr->index = dev->cell_index;
2063 if (emac_has_feature(dev, EMAC_FTR_EMAC4)) {
2064 hdr->version = EMAC4_ETHTOOL_REGS_VER;
2065 memcpy_fromio(hdr + 1, dev->emacp, EMAC4_ETHTOOL_REGS_SIZE(dev));
2066 return ((void *)(hdr + 1) + EMAC4_ETHTOOL_REGS_SIZE(dev));
2068 hdr->version = EMAC_ETHTOOL_REGS_VER;
2069 memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE(dev));
2070 return ((void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE(dev));
2074 static void emac_ethtool_get_regs(struct net_device *ndev,
2075 struct ethtool_regs *regs, void *buf)
2077 struct emac_instance *dev = netdev_priv(ndev);
2078 struct emac_ethtool_regs_hdr *hdr = buf;
2080 hdr->components = 0;
2083 buf = mal_dump_regs(dev->mal, buf);
2084 buf = emac_dump_regs(dev, buf);
2085 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) {
2086 hdr->components |= EMAC_ETHTOOL_REGS_ZMII;
2087 buf = zmii_dump_regs(dev->zmii_dev, buf);
2089 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) {
2090 hdr->components |= EMAC_ETHTOOL_REGS_RGMII;
2091 buf = rgmii_dump_regs(dev->rgmii_dev, buf);
2093 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH)) {
2094 hdr->components |= EMAC_ETHTOOL_REGS_TAH;
2095 buf = tah_dump_regs(dev->tah_dev, buf);
2099 static int emac_ethtool_nway_reset(struct net_device *ndev)
2101 struct emac_instance *dev = netdev_priv(ndev);
2104 DBG(dev, "nway_reset" NL);
2106 if (dev->phy.address < 0)
2109 mutex_lock(&dev->link_lock);
2110 if (!dev->phy.autoneg) {
2115 dev->phy.def->ops->setup_aneg(&dev->phy, dev->phy.advertising);
2117 mutex_unlock(&dev->link_lock);
2118 emac_force_link_update(dev);
2122 static int emac_ethtool_get_stats_count(struct net_device *ndev)
2124 return EMAC_ETHTOOL_STATS_COUNT;
2127 static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset,
2130 if (stringset == ETH_SS_STATS)
2131 memcpy(buf, &emac_stats_keys, sizeof(emac_stats_keys));
2134 static void emac_ethtool_get_ethtool_stats(struct net_device *ndev,
2135 struct ethtool_stats *estats,
2138 struct emac_instance *dev = netdev_priv(ndev);
2140 memcpy(tmp_stats, &dev->stats, sizeof(dev->stats));
2141 tmp_stats += sizeof(dev->stats) / sizeof(u64);
2142 memcpy(tmp_stats, &dev->estats, sizeof(dev->estats));
2145 static void emac_ethtool_get_drvinfo(struct net_device *ndev,
2146 struct ethtool_drvinfo *info)
2148 struct emac_instance *dev = netdev_priv(ndev);
2150 strcpy(info->driver, "ibm_emac");
2151 strcpy(info->version, DRV_VERSION);
2152 info->fw_version[0] = '\0';
2153 sprintf(info->bus_info, "PPC 4xx EMAC-%d %s",
2154 dev->cell_index, dev->ofdev->node->full_name);
2155 info->n_stats = emac_ethtool_get_stats_count(ndev);
2156 info->regdump_len = emac_ethtool_get_regs_len(ndev);
2159 static const struct ethtool_ops emac_ethtool_ops = {
2160 .get_settings = emac_ethtool_get_settings,
2161 .set_settings = emac_ethtool_set_settings,
2162 .get_drvinfo = emac_ethtool_get_drvinfo,
2164 .get_regs_len = emac_ethtool_get_regs_len,
2165 .get_regs = emac_ethtool_get_regs,
2167 .nway_reset = emac_ethtool_nway_reset,
2169 .get_ringparam = emac_ethtool_get_ringparam,
2170 .get_pauseparam = emac_ethtool_get_pauseparam,
2172 .get_rx_csum = emac_ethtool_get_rx_csum,
2174 .get_strings = emac_ethtool_get_strings,
2175 .get_stats_count = emac_ethtool_get_stats_count,
2176 .get_ethtool_stats = emac_ethtool_get_ethtool_stats,
2178 .get_link = ethtool_op_get_link,
2179 .get_tx_csum = ethtool_op_get_tx_csum,
2180 .get_sg = ethtool_op_get_sg,
2183 static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
2185 struct emac_instance *dev = netdev_priv(ndev);
2186 uint16_t *data = (uint16_t *) & rq->ifr_ifru;
2188 DBG(dev, "ioctl %08x" NL, cmd);
2190 if (dev->phy.address < 0)
2195 case SIOCDEVPRIVATE:
2196 data[0] = dev->phy.address;
2199 case SIOCDEVPRIVATE + 1:
2200 data[3] = emac_mdio_read(ndev, dev->phy.address, data[1]);
2204 case SIOCDEVPRIVATE + 2:
2205 if (!capable(CAP_NET_ADMIN))
2207 emac_mdio_write(ndev, dev->phy.address, data[1], data[2]);
struct emac_depentry {
	u32			phandle;
	struct device_node	*node;
	struct of_device	*ofdev;
	void			*drvdata;
};
2221 #define EMAC_DEP_MAL_IDX 0
2222 #define EMAC_DEP_ZMII_IDX 1
2223 #define EMAC_DEP_RGMII_IDX 2
2224 #define EMAC_DEP_TAH_IDX 3
2225 #define EMAC_DEP_MDIO_IDX 4
2226 #define EMAC_DEP_PREV_IDX 5
2227 #define EMAC_DEP_COUNT 6
2229 static int __devinit emac_check_deps(struct emac_instance *dev,
2230 struct emac_depentry *deps)
2233 struct device_node *np;
2235 for (i = 0; i < EMAC_DEP_COUNT; i++) {
		/* no dependency on that item, all right */
2237 if (deps[i].phandle == 0) {
2241 /* special case for blist as the dependency might go away */
2242 if (i == EMAC_DEP_PREV_IDX) {
2243 np = *(dev->blist - 1);
2245 deps[i].phandle = 0;
2249 if (deps[i].node == NULL)
2250 deps[i].node = of_node_get(np);
2252 if (deps[i].node == NULL)
2253 deps[i].node = of_find_node_by_phandle(deps[i].phandle);
2254 if (deps[i].node == NULL)
2256 if (deps[i].ofdev == NULL)
2257 deps[i].ofdev = of_find_device_by_node(deps[i].node);
2258 if (deps[i].ofdev == NULL)
2260 if (deps[i].drvdata == NULL)
2261 deps[i].drvdata = dev_get_drvdata(&deps[i].ofdev->dev);
2262 if (deps[i].drvdata != NULL)
2265 return (there == EMAC_DEP_COUNT);
2268 static void emac_put_deps(struct emac_instance *dev)
2271 of_dev_put(dev->mal_dev);
2273 of_dev_put(dev->zmii_dev);
2275 of_dev_put(dev->rgmii_dev);
2277 of_dev_put(dev->mdio_dev);
2279 of_dev_put(dev->tah_dev);
2282 static int __devinit emac_of_bus_notify(struct notifier_block *nb,
2283 unsigned long action, void *data)
	/* We are only interested in device addition */
2286 if (action == BUS_NOTIFY_BOUND_DRIVER)
2287 wake_up_all(&emac_probe_wait);
2291 static struct notifier_block emac_of_bus_notifier __devinitdata = {
2292 .notifier_call = emac_of_bus_notify
2295 static int __devinit emac_wait_deps(struct emac_instance *dev)
2297 struct emac_depentry deps[EMAC_DEP_COUNT];
2300 memset(&deps, 0, sizeof(deps));
2302 deps[EMAC_DEP_MAL_IDX].phandle = dev->mal_ph;
2303 deps[EMAC_DEP_ZMII_IDX].phandle = dev->zmii_ph;
2304 deps[EMAC_DEP_RGMII_IDX].phandle = dev->rgmii_ph;
2306 deps[EMAC_DEP_TAH_IDX].phandle = dev->tah_ph;
2308 deps[EMAC_DEP_MDIO_IDX].phandle = dev->mdio_ph;
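/* The "previous EMAC in the boot list" pseudo-dependency has no real
 * phandle; 0xffffffff just marks it as pending, and emac_check_deps()
 * resolves it through *(dev->blist - 1) instead of a phandle lookup.
 */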
2309 if (dev->blist && dev->blist > emac_boot_list)
2310 deps[EMAC_DEP_PREV_IDX].phandle = 0xffffffffu;
2311 bus_register_notifier(&of_platform_bus_type, &emac_of_bus_notifier);
2312 wait_event_timeout(emac_probe_wait,
2313 emac_check_deps(dev, deps),
2314 EMAC_PROBE_DEP_TIMEOUT);
2315 bus_unregister_notifier(&of_platform_bus_type, &emac_of_bus_notifier);
2316 err = emac_check_deps(dev, deps) ? 0 : -ENODEV;
2317 for (i = 0; i < EMAC_DEP_COUNT; i++) {
2319 of_node_put(deps[i].node);
2320 if (err && deps[i].ofdev)
2321 of_dev_put(deps[i].ofdev);
2324 dev->mal_dev = deps[EMAC_DEP_MAL_IDX].ofdev;
2325 dev->zmii_dev = deps[EMAC_DEP_ZMII_IDX].ofdev;
2326 dev->rgmii_dev = deps[EMAC_DEP_RGMII_IDX].ofdev;
2327 dev->tah_dev = deps[EMAC_DEP_TAH_IDX].ofdev;
2328 dev->mdio_dev = deps[EMAC_DEP_MDIO_IDX].ofdev;
2330 if (deps[EMAC_DEP_PREV_IDX].ofdev)
2331 of_dev_put(deps[EMAC_DEP_PREV_IDX].ofdev);
2335 static int __devinit emac_read_uint_prop(struct device_node *np, const char *name,
2336 u32 *val, int fatal)
2339 const u32 *prop = of_get_property(np, name, &len);
2340 if (prop == NULL || len < sizeof(u32)) {
2342 printk(KERN_ERR "%s: missing %s property\n",
2343 np->full_name, name);
2350 static int __devinit emac_init_phy(struct emac_instance *dev)
2352 struct device_node *np = dev->ofdev->node;
2353 struct net_device *ndev = dev->ndev;
2357 dev->phy.dev = ndev;
2358 dev->phy.mode = dev->phy_mode;
2360 /* PHY-less configuration.
2361 * XXX I probably should move these settings to the dev tree
2363 if (dev->phy_address == 0xffffffff && dev->phy_map == 0xffffffff) {
2366 /* No PHY: pretend we have a fixed 100Mbit full-duplex MII link */
2369 dev->phy.address = -1;
2370 dev->phy.features = SUPPORTED_100baseT_Full | SUPPORTED_MII;
2376 mutex_lock(&emac_phy_map_lock);
2377 phy_map = dev->phy_map | busy_phy_map;
2379 DBG(dev, "PHY maps %08x %08x" NL, dev->phy_map, busy_phy_map);
2381 dev->phy.mdio_read = emac_mdio_read;
2382 dev->phy.mdio_write = emac_mdio_write;
2384 /* Enable internal clock source */
2385 #ifdef CONFIG_PPC_DCR_NATIVE
2386 if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
2387 dcri_clrset(SDR0, SDR0_MFR, 0, SDR0_MFR_ECS);
2389 /* PHY clock workaround */
2390 emac_rx_clk_tx(dev);
2397 /* Configure EMAC with defaults so we can at least use MDIO.
2398 * This is needed mostly for 440GX
2400 if (emac_phy_gpcs(dev->phy.mode)) {
2402 * Make GPCS PHY address equal to EMAC index.
2403 * We probably should take into account busy_phy_map
2404 * and/or phy_map here.
2406 * Note that the busy_phy_map is currently global
2407 * while it should probably be per-ASIC...
2409 dev->phy.address = dev->cell_index;
2412 emac_configure(dev);
2414 if (dev->phy_address != 0xffffffff)
2415 phy_map = ~(1 << dev->phy_address);
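/* Scan the 32 possible MDIO addresses, skipping any already claimed in
 * busy_phy_map (or everything except the address given by the
 * "phy-address" property, if present), and probe the first one that
 * answers a BMCR read.
 */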
2417 for (i = 0; i < 0x20; phy_map >>= 1, ++i)
2418 if (!(phy_map & 1)) {
2420 busy_phy_map |= 1 << i;
2422 /* Quick check if there is a PHY at the address */
2423 r = emac_mdio_read(dev->ndev, i, MII_BMCR);
2424 if (r == 0xffff || r < 0)
2426 if (!emac_mii_phy_probe(&dev->phy, i))
2430 /* Enable external clock source */
2431 #ifdef CONFIG_PPC_DCR_NATIVE
2432 if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
2433 dcri_clrset(SDR0, SDR0_MFR, SDR0_MFR_ECS, 0);
2435 mutex_unlock(&emac_phy_map_lock);
2437 printk(KERN_WARNING "%s: can't find PHY!\n", np->full_name);
2442 if (dev->phy.def->ops->init)
2443 dev->phy.def->ops->init(&dev->phy);
2445 /* Disable any PHY features not supported by the platform */
2446 dev->phy.def->features &= ~dev->phy_feat_exc;
2448 /* Setup initial link parameters */
2449 if (dev->phy.features & SUPPORTED_Autoneg) {
2450 adv = dev->phy.features;
2451 if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x))
2452 adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
2453 /* Restart autonegotiation */
2454 dev->phy.def->ops->setup_aneg(&dev->phy, adv);
2456 u32 f = dev->phy.def->features;
2457 int speed = SPEED_10, fd = DUPLEX_HALF;
2459 /* Select highest supported speed/duplex */
2460 if (f & SUPPORTED_1000baseT_Full) {
2463 } else if (f & SUPPORTED_1000baseT_Half)
2465 else if (f & SUPPORTED_100baseT_Full) {
2468 } else if (f & SUPPORTED_100baseT_Half)
2470 else if (f & SUPPORTED_10baseT_Full)
2473 /* Force link parameters */
2474 dev->phy.def->ops->setup_forced(&dev->phy, speed, fd);
2479 static int __devinit emac_init_config(struct emac_instance *dev)
2481 struct device_node *np = dev->ofdev->node;
2484 const char *pm, *phy_modes[] = {
2486 [PHY_MODE_MII] = "mii",
2487 [PHY_MODE_RMII] = "rmii",
2488 [PHY_MODE_SMII] = "smii",
2489 [PHY_MODE_RGMII] = "rgmii",
2490 [PHY_MODE_TBI] = "tbi",
2491 [PHY_MODE_GMII] = "gmii",
2492 [PHY_MODE_RTBI] = "rtbi",
2493 [PHY_MODE_SGMII] = "sgmii",
2496 /* Read config from device-tree */
2497 if (emac_read_uint_prop(np, "mal-device", &dev->mal_ph, 1))
2499 if (emac_read_uint_prop(np, "mal-tx-channel", &dev->mal_tx_chan, 1))
2501 if (emac_read_uint_prop(np, "mal-rx-channel", &dev->mal_rx_chan, 1))
2503 if (emac_read_uint_prop(np, "cell-index", &dev->cell_index, 1))
2505 if (emac_read_uint_prop(np, "max-frame-size", &dev->max_mtu, 0))
2506 dev->max_mtu = 1500;
2507 if (emac_read_uint_prop(np, "rx-fifo-size", &dev->rx_fifo_size, 0))
2508 dev->rx_fifo_size = 2048;
2509 if (emac_read_uint_prop(np, "tx-fifo-size", &dev->tx_fifo_size, 0))
2510 dev->tx_fifo_size = 2048;
2511 if (emac_read_uint_prop(np, "rx-fifo-size-gige", &dev->rx_fifo_size_gige, 0))
2512 dev->rx_fifo_size_gige = dev->rx_fifo_size;
2513 if (emac_read_uint_prop(np, "tx-fifo-size-gige", &dev->tx_fifo_size_gige, 0))
2514 dev->tx_fifo_size_gige = dev->tx_fifo_size;
2515 if (emac_read_uint_prop(np, "phy-address", &dev->phy_address, 0))
2516 dev->phy_address = 0xffffffff;
2517 if (emac_read_uint_prop(np, "phy-map", &dev->phy_map, 0))
2518 dev->phy_map = 0xffffffff;
2519 if (emac_read_uint_prop(np->parent, "clock-frequency", &dev->opb_bus_freq, 1))
2521 if (emac_read_uint_prop(np, "tah-device", &dev->tah_ph, 0))
2523 if (emac_read_uint_prop(np, "tah-channel", &dev->tah_port, 0))
2525 if (emac_read_uint_prop(np, "mdio-device", &dev->mdio_ph, 0))
2527 if (emac_read_uint_prop(np, "zmii-device", &dev->zmii_ph, 0))
2529 if (emac_read_uint_prop(np, "zmii-channel", &dev->zmii_port, 0))
2530 dev->zmii_port = 0xffffffff;
2531 if (emac_read_uint_prop(np, "rgmii-device", &dev->rgmii_ph, 0))
2533 if (emac_read_uint_prop(np, "rgmii-channel", &dev->rgmii_port, 0))
2534 dev->rgmii_port = 0xffffffff;
2535 if (emac_read_uint_prop(np, "fifo-entry-size", &dev->fifo_entry_size, 0))
2536 dev->fifo_entry_size = 16;
2537 if (emac_read_uint_prop(np, "mal-burst-size", &dev->mal_burst_size, 0))
2538 dev->mal_burst_size = 256;
2540 /* PHY mode needs some decoding */
2541 dev->phy_mode = PHY_MODE_NA;
2542 pm = of_get_property(np, "phy-mode", &plen);
2545 for (i = 0; i < ARRAY_SIZE(phy_modes); i++)
2546 if (!strcasecmp(pm, phy_modes[i])) {
2552 /* Backward compat with non-final DT */
2553 if (dev->phy_mode == PHY_MODE_NA && pm != NULL && plen == 4) {
2554 u32 nmode = *(const u32 *)pm;
2555 if (nmode > PHY_MODE_NA && nmode <= PHY_MODE_SGMII)
2556 dev->phy_mode = nmode;
2559 /* Check EMAC version */
2560 if (of_device_is_compatible(np, "ibm,emac4sync")) {
2561 dev->features |= (EMAC_FTR_EMAC4 | EMAC_FTR_EMAC4SYNC);
2562 } else if (of_device_is_compatible(np, "ibm,emac4")) {
2563 dev->features |= EMAC_FTR_EMAC4;
2564 if (of_device_is_compatible(np, "ibm,emac-440gx"))
2565 dev->features |= EMAC_FTR_440GX_PHY_CLK_FIX;
2567 if (of_device_is_compatible(np, "ibm,emac-440ep") ||
2568 of_device_is_compatible(np, "ibm,emac-440gr"))
2569 dev->features |= EMAC_FTR_440EP_PHY_CLK_FIX;
2572 /* Fixup some feature bits based on the device tree */
2573 if (of_get_property(np, "has-inverted-stacr-oc", NULL))
2574 dev->features |= EMAC_FTR_STACR_OC_INVERT;
2575 if (of_get_property(np, "has-new-stacr-staopc", NULL))
2576 dev->features |= EMAC_FTR_HAS_NEW_STACR;
2578 /* CAB lacks the appropriate properties */
2579 if (of_device_is_compatible(np, "ibm,emac-axon"))
2580 dev->features |= EMAC_FTR_HAS_NEW_STACR |
2581 EMAC_FTR_STACR_OC_INVERT;
2583 /* Enable TAH/ZMII/RGMII features as found */
2584 if (dev->tah_ph != 0) {
2585 #ifdef CONFIG_IBM_NEW_EMAC_TAH
2586 dev->features |= EMAC_FTR_HAS_TAH;
2588 printk(KERN_ERR "%s: TAH support not enabled!\n",
2594 if (dev->zmii_ph != 0) {
2595 #ifdef CONFIG_IBM_NEW_EMAC_ZMII
2596 dev->features |= EMAC_FTR_HAS_ZMII;
2598 printk(KERN_ERR "%s: ZMII support not enabled!\n",
2604 if (dev->rgmii_ph != 0) {
2605 #ifdef CONFIG_IBM_NEW_EMAC_RGMII
2606 dev->features |= EMAC_FTR_HAS_RGMII;
2608 printk(KERN_ERR "%s: RGMII support not enabled!\n",
2614 /* Read MAC-address */
2615 p = of_get_property(np, "local-mac-address", NULL);
2617 printk(KERN_ERR "%s: Can't find local-mac-address property\n",
2621 memcpy(dev->ndev->dev_addr, p, 6);
2623 /* IAHT and GAHT filter parameterization */
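/* (Here xaht_slots_shift is log2 of the total number of hash-table slots
 * and xaht_width_shift is log2 of the bit width of one hash register, so
 * the table spans 1 << (xaht_slots_shift - xaht_width_shift) registers.)
 */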
2624 if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) {
2625 dev->xaht_slots_shift = EMAC4SYNC_XAHT_SLOTS_SHIFT;
2626 dev->xaht_width_shift = EMAC4SYNC_XAHT_WIDTH_SHIFT;
2628 dev->xaht_slots_shift = EMAC4_XAHT_SLOTS_SHIFT;
2629 dev->xaht_width_shift = EMAC4_XAHT_WIDTH_SHIFT;
2632 DBG(dev, "features : 0x%08x / 0x%08x\n", dev->features, EMAC_FTRS_POSSIBLE);
2633 DBG(dev, "tx_fifo_size : %d (%d gige)\n", dev->tx_fifo_size, dev->tx_fifo_size_gige);
2634 DBG(dev, "rx_fifo_size : %d (%d gige)\n", dev->rx_fifo_size, dev->rx_fifo_size_gige);
2635 DBG(dev, "max_mtu : %d\n", dev->max_mtu);
2636 DBG(dev, "OPB freq : %d\n", dev->opb_bus_freq);
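/* For reference, a hypothetical EMAC device-tree node carrying the
 * properties read by emac_init_config() above could look like this
 * (property names match the code, values are purely illustrative):
 *
 *	EMAC0: ethernet@ef600e00 {
 *		compatible = "ibm,emac4";
 *		cell-index = <0>;
 *		mal-device = <&MAL0>;
 *		mal-tx-channel = <0>;
 *		mal-rx-channel = <0>;
 *		max-frame-size = <1500>;
 *		rx-fifo-size = <2048>;
 *		tx-fifo-size = <2048>;
 *		phy-mode = "rgmii";
 *		phy-map = <0x00000000>;
 *		rgmii-device = <&RGMII0>;
 *		rgmii-channel = <0>;
 *		local-mac-address = [ 00 00 00 00 00 00 ];
 *	};
 */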
2641 static int __devinit emac_probe(struct of_device *ofdev,
2642 const struct of_device_id *match)
2644 struct net_device *ndev;
2645 struct emac_instance *dev;
2646 struct device_node *np = ofdev->node;
2647 struct device_node **blist = NULL;
2650 /* Skip unused/unwired EMACs. We leave the check for an "unused"
2651 * property here for now, but new flat device trees should set a
2652 * status property to "disabled" instead.
2654 if (of_get_property(np, "unused", NULL) || !of_device_is_available(np))
2657 /* Find ourselves in the bootlist if we are there */
2658 for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
2659 if (emac_boot_list[i] == np)
2660 blist = &emac_boot_list[i];
2662 /* Allocate our net_device structure */
2664 ndev = alloc_etherdev(sizeof(struct emac_instance));
2666 printk(KERN_ERR "%s: could not allocate ethernet device!\n",
2670 dev = netdev_priv(ndev);
2674 SET_NETDEV_DEV(ndev, &ofdev->dev);
2676 /* Initialize some embedded data structures */
2677 mutex_init(&dev->mdio_lock);
2678 mutex_init(&dev->link_lock);
2679 spin_lock_init(&dev->lock);
2680 INIT_WORK(&dev->reset_work, emac_reset_work);
2682 /* Init various config data based on device-tree */
2683 err = emac_init_config(dev);
2687 /* Get interrupts. EMAC irq is mandatory, WOL irq is optional */
2688 dev->emac_irq = irq_of_parse_and_map(np, 0);
2689 dev->wol_irq = irq_of_parse_and_map(np, 1);
2690 if (dev->emac_irq == NO_IRQ) {
2691 printk(KERN_ERR "%s: Can't map main interrupt\n", np->full_name);
2694 ndev->irq = dev->emac_irq;
2697 if (of_address_to_resource(np, 0, &dev->rsrc_regs)) {
2698 printk(KERN_ERR "%s: Can't get registers address\n",
2702 /* TODO: request_mem_region */
2703 dev->emacp = ioremap(dev->rsrc_regs.start,
2704 dev->rsrc_regs.end - dev->rsrc_regs.start + 1);
2705 if (dev->emacp == NULL) {
2706 printk(KERN_ERR "%s: Can't map device registers!\n",
2712 /* Wait for dependent devices */
2713 err = emac_wait_deps(dev);
2716 "%s: Timeout waiting for dependent devices\n",
2718 /* display more info about what's missing? */
2721 dev->mal = dev_get_drvdata(&dev->mal_dev->dev);
2722 if (dev->mdio_dev != NULL)
2723 dev->mdio_instance = dev_get_drvdata(&dev->mdio_dev->dev);
2725 /* Register with MAL */
2726 dev->commac.ops = &emac_commac_ops;
2727 dev->commac.dev = dev;
2728 dev->commac.tx_chan_mask = MAL_CHAN_MASK(dev->mal_tx_chan);
2729 dev->commac.rx_chan_mask = MAL_CHAN_MASK(dev->mal_rx_chan);
2730 err = mal_register_commac(dev->mal, &dev->commac);
2732 printk(KERN_ERR "%s: failed to register with mal %s!\n",
2733 np->full_name, dev->mal_dev->node->full_name);
2736 dev->rx_skb_size = emac_rx_skb_size(ndev->mtu);
2737 dev->rx_sync_size = emac_rx_sync_size(ndev->mtu);
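/* rx_skb_size and rx_sync_size cache, for the current MTU, how big an RX
 * skb must be allocated and how many of its bytes need cache
 * synchronization per packet; they are recomputed on MTU changes.
 */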
2739 /* Get pointers to BD rings */
2741 dev->mal->bd_virt + mal_tx_bd_offset(dev->mal, dev->mal_tx_chan);
2743 dev->mal->bd_virt + mal_rx_bd_offset(dev->mal, dev->mal_rx_chan);
2745 DBG(dev, "tx_desc %p" NL, dev->tx_desc);
2746 DBG(dev, "rx_desc %p" NL, dev->rx_desc);
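/* Start with clean descriptor rings and no attached skbs */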
2749 memset(dev->tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor));
2750 memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor));
2751 memset(dev->tx_skb, 0, NUM_TX_BUFF * sizeof(struct sk_buff *));
2752 memset(dev->rx_skb, 0, NUM_RX_BUFF * sizeof(struct sk_buff *));
2754 /* Attach to ZMII, if needed */
2755 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII) &&
2756 (err = zmii_attach(dev->zmii_dev, dev->zmii_port, &dev->phy_mode)) != 0)
2757 goto err_unreg_commac;
2759 /* Attach to RGMII, if needed */
2760 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII) &&
2761 (err = rgmii_attach(dev->rgmii_dev, dev->rgmii_port, dev->phy_mode)) != 0)
2762 goto err_detach_zmii;
2764 /* Attach to TAH, if needed */
2765 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
2766 (err = tah_attach(dev->tah_dev, dev->tah_port)) != 0)
2767 goto err_detach_rgmii;
2769 /* Set some link defaults before we can find out real parameters */
2770 dev->phy.speed = SPEED_100;
2771 dev->phy.duplex = DUPLEX_FULL;
2772 dev->phy.autoneg = AUTONEG_DISABLE;
2773 dev->phy.pause = dev->phy.asym_pause = 0;
2774 dev->stop_timeout = STOP_TIMEOUT_100;
2775 INIT_DELAYED_WORK(&dev->link_work, emac_link_timer);
2777 /* Find PHY if any */
2778 err = emac_init_phy(dev);
2780 goto err_detach_tah;
2782 /* Fill in the driver function table */
2783 ndev->open = &emac_open;
2785 ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
2786 ndev->tx_timeout = &emac_tx_timeout;
2787 ndev->watchdog_timeo = 5 * HZ;
2788 ndev->stop = &emac_close;
2789 ndev->get_stats = &emac_stats;
2790 ndev->set_multicast_list = &emac_set_multicast_list;
2791 ndev->do_ioctl = &emac_ioctl;
2792 if (emac_phy_supports_gige(dev->phy_mode)) {
2793 ndev->hard_start_xmit = &emac_start_xmit_sg;
2794 ndev->change_mtu = &emac_change_mtu;
2795 dev->commac.ops = &emac_commac_sg_ops;
2797 ndev->hard_start_xmit = &emac_start_xmit;
2799 SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);
2801 netif_carrier_off(ndev);
2802 netif_stop_queue(ndev);
2804 err = register_netdev(ndev);
2806 printk(KERN_ERR "%s: failed to register net device (%d)!\n",
2807 np->full_name, err);
2808 goto err_detach_tah;
2811 /* Set our drvdata last as we don't want them visible until we are fully initialized */
2815 dev_set_drvdata(&ofdev->dev, dev);
2817 /* There's a new kid in town ! Let's tell everybody */
2818 wake_up_all(&emac_probe_wait);
2822 "%s: EMAC-%d %s, MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
2823 ndev->name, dev->cell_index, np->full_name,
2824 ndev->dev_addr[0], ndev->dev_addr[1], ndev->dev_addr[2],
2825 ndev->dev_addr[3], ndev->dev_addr[4], ndev->dev_addr[5]);
2827 if (dev->phy.address >= 0)
2828 printk(KERN_INFO "%s: found %s PHY (0x%02x)\n", ndev->name,
2829 dev->phy.def->name, dev->phy.address);
2831 emac_dbg_register(dev);
2836 /* I have a bad feeling about this ... */
2839 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2840 tah_detach(dev->tah_dev, dev->tah_port);
2842 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2843 rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
2845 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2846 zmii_detach(dev->zmii_dev, dev->zmii_port);
2848 mal_unregister_commac(dev->mal, &dev->commac);
2852 iounmap(dev->emacp);
2854 if (dev->wol_irq != NO_IRQ)
2855 irq_dispose_mapping(dev->wol_irq);
2856 if (dev->emac_irq != NO_IRQ)
2857 irq_dispose_mapping(dev->emac_irq);
2861 /* if we were on the bootlist, remove us as we won't show up and
2862 * wake up all waiters to notify them in case they were waiting on us */
2867 wake_up_all(&emac_probe_wait);
2872 static int __devexit emac_remove(struct of_device *ofdev)
2874 struct emac_instance *dev = dev_get_drvdata(&ofdev->dev);
2876 DBG(dev, "remove" NL);
2878 dev_set_drvdata(&ofdev->dev, NULL);
2880 unregister_netdev(dev->ndev);
2882 flush_scheduled_work();
2884 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2885 tah_detach(dev->tah_dev, dev->tah_port);
2886 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2887 rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
2888 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2889 zmii_detach(dev->zmii_dev, dev->zmii_port);
2891 mal_unregister_commac(dev->mal, &dev->commac);
2894 emac_dbg_unregister(dev);
2895 iounmap(dev->emacp);
2897 if (dev->wol_irq != NO_IRQ)
2898 irq_dispose_mapping(dev->wol_irq);
2899 if (dev->emac_irq != NO_IRQ)
2900 irq_dispose_mapping(dev->emac_irq);
2907 /* XXX Features in here should be replaced by properties... */
2908 static struct of_device_id emac_match[] =
2912 .compatible = "ibm,emac",
2916 .compatible = "ibm,emac4",
2920 .compatible = "ibm,emac4sync",
2925 static struct of_platform_driver emac_driver = {
2927 .match_table = emac_match,
2929 .probe = emac_probe,
2930 .remove = emac_remove,
2933 static void __init emac_make_bootlist(void)
2935 struct device_node *np = NULL;
2936 int j, max, i = 0, k;
2937 int cell_indices[EMAC_BOOT_LIST_SIZE];
2940 while ((np = of_find_all_nodes(np)) != NULL) {
2943 if (of_match_node(emac_match, np) == NULL)
2945 if (of_get_property(np, "unused", NULL))
2947 idx = of_get_property(np, "cell-index", NULL);
2950 cell_indices[i] = *idx;
2951 emac_boot_list[i++] = of_node_get(np);
2952 if (i >= EMAC_BOOT_LIST_SIZE) {
2959 /* Bubble sort them (doh, what a creative algorithm :-) */
2960 for (i = 0; max > 1 && (i < (max - 1)); i++)
2961 for (j = i; j < max; j++) {
2962 if (cell_indices[i] > cell_indices[j]) {
2963 np = emac_boot_list[i];
2964 emac_boot_list[i] = emac_boot_list[j];
2965 emac_boot_list[j] = np;
2966 k = cell_indices[i];
2967 cell_indices[i] = cell_indices[j];
2968 cell_indices[j] = k;
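/* The boot list is now ordered by cell-index, so the "previous EMAC"
 * dependency (EMAC_DEP_PREV_IDX) in emac_wait_deps() always waits on a
 * lower-numbered EMAC that probes first.
 */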
2973 static int __init emac_init(void)
2977 printk(KERN_INFO DRV_DESC ", version " DRV_VERSION "\n");
2979 /* Init debug stuff */
2982 /* Build EMAC boot list */
2983 emac_make_bootlist();
2985 /* Init submodules */
2998 rc = of_register_platform_driver(&emac_driver);
3016 static void __exit emac_exit(void)
3020 of_unregister_platform_driver(&emac_driver);
3028 /* Destroy EMAC boot list */
3029 for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
3030 if (emac_boot_list[i])
3031 of_node_put(emac_boot_list[i]);
3034 module_init(emac_init);
3035 module_exit(emac_exit);