2 * drivers/net/ibm_newemac/core.c
4 * Driver for PowerPC 4xx on-chip ethernet controller.
6 * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
7 * <benh@kernel.crashing.org>
9 * Based on the arch/ppc version of the driver:
11 * Copyright (c) 2004, 2005 Zultys Technologies.
12 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
14 * Based on original work by
15 * Matt Porter <mporter@kernel.crashing.org>
16 * (c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
17 * Armin Kuster <akuster@mvista.com>
18 * Johnnie Peters <jpeters@mvista.com>
20 * This program is free software; you can redistribute it and/or modify it
21 * under the terms of the GNU General Public License as published by the
22 * Free Software Foundation; either version 2 of the License, or (at your
23 * option) any later version.
27 #include <linux/sched.h>
28 #include <linux/string.h>
29 #include <linux/errno.h>
30 #include <linux/delay.h>
31 #include <linux/types.h>
32 #include <linux/pci.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/crc32.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #include <linux/bitops.h>
39 #include <linux/workqueue.h>
42 #include <asm/processor.h>
45 #include <asm/uaccess.h>
47 #include <asm/dcr-regs.h>
52 * Lack of dma_unmap_???? calls is intentional.
54 * API-correct usage requires additional support state information to be
55 * maintained for every RX and TX buffer descriptor (BD). Unfortunately, due to
56 * EMAC design (e.g. TX buffer passed from network stack can be split into
57 * several BDs, dma_map_single/dma_map_page can be used to map particular BD),
58 * maintaining such information will add additional overhead.
59 * Current DMA API implementation for 4xx processors only ensures cache coherency
60 * and dma_unmap_???? routines are empty and are likely to stay this way.
61 * I decided to omit dma_unmap_??? calls because I don't want to add additional
62 * complexity just for the sake of following some abstract API, when it doesn't
63 * add any real benefit to the driver. I understand that this decision maybe
64 * controversial, but I really tried to make code API-correct and efficient
65 * at the same time and didn't come up with code I liked :(. --ebs
/* Driver identity strings used by ethtool and module metadata. */
68 #define DRV_NAME "emac"
69 #define DRV_VERSION "3.54"
70 #define DRV_DESC "PPC 4xx OCP EMAC driver"
72 MODULE_DESCRIPTION(DRV_DESC);
74 ("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>");
75 MODULE_LICENSE("GPL");
78 * PPC64 doesn't (yet) have a cacheable_memcpy
/* Fall back to plain memcpy where the optimized cacheable copy is absent. */
81 #define cacheable_memcpy(d,s,n) memcpy((d),(s),(n))
84 /* minimum number of free TX descriptors required to wake up TX process */
85 #define EMAC_TX_WAKEUP_THRESH (NUM_TX_BUFF / 4)
87 /* If packet size is less than this number, we allocate small skb and copy packet
88 * contents into it instead of just sending original big skb up
90 #define EMAC_RX_COPY_THRESH CONFIG_IBM_NEW_EMAC_RX_COPY_THRESHOLD
92 /* Since multiple EMACs share MDIO lines in various ways, we need
93 * to avoid re-using the same PHY ID in cases where the arch didn't
94 * setup precise phy_map entries
96 * XXX This is something that needs to be reworked as we can have multiple
97 * EMAC "sets" (multiple ASICs containing several EMACs) though we can
98 * probably require in that case to have explicit PHY IDs in the device-tree
/* Bitmap of PHY addresses already claimed; guarded by emac_phy_map_lock. */
100 static u32 busy_phy_map;
101 static DEFINE_MUTEX(emac_phy_map_lock);
103 /* This is the wait queue used to wait on any event related to probe, that
104 * is discovery of MALs, other EMACs, ZMII/RGMIIs, etc...
106 static DECLARE_WAIT_QUEUE_HEAD(emac_probe_wait);
108 /* Having stable interface names is a doomed idea. However, it would be nice
109 * if we didn't have completely random interface names at boot too :-) It's
110 * just a matter of making everybody's life easier. Since we are doing
111 * threaded probing, it's a bit harder though. The base idea here is that
112 * we make up a list of all emacs in the device-tree before we register the
113 * driver. Every emac will then wait for the previous one in the list to
114 * initialize before itself. We should also keep that list ordered by
116 * That list is only 4 entries long, meaning that additional EMACs don't
117 * get ordering guarantees unless EMAC_BOOT_LIST_SIZE is increased.
120 #define EMAC_BOOT_LIST_SIZE 4
121 static struct device_node *emac_boot_list[EMAC_BOOT_LIST_SIZE];
123 /* How long should I wait for dependent devices ? */
124 #define EMAC_PROBE_DEP_TIMEOUT (HZ * 5)
126 /* I don't want to litter system log with timeout errors
127 * when we have brain-damaged PHY.
/*
 * Report a hardware timeout: on chips with the PHY-clock erratum workaround
 * the condition is expected, so it only goes to the debug log; otherwise it
 * is printed at KERN_ERR, rate-limited to avoid log flooding.
 */
129 static inline void emac_report_timeout_error(struct emac_instance *dev,
132 if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX |
133 EMAC_FTR_440EP_PHY_CLK_FIX))
134 DBG(dev, "%s" NL, error);
135 else if (net_ratelimit())
136 printk(KERN_ERR "%s: %s\n", dev->ndev->name, error);
139 /* EMAC PHY clock workaround:
140 * 440EP/440GR has more sane SDR0_MFR register implementation than 440GX,
141 * which allows controlling each EMAC clock
/* Switch the RX clock source via SDR0_MFR (440EP erratum workaround only). */
143 static inline void emac_rx_clk_tx(struct emac_instance *dev)
145 #ifdef CONFIG_PPC_DCR_NATIVE
146 if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
147 dcri_clrset(SDR0, SDR0_MFR,
148 0, SDR0_MFR_ECS >> dev->cell_index);
/* Restore the default RX clock source (inverse of emac_rx_clk_tx()). */
152 static inline void emac_rx_clk_default(struct emac_instance *dev)
154 #ifdef CONFIG_PPC_DCR_NATIVE
155 if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
156 dcri_clrset(SDR0, SDR0_MFR,
157 SDR0_MFR_ECS >> dev->cell_index, 0);
161 /* PHY polling intervals */
162 #define PHY_POLL_LINK_ON HZ
163 #define PHY_POLL_LINK_OFF (HZ / 5)
165 /* Graceful stop timeouts in us.
166 * We should allow up to 1 frame time (full-duplex, ignoring collisions)
/* Per-speed stop timeouts: slower links need longer to drain one frame. */
168 #define STOP_TIMEOUT_10 1230
169 #define STOP_TIMEOUT_100 124
170 #define STOP_TIMEOUT_1000 13
171 #define STOP_TIMEOUT_1000_JUMBO 73
/* 802.3x PAUSE multicast address (01:80:C2:00:00:01), see emac_configure(). */
173 static unsigned char default_mcast_addr[] = {
174 0x01, 0x80, 0xC2, 0x00, 0x00, 0x01
178 static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = {
179 "rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum",
180 "tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom",
181 "rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu",
182 "rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet",
183 "rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error",
184 "rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range",
185 "rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun",
186 "rx_bad_packet", "rx_runt_packet", "rx_short_event",
187 "rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long",
188 "rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors",
189 "tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral",
190 "tx_bd_excessive_collisions", "tx_bd_late_collision",
191 "tx_bd_multple_collisions", "tx_bd_single_collision",
192 "tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe",
/* Forward declarations for routines used before their definitions. */
196 static irqreturn_t emac_irq(int irq, void *dev_instance);
197 static void emac_clean_tx_ring(struct emac_instance *dev);
198 static void __emac_set_multicast_list(struct emac_instance *dev);
200 static inline int emac_phy_supports_gige(int phy_mode)
202 return phy_mode == PHY_MODE_GMII ||
203 phy_mode == PHY_MODE_RGMII ||
204 phy_mode == PHY_MODE_TBI ||
205 phy_mode == PHY_MODE_RTBI;
208 static inline int emac_phy_gpcs(int phy_mode)
210 return phy_mode == PHY_MODE_TBI ||
211 phy_mode == PHY_MODE_RTBI;
/* Enable the transmitter by setting MR0[TXE], if it is not already set. */
214 static inline void emac_tx_enable(struct emac_instance *dev)
216 struct emac_regs __iomem *p = dev->emacp;
219 DBG(dev, "tx_enable" NL);
221 r = in_be32(&p->mr0);
222 if (!(r & EMAC_MR0_TXE))
223 out_be32(&p->mr0, r | EMAC_MR0_TXE);
/*
 * Disable the transmitter and busy-wait (bounded by dev->stop_timeout)
 * for MR0[TXI] to confirm the TX channel went idle; report on timeout.
 */
226 static void emac_tx_disable(struct emac_instance *dev)
228 struct emac_regs __iomem *p = dev->emacp;
231 DBG(dev, "tx_disable" NL);
233 r = in_be32(&p->mr0);
234 if (r & EMAC_MR0_TXE) {
235 int n = dev->stop_timeout;
236 out_be32(&p->mr0, r & ~EMAC_MR0_TXE);
237 while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n) {
242 emac_report_timeout_error(dev, "TX disable timeout");
/*
 * Enable the receiver (MR0[RXE]) unless the RX channel was deliberately
 * stopped (MAL_COMMAC_RX_STOPPED, e.g. during an MTU change). If an
 * asynchronous disable is still draining, wait for MR0[RXI] first.
 */
246 static void emac_rx_enable(struct emac_instance *dev)
248 struct emac_regs __iomem *p = dev->emacp;
251 if (unlikely(test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags)))
254 DBG(dev, "rx_enable" NL);
256 r = in_be32(&p->mr0);
257 if (!(r & EMAC_MR0_RXE)) {
258 if (unlikely(!(r & EMAC_MR0_RXI))) {
259 /* Wait if previous async disable is still in progress */
260 int n = dev->stop_timeout;
261 while (!(r = in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
266 emac_report_timeout_error(dev,
267 "RX disable timeout");
269 out_be32(&p->mr0, r | EMAC_MR0_RXE);
/*
 * Disable the receiver and busy-wait (bounded by dev->stop_timeout)
 * for MR0[RXI] to confirm the RX channel went idle; report on timeout.
 */
275 static void emac_rx_disable(struct emac_instance *dev)
277 struct emac_regs __iomem *p = dev->emacp;
280 DBG(dev, "rx_disable" NL);
282 r = in_be32(&p->mr0);
283 if (r & EMAC_MR0_RXE) {
284 int n = dev->stop_timeout;
285 out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
286 while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
291 emac_report_timeout_error(dev, "RX disable timeout");
/*
 * Quiesce the network interface: take the TX/addr locks briefly,
 * refresh trans_start so the watchdog does not fire during the stop,
 * then disable NAPI polling and the TX queue.
 */
295 static inline void emac_netif_stop(struct emac_instance *dev)
297 netif_tx_lock_bh(dev->ndev);
298 netif_addr_lock(dev->ndev);
300 netif_addr_unlock(dev->ndev);
301 netif_tx_unlock_bh(dev->ndev);
302 dev->ndev->trans_start = jiffies; /* prevent tx timeout */
303 mal_poll_disable(dev->mal, &dev->commac);
304 netif_tx_disable(dev->ndev);
/*
 * Restart the interface after emac_netif_stop(): flush any multicast
 * update that was deferred while stopped, wake the TX queue and
 * re-enable NAPI polling.
 */
307 static inline void emac_netif_start(struct emac_instance *dev)
309 netif_tx_lock_bh(dev->ndev);
310 netif_addr_lock(dev->ndev);
312 if (dev->mcast_pending && netif_running(dev->ndev))
313 __emac_set_multicast_list(dev);
314 netif_addr_unlock(dev->ndev);
315 netif_tx_unlock_bh(dev->ndev);
317 netif_wake_queue(dev->ndev);
319 /* NOTE: unconditional netif_wake_queue is only appropriate
320 * so long as all callers are assured to have free tx slots
321 * (taken from tg3... though the case where that is wrong is
322 * not terribly harmful)
324 mal_poll_enable(dev->mal, &dev->commac);
/*
 * Clear MR0[RXE] without waiting for the channel to drain; the next
 * emac_rx_enable() call waits for MR0[RXI] if the stop is still pending.
 */
327 static inline void emac_rx_disable_async(struct emac_instance *dev)
329 struct emac_regs __iomem *p = dev->emacp;
332 DBG(dev, "rx_disable_async" NL);
334 r = in_be32(&p->mr0);
335 if (r & EMAC_MR0_RXE)
336 out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
/*
 * Soft-reset the EMAC core via MR0[SRST] and poll for completion.
 * RX/TX are stopped first (40x erratum) unless a previous reset already
 * failed. Tracks the outcome in dev->reset_failed.
 */
339 static int emac_reset(struct emac_instance *dev)
341 struct emac_regs __iomem *p = dev->emacp;
344 DBG(dev, "reset" NL);
346 if (!dev->reset_failed) {
347 /* 40x erratum suggests stopping RX channel before reset,
350 emac_rx_disable(dev);
351 emac_tx_disable(dev);
/* SRST self-clears when the reset sequence completes. */
354 out_be32(&p->mr0, EMAC_MR0_SRST);
355 while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n)
359 dev->reset_failed = 0;
362 emac_report_timeout_error(dev, "reset timeout");
363 dev->reset_failed = 1;
/*
 * Program the four Group Address Hash Tables (GAHT1-4) from the device's
 * multicast list: CRC32 of each address selects one of 64 hash bits.
 */
368 static void emac_hash_mc(struct emac_instance *dev)
370 struct emac_regs __iomem *p = dev->emacp;
372 struct dev_mc_list *dmi;
374 DBG(dev, "hash_mc %d" NL, dev->ndev->mc_count);
376 for (dmi = dev->ndev->mc_list; dmi; dmi = dmi->next) {
378 DBG2(dev, "mc %02x:%02x:%02x:%02x:%02x:%02x" NL,
379 dmi->dmi_addr[0], dmi->dmi_addr[1], dmi->dmi_addr[2],
380 dmi->dmi_addr[3], dmi->dmi_addr[4], dmi->dmi_addr[5]);
/* Top 6 bits of the CRC pick the bit; each 16-bit register holds 16 bits. */
382 bit = 63 - (ether_crc(ETH_ALEN, dmi->dmi_addr) >> 26);
383 gaht[bit >> 4] |= 0x8000 >> (bit & 0x0f);
385 out_be32(&p->gaht1, gaht[0]);
386 out_be32(&p->gaht2, gaht[1]);
387 out_be32(&p->gaht3, gaht[2]);
388 out_be32(&p->gaht4, gaht[3]);
/*
 * Translate net_device interface flags (IFF_PROMISC/IFF_ALLMULTI and the
 * multicast count) into an EMAC Receive Mode Register value.
 */
391 static inline u32 emac_iff2rmr(struct net_device *ndev)
393 struct emac_instance *dev = netdev_priv(ndev);
396 r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE;
398 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
403 if (ndev->flags & IFF_PROMISC)
/* >32 groups exceeds hash usefulness; fall back to all-multicast. */
405 else if (ndev->flags & IFF_ALLMULTI || ndev->mc_count > 32)
407 else if (ndev->mc_count > 0)
/*
 * Build the base MR1 value for classic (non-EMAC4) cores from the
 * configured TX and RX FIFO sizes.
 */
413 static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
415 u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC_MR1_TR0_MULT;
417 DBG2(dev, "__emac_calc_base_mr1" NL);
421 ret |= EMAC_MR1_TFS_2K;
/* NOTE(review): this is the TX-size branch printing tx_size, yet the
 * message says "Rx FIFO" -- looks like a copy/paste error; should
 * read "Unknown Tx FIFO size". */
424 printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
425 dev->ndev->name, tx_size);
430 ret |= EMAC_MR1_RFS_16K;
433 ret |= EMAC_MR1_RFS_4K;
436 printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
437 dev->ndev->name, rx_size);
/*
 * Build the base MR1 value for EMAC4 cores, including the OPB bus clock
 * indication (OBCI) derived from the OPB frequency in MHz.
 */
443 static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
445 u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC4_MR1_TR |
446 EMAC4_MR1_OBCI(dev->opb_bus_freq / 1000000);
448 DBG2(dev, "__emac4_calc_base_mr1" NL);
452 ret |= EMAC4_MR1_TFS_4K;
455 ret |= EMAC4_MR1_TFS_2K;
/* NOTE(review): TX-size branch but the message says "Rx FIFO" -- same
 * copy/paste error as in __emac_calc_base_mr1(); should say "Tx". */
458 printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
459 dev->ndev->name, tx_size);
464 ret |= EMAC4_MR1_RFS_16K;
467 ret |= EMAC4_MR1_RFS_4K;
470 ret |= EMAC4_MR1_RFS_2K;
473 printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
474 dev->ndev->name, rx_size);
480 static u32 emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
482 return emac_has_feature(dev, EMAC_FTR_EMAC4) ?
483 __emac4_calc_base_mr1(dev, tx_size, rx_size) :
484 __emac_calc_base_mr1(dev, tx_size, rx_size);
487 static inline u32 emac_calc_trtr(struct emac_instance *dev, unsigned int size)
489 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
490 return ((size >> 6) - 1) << EMAC_TRTR_SHIFT_EMAC4;
492 return ((size >> 6) - 1) << EMAC_TRTR_SHIFT;
495 static inline u32 emac_calc_rwmr(struct emac_instance *dev,
496 unsigned int low, unsigned int high)
498 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
499 return (low << 22) | ( (high & 0x3ff) << 6);
501 return (low << 23) | ( (high & 0x1ff) << 7);
/*
 * Full hardware (re)configuration of the EMAC: reset the core, program
 * MR1 (duplex/speed/FIFO sizes/flow control), the station address, VLAN
 * TPID, receive mode, FIFO thresholds, PAUSE water marks and the
 * interrupt enable set. Called with the link state already known.
 */
504 static int emac_configure(struct emac_instance *dev)
506 struct emac_regs __iomem *p = dev->emacp;
507 struct net_device *ndev = dev->ndev;
508 int tx_size, rx_size, link = netif_carrier_ok(dev->ndev);
511 DBG(dev, "configure" NL);
514 out_be32(&p->mr1, in_be32(&p->mr1)
515 | EMAC_MR1_FDE | EMAC_MR1_ILE);
517 } else if (emac_reset(dev) < 0)
520 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
521 tah_reset(dev->tah_dev);
523 DBG(dev, " link = %d duplex = %d, pause = %d, asym_pause = %d\n",
524 link, dev->phy.duplex, dev->phy.pause, dev->phy.asym_pause);
526 /* Default fifo sizes */
527 tx_size = dev->tx_fifo_size;
528 rx_size = dev->rx_fifo_size;
530 /* No link, force loopback */
532 mr1 = EMAC_MR1_FDE | EMAC_MR1_ILE;
534 /* Check for full duplex */
535 else if (dev->phy.duplex == DUPLEX_FULL)
536 mr1 |= EMAC_MR1_FDE | EMAC_MR1_MWSW_001;
538 /* Adjust fifo sizes, mr1 and timeouts based on link speed */
539 dev->stop_timeout = STOP_TIMEOUT_10;
540 switch (dev->phy.speed) {
542 if (emac_phy_gpcs(dev->phy.mode)) {
543 mr1 |= EMAC_MR1_MF_1000GPCS |
544 EMAC_MR1_MF_IPPA(dev->phy.address);
546 /* Put some arbitrary OUI, Manuf & Rev IDs so we can
547 * identify this GPCS PHY later.
549 out_be32(&p->ipcr, 0xdeadbeef);
551 mr1 |= EMAC_MR1_MF_1000;
553 /* Extended fifo sizes */
554 tx_size = dev->tx_fifo_size_gige;
555 rx_size = dev->rx_fifo_size_gige;
557 if (dev->ndev->mtu > ETH_DATA_LEN) {
558 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
559 mr1 |= EMAC4_MR1_JPSM;
561 mr1 |= EMAC_MR1_JPSM;
562 dev->stop_timeout = STOP_TIMEOUT_1000_JUMBO;
564 dev->stop_timeout = STOP_TIMEOUT_1000;
567 mr1 |= EMAC_MR1_MF_100;
568 dev->stop_timeout = STOP_TIMEOUT_100;
570 default: /* make gcc happy */
/* Propagate the negotiated speed to the attached RGMII/ZMII bridge. */
574 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
575 rgmii_set_speed(dev->rgmii_dev, dev->rgmii_port,
577 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
578 zmii_set_speed(dev->zmii_dev, dev->zmii_port, dev->phy.speed);
580 /* on 40x erratum forces us to NOT use integrated flow control,
581 * let's hope it works on 44x ;)
583 if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x) &&
584 dev->phy.duplex == DUPLEX_FULL) {
586 mr1 |= EMAC_MR1_EIFC | EMAC_MR1_APP;
587 else if (dev->phy.asym_pause)
591 /* Add base settings & fifo sizes & program MR1 */
592 mr1 |= emac_calc_base_mr1(dev, tx_size, rx_size);
593 out_be32(&p->mr1, mr1);
595 /* Set individual MAC address */
596 out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
597 out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
598 (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
601 /* VLAN Tag Protocol ID */
602 out_be32(&p->vtpid, 0x8100);
604 /* Receive mode register */
605 r = emac_iff2rmr(ndev);
606 if (r & EMAC_RMR_MAE)
608 out_be32(&p->rmr, r);
610 /* FIFOs thresholds */
611 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
612 r = EMAC4_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
613 tx_size / 2 / dev->fifo_entry_size);
615 r = EMAC_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
616 tx_size / 2 / dev->fifo_entry_size);
617 out_be32(&p->tmr1, r);
618 out_be32(&p->trtr, emac_calc_trtr(dev, tx_size / 2));
620 /* PAUSE frame is sent when RX FIFO reaches its high-water mark,
621 there should be still enough space in FIFO to allow the our link
622 partner time to process this frame and also time to send PAUSE
625 Here is the worst case scenario for the RX FIFO "headroom"
626 (from "The Switch Book") (100Mbps, without preamble, inter-frame gap):
628 1) One maximum-length frame on TX 1522 bytes
629 2) One PAUSE frame time 64 bytes
630 3) PAUSE frame decode time allowance 64 bytes
631 4) One maximum-length frame on RX 1522 bytes
632 5) Round-trip propagation delay of the link (100Mb) 15 bytes
636 I chose to set high-water mark to RX_FIFO_SIZE / 4 (1024 bytes)
637 low-water mark to RX_FIFO_SIZE / 8 (512 bytes)
639 r = emac_calc_rwmr(dev, rx_size / 8 / dev->fifo_entry_size,
640 rx_size / 4 / dev->fifo_entry_size);
641 out_be32(&p->rwmr, r);
643 /* Set PAUSE timer to the maximum */
644 out_be32(&p->ptr, 0xffff);
/* Unmask the error interrupts we care about in the IRQ status enable. */
647 r = EMAC_ISR_OVR | EMAC_ISR_BP | EMAC_ISR_SE |
648 EMAC_ISR_ALE | EMAC_ISR_BFCS | EMAC_ISR_PTLE | EMAC_ISR_ORE |
649 EMAC_ISR_IRE | EMAC_ISR_TE;
650 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
651 r |= EMAC4_ISR_TXPE | EMAC4_ISR_RXPE /* | EMAC4_ISR_TXUE |
653 out_be32(&p->iser, r);
655 /* We need to take GPCS PHY out of isolate mode after EMAC reset */
656 if (emac_phy_gpcs(dev->phy.mode))
657 emac_mii_reset_phy(&dev->phy);
659 /* Required for Pause packet support in EMAC */
660 dev_mc_add(ndev, default_mcast_addr, sizeof(default_mcast_addr), 1);
/*
 * Stop the interface, re-run the full hardware configuration and
 * restart it. Used when link parameters change without a full TX reset.
 */
665 static void emac_reinitialize(struct emac_instance *dev)
667 DBG(dev, "reinitialize" NL);
669 emac_netif_stop(dev);
670 if (!emac_configure(dev)) {
674 emac_netif_start(dev);
/*
 * Hard-reset the TX path: stop the transmitter and its MAL channel,
 * drop every queued skb, zero the ring indices, then bring TX back up.
 */
677 static void emac_full_tx_reset(struct emac_instance *dev)
679 DBG(dev, "full_tx_reset" NL);
681 emac_tx_disable(dev);
682 mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
683 emac_clean_tx_ring(dev);
684 dev->tx_cnt = dev->tx_slot = dev->ack_slot = 0;
688 mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
/*
 * Workqueue handler scheduled from emac_tx_timeout(): performs a full
 * TX reset under dev->link_lock, in process context.
 */
693 static void emac_reset_work(struct work_struct *work)
695 struct emac_instance *dev = container_of(work, struct emac_instance, reset_work);
697 DBG(dev, "reset_work" NL);
699 mutex_lock(&dev->link_lock);
701 emac_netif_stop(dev);
702 emac_full_tx_reset(dev);
703 emac_netif_start(dev);
705 mutex_unlock(&dev->link_lock);
/*
 * netdev watchdog callback: defer recovery to the reset workqueue,
 * since the real reset needs to sleep (mutex, channel drain).
 */
708 static void emac_tx_timeout(struct net_device *ndev)
710 struct emac_instance *dev = netdev_priv(ndev);
712 DBG(dev, "tx_timeout" NL);
714 schedule_work(&dev->reset_work);
/*
 * Test STACR[OC] for MDIO operation completion; the sense of the bit is
 * inverted on cores with the STACR_OC_INVERT quirk.
 */
718 static inline int emac_phy_done(struct emac_instance *dev, u32 stacr)
720 int done = !!(stacr & EMAC_STACR_OC);
722 if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
/*
 * Read one PHY register over the shared MDIO interface.
 * Serialized by dev->mdio_lock; claims the ZMII/RGMII MDIO mux for the
 * duration. Returns the 16-bit register value, or -ETIMEDOUT / error.
 */
728 static int __emac_mdio_read(struct emac_instance *dev, u8 id, u8 reg)
730 struct emac_regs __iomem *p = dev->emacp;
732 int n, err = -ETIMEDOUT;
734 mutex_lock(&dev->mdio_lock);
736 DBG2(dev, "mdio_read(%02x,%02x)" NL, id, reg);
738 /* Enable proper MDIO port */
739 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
740 zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
741 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
742 rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);
744 /* Wait for management interface to become idle */
746 while (!emac_phy_done(dev, in_be32(&p->stacr))) {
749 DBG2(dev, " -> timeout wait idle\n");
754 /* Issue read command */
755 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
756 r = EMAC4_STACR_BASE(dev->opb_bus_freq);
758 r = EMAC_STACR_BASE(dev->opb_bus_freq);
759 if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
761 if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
762 r |= EMACX_STACR_STAC_READ;
764 r |= EMAC_STACR_STAC_READ;
765 r |= (reg & EMAC_STACR_PRA_MASK)
766 | ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT);
767 out_be32(&p->stacr, r);
769 /* Wait for read to complete */
771 while (!emac_phy_done(dev, (r = in_be32(&p->stacr)))) {
774 DBG2(dev, " -> timeout wait complete\n");
/* PHYE set means the PHY itself reported an error on the transaction. */
779 if (unlikely(r & EMAC_STACR_PHYE)) {
780 DBG(dev, "mdio_read(%02x, %02x) failed" NL, id, reg);
785 r = ((r >> EMAC_STACR_PHYD_SHIFT) & EMAC_STACR_PHYD_MASK);
787 DBG2(dev, "mdio_read -> %04x" NL, r);
/* Release the MDIO mux in reverse acquisition order. */
790 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
791 rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
792 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
793 zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
794 mutex_unlock(&dev->mdio_lock);
796 return err == 0 ? r : err;
/*
 * Write one PHY register over the shared MDIO interface.
 * Mirrors __emac_mdio_read(): serialized by dev->mdio_lock with the
 * ZMII/RGMII MDIO mux held; timeouts are logged but not returned.
 */
799 static void __emac_mdio_write(struct emac_instance *dev, u8 id, u8 reg,
802 struct emac_regs __iomem *p = dev->emacp;
804 int n, err = -ETIMEDOUT;
806 mutex_lock(&dev->mdio_lock);
808 DBG2(dev, "mdio_write(%02x,%02x,%04x)" NL, id, reg, val);
810 /* Enable proper MDIO port */
811 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
812 zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
813 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
814 rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);
816 /* Wait for management interface to be idle */
818 while (!emac_phy_done(dev, in_be32(&p->stacr))) {
821 DBG2(dev, " -> timeout wait idle\n");
826 /* Issue write command */
827 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
828 r = EMAC4_STACR_BASE(dev->opb_bus_freq);
830 r = EMAC_STACR_BASE(dev->opb_bus_freq);
831 if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
833 if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
834 r |= EMACX_STACR_STAC_WRITE;
836 r |= EMAC_STACR_STAC_WRITE;
837 r |= (reg & EMAC_STACR_PRA_MASK) |
838 ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT) |
839 (val << EMAC_STACR_PHYD_SHIFT);
840 out_be32(&p->stacr, r);
842 /* Wait for write to complete */
844 while (!emac_phy_done(dev, in_be32(&p->stacr))) {
847 DBG2(dev, " -> timeout wait complete\n");
/* Release the MDIO mux in reverse acquisition order. */
853 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
854 rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
855 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
856 zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
857 mutex_unlock(&dev->mdio_lock);
/*
 * mii_if_info callbacks: route MDIO accesses through the designated
 * MDIO-owning EMAC instance (dev->mdio_instance) when one exists.
 */
860 static int emac_mdio_read(struct net_device *ndev, int id, int reg)
862 struct emac_instance *dev = netdev_priv(ndev);
865 res = __emac_mdio_read(dev->mdio_instance ? dev->mdio_instance : dev,
870 static void emac_mdio_write(struct net_device *ndev, int id, int reg, int val)
872 struct emac_instance *dev = netdev_priv(ndev);
874 __emac_mdio_write(dev->mdio_instance ? dev->mdio_instance : dev,
875 (u8) id, (u8) reg, (u16) val);
/*
 * Apply the current interface flags / multicast list to the RMR.
 * Only stops the RX channel (not a full reset) -- see the MWSW_001
 * rationale below. Caller context: see emac_set_multicast_list() and
 * emac_netif_start().
 */
879 static void __emac_set_multicast_list(struct emac_instance *dev)
881 struct emac_regs __iomem *p = dev->emacp;
882 u32 rmr = emac_iff2rmr(dev->ndev);
884 DBG(dev, "__multicast %08x" NL, rmr);
886 /* I decided to relax register access rules here to avoid
889 * There is a real problem with EMAC4 core if we use MWSW_001 bit
890 * in MR1 register and do a full EMAC reset.
891 * One TX BD status update is delayed and, after EMAC reset, it
892 * never happens, resulting in TX hung (it'll be recovered by TX
893 * timeout handler eventually, but this is just gross).
894 * So we either have to do full TX reset or try to cheat here :)
896 * The only required change is to RX mode register, so I *think* all
897 * we need is just to stop RX channel. This seems to work on all
900 * If we need the full reset, we might just trigger the workqueue
901 * and do it async... a bit nasty but should work --BenH
903 dev->mcast_pending = 0;
904 emac_rx_disable(dev);
905 if (rmr & EMAC_RMR_MAE)
907 out_be32(&p->rmr, rmr);
/*
 * ndo_set_multicast_list entry point: either applies the new filter
 * immediately or marks it pending for emac_netif_start() to flush.
 */
912 static void emac_set_multicast_list(struct net_device *ndev)
914 struct emac_instance *dev = netdev_priv(ndev);
916 DBG(dev, "multicast" NL);
918 BUG_ON(!netif_running(dev->ndev));
921 dev->mcast_pending = 1;
924 __emac_set_multicast_list(dev);
/*
 * Rebuild the RX ring for a new MTU: quiesce RX, drop any partial
 * scatter-gather packet, mark all BDs empty, and reallocate skbs only
 * when the new MTU needs bigger buffers. Also toggles the Jumbo bit in
 * MR1 (via a full TX reset) when crossing ETH_DATA_LEN.
 */
927 static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu)
929 int rx_sync_size = emac_rx_sync_size(new_mtu);
930 int rx_skb_size = emac_rx_skb_size(new_mtu);
933 mutex_lock(&dev->link_lock);
934 emac_netif_stop(dev);
935 emac_rx_disable(dev);
936 mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);
938 if (dev->rx_sg_skb) {
939 ++dev->estats.rx_dropped_resize;
940 dev_kfree_skb(dev->rx_sg_skb);
941 dev->rx_sg_skb = NULL;
944 /* Make a first pass over RX ring and mark BDs ready, dropping
945 * non-processed packets on the way. We need this as a separate pass
946 * to simplify error recovery in the case of allocation failure later.
948 for (i = 0; i < NUM_RX_BUFF; ++i) {
949 if (dev->rx_desc[i].ctrl & MAL_RX_CTRL_FIRST)
950 ++dev->estats.rx_dropped_resize;
952 dev->rx_desc[i].data_len = 0;
953 dev->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY |
954 (i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
957 /* Reallocate RX ring only if bigger skb buffers are required */
958 if (rx_skb_size <= dev->rx_skb_size)
961 /* Second pass, allocate new skbs */
962 for (i = 0; i < NUM_RX_BUFF; ++i) {
963 struct sk_buff *skb = alloc_skb(rx_skb_size, GFP_ATOMIC);
969 BUG_ON(!dev->rx_skb[i]);
970 dev_kfree_skb(dev->rx_skb[i]);
/* +2 keeps the IP header 16-byte aligned; the DMA mapping compensates. */
972 skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
973 dev->rx_desc[i].data_ptr =
974 dma_map_single(&dev->ofdev->dev, skb->data - 2, rx_sync_size,
975 DMA_FROM_DEVICE) + 2;
976 dev->rx_skb[i] = skb;
979 /* Check if we need to change "Jumbo" bit in MR1 */
980 if ((new_mtu > ETH_DATA_LEN) ^ (dev->ndev->mtu > ETH_DATA_LEN)) {
981 /* This is to prevent starting RX channel in emac_rx_enable() */
982 set_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
984 dev->ndev->mtu = new_mtu;
985 emac_full_tx_reset(dev);
988 mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(new_mtu));
991 clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
993 mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
995 emac_netif_start(dev);
996 mutex_unlock(&dev->link_lock);
1001 /* Process ctx, rtnl_lock semaphore */
/*
 * ndo_change_mtu: validate the range, resize the RX ring only when the
 * running interface actually needs differently-sized skbs, then record
 * the new MTU and derived buffer sizes.
 */
1002 static int emac_change_mtu(struct net_device *ndev, int new_mtu)
1004 struct emac_instance *dev = netdev_priv(ndev);
1007 if (new_mtu < EMAC_MIN_MTU || new_mtu > dev->max_mtu)
1010 DBG(dev, "change_mtu(%d)" NL, new_mtu);
1012 if (netif_running(ndev)) {
1013 /* Check if we really need to reinitalize RX ring */
1014 if (emac_rx_skb_size(ndev->mtu) != emac_rx_skb_size(new_mtu))
1015 ret = emac_resize_rx_ring(dev, new_mtu);
1019 ndev->mtu = new_mtu;
1020 dev->rx_skb_size = emac_rx_skb_size(new_mtu);
1021 dev->rx_sync_size = emac_rx_sync_size(new_mtu);
/*
 * Free every skb still held by the TX ring and clear the descriptors;
 * BDs still marked READY were never sent, so count them as dropped.
 */
1027 static void emac_clean_tx_ring(struct emac_instance *dev)
1031 for (i = 0; i < NUM_TX_BUFF; ++i) {
1032 if (dev->tx_skb[i]) {
1033 dev_kfree_skb(dev->tx_skb[i]);
1034 dev->tx_skb[i] = NULL;
1035 if (dev->tx_desc[i].ctrl & MAL_TX_CTRL_READY)
1036 ++dev->estats.tx_dropped;
1038 dev->tx_desc[i].ctrl = 0;
1039 dev->tx_desc[i].data_ptr = 0;
/*
 * Free every skb in the RX ring (including a partial scatter-gather
 * packet in rx_sg_skb) and clear the descriptors.
 */
1043 static void emac_clean_rx_ring(struct emac_instance *dev)
1047 for (i = 0; i < NUM_RX_BUFF; ++i)
1048 if (dev->rx_skb[i]) {
1049 dev->rx_desc[i].ctrl = 0;
1050 dev_kfree_skb(dev->rx_skb[i]);
1051 dev->rx_skb[i] = NULL;
1052 dev->rx_desc[i].data_ptr = 0;
1055 if (dev->rx_sg_skb) {
1056 dev_kfree_skb(dev->rx_sg_skb);
1057 dev->rx_sg_skb = NULL;
/*
 * Allocate and DMA-map one RX skb into the given ring slot, marking the
 * descriptor empty (with WRAP on the last slot).
 */
1061 static inline int emac_alloc_rx_skb(struct emac_instance *dev, int slot,
1064 struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags);
1068 dev->rx_skb[slot] = skb;
1069 dev->rx_desc[slot].data_len = 0;
/* +2 keeps the IP header 16-byte aligned; the DMA mapping compensates. */
1071 skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
1072 dev->rx_desc[slot].data_ptr =
1073 dma_map_single(&dev->ofdev->dev, skb->data - 2, dev->rx_sync_size,
1074 DMA_FROM_DEVICE) + 2;
1076 dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
1077 (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
/* Log the current carrier state, speed, duplex and pause settings. */
1082 static void emac_print_link_status(struct emac_instance *dev)
1084 if (netif_carrier_ok(dev->ndev))
1085 printk(KERN_INFO "%s: link is up, %d %s%s\n",
1086 dev->ndev->name, dev->phy.speed,
1087 dev->phy.duplex == DUPLEX_FULL ? "FDX" : "HDX",
1088 dev->phy.pause ? ", pause enabled" :
1089 dev->phy.asym_pause ? ", asymmetric pause enabled" : "");
1091 printk(KERN_INFO "%s: link is down\n", dev->ndev->name);
1094 /* Process ctx, rtnl_lock semaphore */
/*
 * ndo_open: request the error IRQ, populate the RX ring, start PHY link
 * polling (if a PHY is attached), configure the hardware and enable the
 * MAL channels, TX/RX and the netif queue.
 */
1095 static int emac_open(struct net_device *ndev)
1097 struct emac_instance *dev = netdev_priv(ndev);
1100 DBG(dev, "open" NL);
1102 /* Setup error IRQ handler */
1103 err = request_irq(dev->emac_irq, emac_irq, 0, "EMAC", dev);
1105 printk(KERN_ERR "%s: failed to request IRQ %d\n",
1106 ndev->name, dev->emac_irq);
1110 /* Allocate RX ring */
1111 for (i = 0; i < NUM_RX_BUFF; ++i)
1112 if (emac_alloc_rx_skb(dev, i, GFP_KERNEL)) {
1113 printk(KERN_ERR "%s: failed to allocate RX ring\n",
1118 dev->tx_cnt = dev->tx_slot = dev->ack_slot = dev->rx_slot = 0;
1119 clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
1120 dev->rx_sg_skb = NULL;
1122 mutex_lock(&dev->link_lock);
1125 /* Start PHY polling now.
/* phy.address < 0 means no PHY: assume the link is always up below. */
1127 if (dev->phy.address >= 0) {
1128 int link_poll_interval;
1129 if (dev->phy.def->ops->poll_link(&dev->phy)) {
1130 dev->phy.def->ops->read_link(&dev->phy);
1131 emac_rx_clk_default(dev);
1132 netif_carrier_on(dev->ndev);
1133 link_poll_interval = PHY_POLL_LINK_ON;
1135 emac_rx_clk_tx(dev);
1136 netif_carrier_off(dev->ndev);
1137 link_poll_interval = PHY_POLL_LINK_OFF;
1139 dev->link_polling = 1;
1141 schedule_delayed_work(&dev->link_work, link_poll_interval);
1142 emac_print_link_status(dev);
1144 netif_carrier_on(dev->ndev);
1146 emac_configure(dev);
1147 mal_poll_add(dev->mal, &dev->commac);
1148 mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
1149 mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(ndev->mtu));
1150 mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
1151 emac_tx_enable(dev);
1152 emac_rx_enable(dev);
1153 emac_netif_start(dev);
1155 mutex_unlock(&dev->link_lock);
/* Error path: tear down whatever was set up before the failure. */
1159 emac_clean_rx_ring(dev);
1160 free_irq(dev->emac_irq, dev);
/*
 * Compare the link parameters currently programmed in MR1 against the
 * PHY-reported state; non-zero return means the hardware needs to be
 * reconfigured.
 */
1167 static int emac_link_differs(struct emac_instance *dev)
1169 u32 r = in_be32(&dev->emacp->mr1);
1171 int duplex = r & EMAC_MR1_FDE ? DUPLEX_FULL : DUPLEX_HALF;
1172 int speed, pause, asym_pause;
1174 if (r & EMAC_MR1_MF_1000)
1176 else if (r & EMAC_MR1_MF_100)
1181 switch (r & (EMAC_MR1_EIFC | EMAC_MR1_APP)) {
1182 case (EMAC_MR1_EIFC | EMAC_MR1_APP):
1191 pause = asym_pause = 0;
1193 return speed != dev->phy.speed || duplex != dev->phy.duplex ||
1194 pause != dev->phy.pause || asym_pause != dev->phy.asym_pause;
/*
 * Delayed-work PHY poller: on link-up transition re-read the link
 * parameters and do a full TX reset; on link-down transition drop the
 * carrier and reinitialize. Reschedules itself at a rate that depends
 * on the carrier state.
 */
1198 static void emac_link_timer(struct work_struct *work)
1200 struct emac_instance *dev =
1201 container_of((struct delayed_work *)work,
1202 struct emac_instance, link_work);
1203 int link_poll_interval;
1205 mutex_lock(&dev->link_lock);
1206 DBG2(dev, "link timer" NL);
1211 if (dev->phy.def->ops->poll_link(&dev->phy)) {
1212 if (!netif_carrier_ok(dev->ndev)) {
1213 emac_rx_clk_default(dev);
1214 /* Get new link parameters */
1215 dev->phy.def->ops->read_link(&dev->phy);
1217 netif_carrier_on(dev->ndev);
1218 emac_netif_stop(dev);
1219 emac_full_tx_reset(dev);
1220 emac_netif_start(dev);
1221 emac_print_link_status(dev);
1223 link_poll_interval = PHY_POLL_LINK_ON;
1225 if (netif_carrier_ok(dev->ndev)) {
1226 emac_rx_clk_tx(dev);
1227 netif_carrier_off(dev->ndev);
1228 netif_tx_disable(dev->ndev);
1229 emac_reinitialize(dev);
1230 emac_print_link_status(dev);
1232 link_poll_interval = PHY_POLL_LINK_OFF;
1234 schedule_delayed_work(&dev->link_work, link_poll_interval);
1236 mutex_unlock(&dev->link_lock);
/*
 * Force the link state machine to re-evaluate: drop the carrier and
 * restart the poll work at the fast (link-off) interval.
 */
1239 static void emac_force_link_update(struct emac_instance *dev)
1241 netif_carrier_off(dev->ndev);
1243 if (dev->link_polling) {
1244 cancel_rearming_delayed_work(&dev->link_work);
/* Re-check the flag: emac_close() may have cleared it while we cancelled. */
1245 if (dev->link_polling)
1246 schedule_delayed_work(&dev->link_work, PHY_POLL_LINK_OFF);
1250 /* Process ctx, rtnl_lock semaphore */
/*
 * ndo_stop: stop link polling, quiesce TX/RX and the MAL channels,
 * release the rings and the error IRQ. Reverses emac_open().
 */
1251 static int emac_close(struct net_device *ndev)
1253 struct emac_instance *dev = netdev_priv(ndev);
1255 DBG(dev, "close" NL);
1257 if (dev->phy.address >= 0) {
1258 dev->link_polling = 0;
1259 cancel_rearming_delayed_work(&dev->link_work);
1261 mutex_lock(&dev->link_lock);
1262 emac_netif_stop(dev);
1264 mutex_unlock(&dev->link_lock);
1266 emac_rx_disable(dev);
1267 emac_tx_disable(dev);
1268 mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);
1269 mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
1270 mal_poll_del(dev->mal, &dev->commac);
1272 emac_clean_tx_ring(dev);
1273 emac_clean_rx_ring(dev);
1275 free_irq(dev->emac_irq, dev);
/* Return the TX descriptor control bits requesting TAH hardware
 * checksum generation for this skb, or 0 (via fall-through) when the
 * EMAC has no TAH or the stack did not ask for partial checksumming. */
1280 static inline u16 emac_tx_csum(struct emac_instance *dev,
1281 struct sk_buff *skb)
1283 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
1284 (skb->ip_summed == CHECKSUM_PARTIAL)) {
1285 ++dev->stats.tx_packets_csum;
1286 return EMAC_TX_CTRL_TAH_CSUM;
/* Common tail of the xmit paths: kick the EMAC transmitter via TMR0,
 * stop the queue when the TX ring just became full, and account the
 * transmitted packet/bytes. */
1291 static inline int emac_xmit_finish(struct emac_instance *dev, int len)
1293 struct emac_regs __iomem *p = dev->emacp;
1294 struct net_device *ndev = dev->ndev;
1296 /* Send the packet out. If the if makes a significant perf
1297 * difference, then we can store the TMR0 value in "dev"
/* EMAC4 uses a different TMR0 transmit-kick encoding */
1300 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
1301 out_be32(&p->tmr0, EMAC4_TMR0_XMIT)
1303 out_be32(&p->tmr0, EMAC_TMR0_XMIT);
/* Ring is full once tx_cnt reaches NUM_TX_BUFF: flow-control the stack */
1305 if (unlikely(++dev->tx_cnt == NUM_TX_BUFF)) {
1306 netif_stop_queue(ndev);
1307 DBG2(dev, "stopped TX queue" NL);
1310 ndev->trans_start = jiffies;
1311 ++dev->stats.tx_packets;
1312 dev->stats.tx_bytes += len;
/* hard_start_xmit for non-SG EMACs: place the whole skb into a single
 * TX descriptor slot and kick the transmitter. */
1318 static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1320 struct emac_instance *dev = netdev_priv(ndev);
1321 unsigned int len = skb->len;
/* Descriptor owns the whole frame: generate FCS/pad, mark ready+last,
 * and request TAH checksumming when applicable */
1324 u16 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
1325 MAL_TX_CTRL_LAST | emac_tx_csum(dev, skb);
1327 slot = dev->tx_slot++;
/* Wrap the producer index at the end of the ring */
1328 if (dev->tx_slot == NUM_TX_BUFF) {
1330 ctrl |= MAL_TX_CTRL_WRAP;
1333 DBG2(dev, "xmit(%u) %d" NL, len, slot);
1335 dev->tx_skb[slot] = skb;
1336 dev->tx_desc[slot].data_ptr = dma_map_single(&dev->ofdev->dev,
/* See the comment at the top of the file: dma_unmap is intentionally
 * omitted on this platform */
1339 dev->tx_desc[slot].data_len = (u16) len;
1341 dev->tx_desc[slot].ctrl = ctrl;
1343 return emac_xmit_finish(dev, len);
/* Fill TX descriptors for one DMA-mapped region, splitting it into
 * MAL_MAX_TX_SIZE-sized chunks.  @last marks the final region of the
 * skb so the LAST bit can be set on its final chunk.  Returns the slot
 * of the last descriptor written (per the callers' usage). */
1346 static inline int emac_xmit_split(struct emac_instance *dev, int slot,
1347 u32 pd, int len, int last, u16 base_ctrl)
1350 u16 ctrl = base_ctrl;
1351 int chunk = min(len, MAL_MAX_TX_SIZE);
1354 slot = (slot + 1) % NUM_TX_BUFF;
1357 ctrl |= MAL_TX_CTRL_LAST;
1358 if (slot == NUM_TX_BUFF - 1)
1359 ctrl |= MAL_TX_CTRL_WRAP;
/* Intermediate slots carry no skb reference; the skb is attached to
 * the last slot by the caller */
1361 dev->tx_skb[slot] = NULL;
1362 dev->tx_desc[slot].data_ptr = pd;
1363 dev->tx_desc[slot].data_len = (u16) chunk;
1364 dev->tx_desc[slot].ctrl = ctrl;
1375 /* Tx lock BH disabled (SG version for TAH equipped EMACs) */
/* Scatter/gather hard_start_xmit: maps the linear part and each page
 * fragment, splitting into MAL-sized chunks via emac_xmit_split().
 * Falls back to the simple path for small linear skbs; undoes the
 * partially-built descriptor chain if the ring fills mid-way. */
1376 static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
1378 struct emac_instance *dev = netdev_priv(ndev);
1379 int nr_frags = skb_shinfo(skb)->nr_frags;
1380 int len = skb->len, chunk;
1385 /* This is common "fast" path */
1386 if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE))
1387 return emac_start_xmit(skb, ndev);
/* len now covers only the linear (non-paged) part of the skb */
1389 len -= skb->data_len;
1391 /* Note, this is only an *estimation*, we can still run out of empty
1392 * slots because of the additional fragmentation into
1393 * MAL_MAX_TX_SIZE-sized chunks
1395 if (unlikely(dev->tx_cnt + nr_frags + mal_tx_chunks(len) > NUM_TX_BUFF))
1398 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
1399 emac_tx_csum(dev, skb);
1400 slot = dev->tx_slot;
/* Map and enqueue the linear part first */
1403 dev->tx_skb[slot] = NULL;
1404 chunk = min(len, MAL_MAX_TX_SIZE);
1405 dev->tx_desc[slot].data_ptr = pd =
1406 dma_map_single(&dev->ofdev->dev, skb->data, len, DMA_TO_DEVICE);
1407 dev->tx_desc[slot].data_len = (u16) chunk;
1410 slot = emac_xmit_split(dev, slot, pd + chunk, len, !nr_frags,
/* Then each page fragment */
1413 for (i = 0; i < nr_frags; ++i) {
1414 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
/* Re-check ring occupancy per fragment; the initial check was only
 * an estimate */
1417 if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
1420 pd = dma_map_page(&dev->ofdev->dev, frag->page, frag->page_offset, len,
1423 slot = emac_xmit_split(dev, slot, pd, len, i == nr_frags - 1,
1427 DBG2(dev, "xmit_sg(%u) %d - %d" NL, skb->len, dev->tx_slot, slot);
1429 /* Attach skb to the last slot so we don't release it too early */
1430 dev->tx_skb[slot] = skb;
1432 /* Send the packet out */
/* The first descriptor is made READY last, after the whole chain is
 * built, so the MAL never sees a half-constructed packet */
1433 if (dev->tx_slot == NUM_TX_BUFF - 1)
1434 ctrl |= MAL_TX_CTRL_WRAP;
1436 dev->tx_desc[dev->tx_slot].ctrl = ctrl;
1437 dev->tx_slot = (slot + 1) % NUM_TX_BUFF;
1439 return emac_xmit_finish(dev, skb->len);
1442 /* Well, too bad. Our previous estimation was overly optimistic.
/* Undo path: clear the ctrl word of every descriptor written so far,
 * walking backwards to dev->tx_slot, then stop the queue */
1445 while (slot != dev->tx_slot) {
1446 dev->tx_desc[slot].ctrl = 0;
1449 slot = NUM_TX_BUFF - 1;
1451 ++dev->estats.tx_undo;
1454 netif_stop_queue(ndev);
1455 DBG2(dev, "stopped TX queue" NL);
/* Decode a bad TX descriptor status word into per-cause error
 * counters in dev->estats. */
1460 static void emac_parse_tx_error(struct emac_instance *dev, u16 ctrl)
1462 struct emac_error_stats *st = &dev->estats;
1464 DBG(dev, "BD TX error %04x" NL, ctrl);
1467 if (ctrl & EMAC_TX_ST_BFCS)
1468 ++st->tx_bd_bad_fcs;
1469 if (ctrl & EMAC_TX_ST_LCS)
1470 ++st->tx_bd_carrier_loss;
1471 if (ctrl & EMAC_TX_ST_ED)
1472 ++st->tx_bd_excessive_deferral;
1473 if (ctrl & EMAC_TX_ST_EC)
1474 ++st->tx_bd_excessive_collisions;
1475 if (ctrl & EMAC_TX_ST_LC)
1476 ++st->tx_bd_late_collision;
1477 if (ctrl & EMAC_TX_ST_MC)
1478 ++st->tx_bd_multple_collisions;
1479 if (ctrl & EMAC_TX_ST_SC)
1480 ++st->tx_bd_single_collision;
1481 if (ctrl & EMAC_TX_ST_UR)
1482 ++st->tx_bd_underrun;
1483 if (ctrl & EMAC_TX_ST_SQE)
/* TX completion handler (MAL commac poll_tx callback): reap finished
 * descriptors starting at ack_slot, free their skbs, record errors and
 * wake the queue once enough slots have drained. */
1487 static void emac_poll_tx(void *param)
1489 struct emac_instance *dev = param;
1492 DBG2(dev, "poll_tx, %d %d" NL, dev->tx_cnt, dev->ack_slot);
/* TAH-equipped EMACs have a different set of "bad" status bits */
1494 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
1495 bad_mask = EMAC_IS_BAD_TX_TAH;
1497 bad_mask = EMAC_IS_BAD_TX;
/* Serialize against the xmit path */
1499 netif_tx_lock_bh(dev->ndev);
1502 int slot = dev->ack_slot, n = 0;
1504 ctrl = dev->tx_desc[slot].ctrl;
/* READY clear means the MAL is done with this descriptor */
1505 if (!(ctrl & MAL_TX_CTRL_READY)) {
1506 struct sk_buff *skb = dev->tx_skb[slot];
1511 dev->tx_skb[slot] = NULL;
1513 slot = (slot + 1) % NUM_TX_BUFF;
1515 if (unlikely(ctrl & bad_mask))
1516 emac_parse_tx_error(dev, ctrl);
1522 dev->ack_slot = slot;
/* Restart the stack once occupancy drops below the wakeup threshold */
1523 if (netif_queue_stopped(dev->ndev) &&
1524 dev->tx_cnt < EMAC_TX_WAKEUP_THRESH)
1525 netif_wake_queue(dev->ndev);
1527 DBG2(dev, "tx %d pkts" NL, n);
1530 netif_tx_unlock_bh(dev->ndev);
/* Hand an RX skb's buffer back to the hardware: re-map it for DMA and
 * re-arm its descriptor as EMPTY (with WRAP on the last ring slot).
 * The -2/+2 offsets keep the IP header aligned (2-byte headroom). */
1533 static inline void emac_recycle_rx_skb(struct emac_instance *dev, int slot,
1536 struct sk_buff *skb = dev->rx_skb[slot];
1538 DBG2(dev, "recycle %d %d" NL, slot, len);
1541 dma_map_single(&dev->ofdev->dev, skb->data - 2,
1542 EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE);
1544 dev->rx_desc[slot].data_len = 0;
/* Descriptor control is written last so the MAL sees a complete BD */
1546 dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
1547 (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
/* Decode a bad RX descriptor status word into per-cause error
 * counters in dev->estats. */
1550 static void emac_parse_rx_error(struct emac_instance *dev, u16 ctrl)
1552 struct emac_error_stats *st = &dev->estats;
1554 DBG(dev, "BD RX error %04x" NL, ctrl);
1557 if (ctrl & EMAC_RX_ST_OE)
1558 ++st->rx_bd_overrun;
1559 if (ctrl & EMAC_RX_ST_BP)
1560 ++st->rx_bd_bad_packet;
1561 if (ctrl & EMAC_RX_ST_RP)
1562 ++st->rx_bd_runt_packet;
1563 if (ctrl & EMAC_RX_ST_SE)
1564 ++st->rx_bd_short_event;
1565 if (ctrl & EMAC_RX_ST_AE)
1566 ++st->rx_bd_alignment_error;
1567 if (ctrl & EMAC_RX_ST_BFCS)
1568 ++st->rx_bd_bad_fcs;
1569 if (ctrl & EMAC_RX_ST_PTL)
1570 ++st->rx_bd_packet_too_long;
1571 if (ctrl & EMAC_RX_ST_ORE)
1572 ++st->rx_bd_out_of_range;
1573 if (ctrl & EMAC_RX_ST_IRE)
1574 ++st->rx_bd_in_range;
/* Mark the skb's checksum as hardware-verified when a TAH is present
 * and the descriptor reported no checksum-related status bits
 * (ctrl == 0 here means the TAH validated the packet). */
1577 static inline void emac_rx_csum(struct emac_instance *dev,
1578 struct sk_buff *skb, u16 ctrl)
1580 #ifdef CONFIG_IBM_NEW_EMAC_TAH
1581 if (!ctrl && dev->tah_dev) {
1582 skb->ip_summed = CHECKSUM_UNNECESSARY;
1583 ++dev->stats.rx_packets_csum;
/* Append one RX descriptor's data to the in-progress scatter/gather
 * packet (dev->rx_sg_skb).  Drops the whole SG packet if it would
 * exceed the configured skb size; always recycles the source slot.
 * Return value distinguishes success from the dropped case (callers
 * test it to decide whether the SG packet is still alive). */
1588 static inline int emac_rx_sg_append(struct emac_instance *dev, int slot)
1590 if (likely(dev->rx_sg_skb != NULL)) {
1591 int len = dev->rx_desc[slot].data_len;
1592 int tot_len = dev->rx_sg_skb->len + len;
/* +2 accounts for the alignment headroom reserved in the skb */
1594 if (unlikely(tot_len + 2 > dev->rx_skb_size)) {
1595 ++dev->estats.rx_dropped_mtu;
1596 dev_kfree_skb(dev->rx_sg_skb);
1597 dev->rx_sg_skb = NULL;
1599 cacheable_memcpy(skb_tail_pointer(dev->rx_sg_skb),
1600 dev->rx_skb[slot]->data, len);
1601 skb_put(dev->rx_sg_skb, len);
1602 emac_recycle_rx_skb(dev, slot, len);
1606 emac_recycle_rx_skb(dev, slot, 0);
1610 /* NAPI poll context */
/* Main RX poll loop (MAL commac poll_rx callback): processes up to
 * @budget completed RX descriptors starting at dev->rx_slot.  Handles
 * single-descriptor packets (with a copy-break fast path for small
 * frames), multi-descriptor scatter/gather packets, error packets and
 * OOM recycling, then restarts a stopped RX channel if needed. */
1611 static int emac_poll_rx(void *param, int budget)
1613 struct emac_instance *dev = param;
1614 int slot = dev->rx_slot, received = 0;
1616 DBG2(dev, "poll_rx(%d)" NL, budget);
1619 while (budget > 0) {
1621 struct sk_buff *skb;
1622 u16 ctrl = dev->rx_desc[slot].ctrl;
/* EMPTY means the hardware still owns this descriptor: done */
1624 if (ctrl & MAL_RX_CTRL_EMPTY)
1627 skb = dev->rx_skb[slot];
1629 len = dev->rx_desc[slot].data_len;
/* Multi-descriptor packet: handled by the SG path below */
1631 if (unlikely(!MAL_IS_SINGLE_RX(ctrl)))
/* Errored packet (other than TAH checksum failure): count and
 * recycle the buffer in place */
1634 ctrl &= EMAC_BAD_RX_MASK;
1635 if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
1636 emac_parse_rx_error(dev, ctrl);
1637 ++dev->estats.rx_dropped_error;
1638 emac_recycle_rx_skb(dev, slot, 0);
/* Runt frames (shorter than an Ethernet header) are dropped */
1643 if (len < ETH_HLEN) {
1644 ++dev->estats.rx_dropped_stack;
1645 emac_recycle_rx_skb(dev, slot, len);
/* Copy-break: for small frames, copy into a fresh skb and keep
 * the original DMA buffer in the ring */
1649 if (len && len < EMAC_RX_COPY_THRESH) {
1650 struct sk_buff *copy_skb =
1651 alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2, GFP_ATOMIC);
1652 if (unlikely(!copy_skb))
1655 skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2);
1656 cacheable_memcpy(copy_skb->data - 2, skb->data - 2,
1658 emac_recycle_rx_skb(dev, slot, len);
/* Large frame: pass the ring skb up and replenish the slot */
1660 } else if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC)))
1665 skb->dev = dev->ndev;
1666 skb->protocol = eth_type_trans(skb, dev->ndev);
1667 emac_rx_csum(dev, skb, ctrl);
1669 if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
1670 ++dev->estats.rx_dropped_stack;
1672 ++dev->stats.rx_packets;
1674 dev->stats.rx_bytes += len;
1675 slot = (slot + 1) % NUM_RX_BUFF;
/* --- scatter/gather (multi-descriptor) packet handling --- */
1680 if (ctrl & MAL_RX_CTRL_FIRST) {
1681 BUG_ON(dev->rx_sg_skb);
1682 if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) {
1683 DBG(dev, "rx OOM %d" NL, slot);
1684 ++dev->estats.rx_dropped_oom;
1685 emac_recycle_rx_skb(dev, slot, 0);
/* First descriptor's skb becomes the accumulator */
1687 dev->rx_sg_skb = skb;
1690 } else if (!emac_rx_sg_append(dev, slot) &&
1691 (ctrl & MAL_RX_CTRL_LAST)) {
/* Last fragment appended: finalize and deliver the SG packet */
1693 skb = dev->rx_sg_skb;
1694 dev->rx_sg_skb = NULL;
1696 ctrl &= EMAC_BAD_RX_MASK;
1697 if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
1698 emac_parse_rx_error(dev, ctrl);
1699 ++dev->estats.rx_dropped_error;
/* OOM fallback: drop the packet and recycle the buffer */
1707 DBG(dev, "rx OOM %d" NL, slot);
1708 /* Drop the packet and recycle skb */
1709 ++dev->estats.rx_dropped_oom;
1710 emac_recycle_rx_skb(dev, slot, 0);
1715 DBG2(dev, "rx %d BDs" NL, received);
1716 dev->rx_slot = slot;
/* If the RX channel was stopped (e.g. by a descriptor error) and we
 * still have budget plus a ready descriptor, restart reception.  Any
 * half-assembled SG packet is discarded first. */
1719 if (unlikely(budget && test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags))) {
1721 if (!(dev->rx_desc[slot].ctrl & MAL_RX_CTRL_EMPTY)) {
1722 DBG2(dev, "rx restart" NL);
1727 if (dev->rx_sg_skb) {
1728 DBG2(dev, "dropping partial rx packet" NL);
1729 ++dev->estats.rx_dropped_error;
1730 dev_kfree_skb(dev->rx_sg_skb);
1731 dev->rx_sg_skb = NULL;
1734 clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
1735 mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
1736 emac_rx_enable(dev);
1742 /* NAPI poll context */
/* Return non-zero if the next RX descriptor holds a completed packet */
1743 static int emac_peek_rx(void *param)
1745 struct emac_instance *dev = param;
1747 return !(dev->rx_desc[dev->rx_slot].ctrl & MAL_RX_CTRL_EMPTY);
1750 /* NAPI poll context */
/* Scatter/gather variant of emac_peek_rx: scan forward from rx_slot
 * until an EMPTY descriptor (nothing complete) or a LAST descriptor
 * (a whole packet is ready) is found. */
1751 static int emac_peek_rx_sg(void *param)
1753 struct emac_instance *dev = param;
1755 int slot = dev->rx_slot;
1757 u16 ctrl = dev->rx_desc[slot].ctrl;
1758 if (ctrl & MAL_RX_CTRL_EMPTY)
1760 else if (ctrl & MAL_RX_CTRL_LAST)
1763 slot = (slot + 1) % NUM_RX_BUFF;
1765 /* I'm just being paranoid here :) */
/* Guard against an endless scan if the ring is in a bad state */
1766 if (unlikely(slot == dev->rx_slot))
/* RX descriptor error callback: count the stall and asynchronously
 * disable the receiver (re-enabled later from the poll loop). */
1772 static void emac_rxde(void *param)
1774 struct emac_instance *dev = param;
1776 ++dev->estats.rx_stopped;
1777 emac_rx_disable_async(dev);
/* Hard interrupt handler: read and acknowledge the EMAC interrupt
 * status register, then bump the matching error counters under
 * dev->lock.  (ISR is write-one-to-clear: writing back the value read
 * acknowledges exactly the events we are about to account.) */
1781 static irqreturn_t emac_irq(int irq, void *dev_instance)
1783 struct emac_instance *dev = dev_instance;
1784 struct emac_regs __iomem *p = dev->emacp;
1785 struct emac_error_stats *st = &dev->estats;
1788 spin_lock(&dev->lock);
1790 isr = in_be32(&p->isr);
1791 out_be32(&p->isr, isr);
1793 DBG(dev, "isr = %08x" NL, isr);
/* EMAC4-specific parity/underrun/overrun events */
1795 if (isr & EMAC4_ISR_TXPE)
1797 if (isr & EMAC4_ISR_RXPE)
1799 if (isr & EMAC4_ISR_TXUE)
1801 if (isr & EMAC4_ISR_RXOE)
1802 ++st->rx_fifo_overrun;
/* Common RX error events */
1803 if (isr & EMAC_ISR_OVR)
1805 if (isr & EMAC_ISR_BP)
1806 ++st->rx_bad_packet;
1807 if (isr & EMAC_ISR_RP)
1808 ++st->rx_runt_packet;
1809 if (isr & EMAC_ISR_SE)
1810 ++st->rx_short_event;
1811 if (isr & EMAC_ISR_ALE)
1812 ++st->rx_alignment_error;
1813 if (isr & EMAC_ISR_BFCS)
1815 if (isr & EMAC_ISR_PTLE)
1816 ++st->rx_packet_too_long;
1817 if (isr & EMAC_ISR_ORE)
1818 ++st->rx_out_of_range;
1819 if (isr & EMAC_ISR_IRE)
1821 if (isr & EMAC_ISR_SQE)
1823 if (isr & EMAC_ISR_TE)
1826 spin_unlock(&dev->lock);
/* get_stats callback: fold the driver's internal 64-bit packet and
 * error counters into the "legacy" struct net_device_stats, taking
 * dev->lock so the snapshot is consistent with the IRQ path. */
1831 static struct net_device_stats *emac_stats(struct net_device *ndev)
1833 struct emac_instance *dev = netdev_priv(ndev);
1834 struct emac_stats *st = &dev->stats;
1835 struct emac_error_stats *est = &dev->estats;
1836 struct net_device_stats *nst = &dev->nstats;
1837 unsigned long flags;
1839 DBG2(dev, "stats" NL);
1841 /* Compute "legacy" statistics */
1842 spin_lock_irqsave(&dev->lock, flags);
1843 nst->rx_packets = (unsigned long)st->rx_packets;
1844 nst->rx_bytes = (unsigned long)st->rx_bytes;
1845 nst->tx_packets = (unsigned long)st->tx_packets;
1846 nst->tx_bytes = (unsigned long)st->tx_bytes;
/* rx_dropped aggregates all RX drop causes tracked separately */
1847 nst->rx_dropped = (unsigned long)(est->rx_dropped_oom +
1848 est->rx_dropped_error +
1849 est->rx_dropped_resize +
1850 est->rx_dropped_mtu);
1851 nst->tx_dropped = (unsigned long)est->tx_dropped;
1853 nst->rx_errors = (unsigned long)est->rx_bd_errors;
1854 nst->rx_fifo_errors = (unsigned long)(est->rx_bd_overrun +
1855 est->rx_fifo_overrun +
1857 nst->rx_frame_errors = (unsigned long)(est->rx_bd_alignment_error +
1858 est->rx_alignment_error);
1859 nst->rx_crc_errors = (unsigned long)(est->rx_bd_bad_fcs +
/* Length errors combine BD-level and ISR-level counters */
1861 nst->rx_length_errors = (unsigned long)(est->rx_bd_runt_packet +
1862 est->rx_bd_short_event +
1863 est->rx_bd_packet_too_long +
1864 est->rx_bd_out_of_range +
1865 est->rx_bd_in_range +
1866 est->rx_runt_packet +
1867 est->rx_short_event +
1868 est->rx_packet_too_long +
1869 est->rx_out_of_range +
1872 nst->tx_errors = (unsigned long)(est->tx_bd_errors + est->tx_errors);
1873 nst->tx_fifo_errors = (unsigned long)(est->tx_bd_underrun +
1875 nst->tx_carrier_errors = (unsigned long)est->tx_bd_carrier_loss;
1876 nst->collisions = (unsigned long)(est->tx_bd_excessive_deferral +
1877 est->tx_bd_excessive_collisions +
1878 est->tx_bd_late_collision +
1879 est->tx_bd_multple_collisions);
1880 spin_unlock_irqrestore(&dev->lock, flags);
/* MAL channel callbacks for the non-scatter/gather RX path */
1884 static struct mal_commac_ops emac_commac_ops = {
1885 .poll_tx = &emac_poll_tx,
1886 .poll_rx = &emac_poll_rx,
1887 .peek_rx = &emac_peek_rx,
/* MAL channel callbacks for the scatter/gather RX path: identical
 * except for the SG-aware peek_rx */
1891 static struct mal_commac_ops emac_commac_sg_ops = {
1892 .poll_tx = &emac_poll_tx,
1893 .poll_rx = &emac_poll_rx,
1894 .peek_rx = &emac_peek_rx_sg,
1898 /* Ethtool support */
/* ethtool get_settings: report the PHY's capabilities and the cached
 * link parameters; link state fields are read under link_lock. */
1899 static int emac_ethtool_get_settings(struct net_device *ndev,
1900 struct ethtool_cmd *cmd)
1902 struct emac_instance *dev = netdev_priv(ndev);
1904 cmd->supported = dev->phy.features;
1905 cmd->port = PORT_MII;
1906 cmd->phy_address = dev->phy.address;
/* address < 0 means the PHY-less (internal) configuration */
1908 dev->phy.address >= 0 ? XCVR_EXTERNAL : XCVR_INTERNAL;
1910 mutex_lock(&dev->link_lock);
1911 cmd->advertising = dev->phy.advertising;
1912 cmd->autoneg = dev->phy.autoneg;
1913 cmd->speed = dev->phy.speed;
1914 cmd->duplex = dev->phy.duplex;
1915 mutex_unlock(&dev->link_lock);
/* ethtool set_settings: validate the request against the PHY's
 * supported feature mask, then either force speed/duplex or restart
 * autonegotiation with the requested advertisement.  Finishes with a
 * forced link update so changes apply promptly. */
1920 static int emac_ethtool_set_settings(struct net_device *ndev,
1921 struct ethtool_cmd *cmd)
1923 struct emac_instance *dev = netdev_priv(ndev);
1924 u32 f = dev->phy.features;
1926 DBG(dev, "set_settings(%d, %d, %d, 0x%08x)" NL,
1927 cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising);
1929 /* Basic sanity checks */
1930 if (dev->phy.address < 0)
1932 if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
1934 if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
1936 if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL)
/* Forced mode: the requested speed/duplex pair must be supported */
1939 if (cmd->autoneg == AUTONEG_DISABLE) {
1940 switch (cmd->speed) {
1942 if (cmd->duplex == DUPLEX_HALF
1943 && !(f & SUPPORTED_10baseT_Half))
1945 if (cmd->duplex == DUPLEX_FULL
1946 && !(f & SUPPORTED_10baseT_Full))
1950 if (cmd->duplex == DUPLEX_HALF
1951 && !(f & SUPPORTED_100baseT_Half))
1953 if (cmd->duplex == DUPLEX_FULL
1954 && !(f & SUPPORTED_100baseT_Full))
1958 if (cmd->duplex == DUPLEX_HALF
1959 && !(f & SUPPORTED_1000baseT_Half))
1961 if (cmd->duplex == DUPLEX_FULL
1962 && !(f & SUPPORTED_1000baseT_Full))
1969 mutex_lock(&dev->link_lock);
1970 dev->phy.def->ops->setup_forced(&dev->phy, cmd->speed,
1972 mutex_unlock(&dev->link_lock);
/* Autoneg mode: advertise only what the PHY supports, preserving
 * the current pause advertisement bits */
1975 if (!(f & SUPPORTED_Autoneg))
1978 mutex_lock(&dev->link_lock);
1979 dev->phy.def->ops->setup_aneg(&dev->phy,
1980 (cmd->advertising & f) |
1981 (dev->phy.advertising &
1983 ADVERTISED_Asym_Pause)));
1984 mutex_unlock(&dev->link_lock);
1986 emac_force_link_update(dev);
/* ethtool get_ringparam: ring sizes are compile-time constants, so
 * current and maximum are reported as the same value. */
1991 static void emac_ethtool_get_ringparam(struct net_device *ndev,
1992 struct ethtool_ringparam *rp)
1994 rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF;
1995 rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF;
/* ethtool get_pauseparam: derive the reported flow-control state from
 * the PHY's autoneg/advertising bits and the negotiated pause flags
 * (pause only meaningful in full duplex). */
1998 static void emac_ethtool_get_pauseparam(struct net_device *ndev,
1999 struct ethtool_pauseparam *pp)
2001 struct emac_instance *dev = netdev_priv(ndev);
2003 mutex_lock(&dev->link_lock);
2004 if ((dev->phy.features & SUPPORTED_Autoneg) &&
2005 (dev->phy.advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause)))
2008 if (dev->phy.duplex == DUPLEX_FULL) {
2010 pp->rx_pause = pp->tx_pause = 1;
2011 else if (dev->phy.asym_pause)
2014 mutex_unlock(&dev->link_lock);
/* ethtool get_rx_csum: RX checksum offload is available iff a TAH
 * device is attached. */
2017 static u32 emac_ethtool_get_rx_csum(struct net_device *ndev)
2019 struct emac_instance *dev = netdev_priv(ndev);
2021 return dev->tah_dev != NULL;
/* Size of this EMAC's register-dump section (sub-header plus the
 * EMAC4 or classic EMAC register block). */
2024 static int emac_get_regs_len(struct emac_instance *dev)
2026 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
2027 return sizeof(struct emac_ethtool_regs_subhdr) +
2028 EMAC4_ETHTOOL_REGS_SIZE;
2030 return sizeof(struct emac_ethtool_regs_subhdr) +
2031 EMAC_ETHTOOL_REGS_SIZE;
/* ethtool get_regs_len: total dump size = header + EMAC + MAL regs,
 * plus ZMII/RGMII/TAH sections for whichever companions are present. */
2034 static int emac_ethtool_get_regs_len(struct net_device *ndev)
2036 struct emac_instance *dev = netdev_priv(ndev);
2039 size = sizeof(struct emac_ethtool_regs_hdr) +
2040 emac_get_regs_len(dev) + mal_get_regs_len(dev->mal);
2041 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2042 size += zmii_get_regs_len(dev->zmii_dev);
2043 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2044 size += rgmii_get_regs_len(dev->rgmii_dev);
2045 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2046 size += tah_get_regs_len(dev->tah_dev);
/* Copy this EMAC's registers into the ethtool dump buffer, preceded by
 * a sub-header with cell index and register-layout version.  Returns
 * the advanced buffer pointer for the next section. */
2051 static void *emac_dump_regs(struct emac_instance *dev, void *buf)
2053 struct emac_ethtool_regs_subhdr *hdr = buf;
2055 hdr->index = dev->cell_index;
2056 if (emac_has_feature(dev, EMAC_FTR_EMAC4)) {
2057 hdr->version = EMAC4_ETHTOOL_REGS_VER;
2058 memcpy_fromio(hdr + 1, dev->emacp, EMAC4_ETHTOOL_REGS_SIZE);
2059 return ((void *)(hdr + 1) + EMAC4_ETHTOOL_REGS_SIZE);
2061 hdr->version = EMAC_ETHTOOL_REGS_VER;
2062 memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE);
2063 return ((void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE);
/* ethtool get_regs: build the full register dump — MAL, EMAC, then the
 * optional ZMII/RGMII/TAH companion blocks, recording which components
 * are present in the header's components bitmask. */
2067 static void emac_ethtool_get_regs(struct net_device *ndev,
2068 struct ethtool_regs *regs, void *buf)
2070 struct emac_instance *dev = netdev_priv(ndev);
2071 struct emac_ethtool_regs_hdr *hdr = buf;
2073 hdr->components = 0;
2076 buf = mal_dump_regs(dev->mal, buf);
2077 buf = emac_dump_regs(dev, buf);
2078 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) {
2079 hdr->components |= EMAC_ETHTOOL_REGS_ZMII;
2080 buf = zmii_dump_regs(dev->zmii_dev, buf);
2082 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) {
2083 hdr->components |= EMAC_ETHTOOL_REGS_RGMII;
2084 buf = rgmii_dump_regs(dev->rgmii_dev, buf);
2086 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH)) {
2087 hdr->components |= EMAC_ETHTOOL_REGS_TAH;
2088 buf = tah_dump_regs(dev->tah_dev, buf);
/* ethtool nway_reset: restart autonegotiation with the current
 * advertisement.  Fails when no PHY is attached or autoneg is off. */
2092 static int emac_ethtool_nway_reset(struct net_device *ndev)
2094 struct emac_instance *dev = netdev_priv(ndev);
2097 DBG(dev, "nway_reset" NL);
2099 if (dev->phy.address < 0)
2102 mutex_lock(&dev->link_lock);
2103 if (!dev->phy.autoneg) {
2108 dev->phy.def->ops->setup_aneg(&dev->phy, dev->phy.advertising);
2110 mutex_unlock(&dev->link_lock);
2111 emac_force_link_update(dev);
/* ethtool get_stats_count: fixed number of exported statistics */
2115 static int emac_ethtool_get_stats_count(struct net_device *ndev)
2117 return EMAC_ETHTOOL_STATS_COUNT;
/* ethtool get_strings: copy out the statistics key names */
2120 static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset,
2123 if (stringset == ETH_SS_STATS)
2124 memcpy(buf, &emac_stats_keys, sizeof(emac_stats_keys));
/* ethtool get_ethtool_stats: export dev->stats followed by dev->estats
 * as a flat u64 array (order must match emac_stats_keys). */
2127 static void emac_ethtool_get_ethtool_stats(struct net_device *ndev,
2128 struct ethtool_stats *estats,
2131 struct emac_instance *dev = netdev_priv(ndev);
2133 memcpy(tmp_stats, &dev->stats, sizeof(dev->stats));
2134 tmp_stats += sizeof(dev->stats) / sizeof(u64);
2135 memcpy(tmp_stats, &dev->estats, sizeof(dev->estats));
/* ethtool get_drvinfo: driver name/version plus a bus_info string built
 * from the cell index and the device-tree node path. */
2138 static void emac_ethtool_get_drvinfo(struct net_device *ndev,
2139 struct ethtool_drvinfo *info)
2141 struct emac_instance *dev = netdev_priv(ndev);
2143 strcpy(info->driver, "ibm_emac");
2144 strcpy(info->version, DRV_VERSION);
2145 info->fw_version[0] = '\0';
2146 sprintf(info->bus_info, "PPC 4xx EMAC-%d %s",
2147 dev->cell_index, dev->ofdev->node->full_name);
2148 info->n_stats = emac_ethtool_get_stats_count(ndev);
2149 info->regdump_len = emac_ethtool_get_regs_len(ndev);
/* ethtool operations table; generic ethtool_op_* helpers are used for
 * the link/csum/sg queries that need no driver-specific logic */
2152 static const struct ethtool_ops emac_ethtool_ops = {
2153 .get_settings = emac_ethtool_get_settings,
2154 .set_settings = emac_ethtool_set_settings,
2155 .get_drvinfo = emac_ethtool_get_drvinfo,
2157 .get_regs_len = emac_ethtool_get_regs_len,
2158 .get_regs = emac_ethtool_get_regs,
2160 .nway_reset = emac_ethtool_nway_reset,
2162 .get_ringparam = emac_ethtool_get_ringparam,
2163 .get_pauseparam = emac_ethtool_get_pauseparam,
2165 .get_rx_csum = emac_ethtool_get_rx_csum,
2167 .get_strings = emac_ethtool_get_strings,
2168 .get_stats_count = emac_ethtool_get_stats_count,
2169 .get_ethtool_stats = emac_ethtool_get_ethtool_stats,
2171 .get_link = ethtool_op_get_link,
2172 .get_tx_csum = ethtool_op_get_tx_csum,
2173 .get_sg = ethtool_op_get_sg,
/* Private MII ioctls: report the PHY address, and read or write a PHY
 * register via MDIO (the write requires CAP_NET_ADMIN).  Fails when no
 * PHY is attached. */
2176 static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
2178 struct emac_instance *dev = netdev_priv(ndev);
2179 uint16_t *data = (uint16_t *) & rq->ifr_ifru;
2181 DBG(dev, "ioctl %08x" NL, cmd);
2183 if (dev->phy.address < 0)
2188 case SIOCDEVPRIVATE:
2189 data[0] = dev->phy.address;
2192 case SIOCDEVPRIVATE + 1:
2193 data[3] = emac_mdio_read(ndev, dev->phy.address, data[1]);
2197 case SIOCDEVPRIVATE + 2:
2198 if (!capable(CAP_NET_ADMIN))
2200 emac_mdio_write(ndev, dev->phy.address, data[1], data[2]);
/* One probe-time dependency of an EMAC instance: the device-tree
 * phandle, its resolved node/of_device, and the bound driver data. */
2207 struct emac_depentry {
2209 struct device_node *node;
2210 struct of_device *ofdev;
/* Indices into the dependency array used by emac_check_deps() /
 * emac_wait_deps() */
2214 #define EMAC_DEP_MAL_IDX 0
2215 #define EMAC_DEP_ZMII_IDX 1
2216 #define EMAC_DEP_RGMII_IDX 2
2217 #define EMAC_DEP_TAH_IDX 3
2218 #define EMAC_DEP_MDIO_IDX 4
2219 #define EMAC_DEP_PREV_IDX 5
2220 #define EMAC_DEP_COUNT 6
/* Try to resolve every dependency entry one step further
 * (phandle -> node -> of_device -> drvdata).  Returns true once all
 * entries are fully resolved; called repeatedly from emac_wait_deps()
 * as companion drivers bind. */
2222 static int __devinit emac_check_deps(struct emac_instance *dev,
2223 struct emac_depentry *deps)
2226 struct device_node *np;
2228 for (i = 0; i < EMAC_DEP_COUNT; i++) {
2229 /* no dependency on that item, allright */
2230 if (deps[i].phandle == 0) {
2234 /* special case for blist as the dependency might go away */
2235 if (i == EMAC_DEP_PREV_IDX) {
2236 np = *(dev->blist - 1);
2238 deps[i].phandle = 0;
2242 if (deps[i].node == NULL)
2243 deps[i].node = of_node_get(np);
2245 if (deps[i].node == NULL)
2246 deps[i].node = of_find_node_by_phandle(deps[i].phandle);
2247 if (deps[i].node == NULL)
2249 if (deps[i].ofdev == NULL)
2250 deps[i].ofdev = of_find_device_by_node(deps[i].node);
2251 if (deps[i].ofdev == NULL)
2253 if (deps[i].drvdata == NULL)
2254 deps[i].drvdata = dev_get_drvdata(&deps[i].ofdev->dev);
2255 if (deps[i].drvdata != NULL)
2258 return (there == EMAC_DEP_COUNT);
/* Drop the of_device references taken on the companion devices */
2261 static void emac_put_deps(struct emac_instance *dev)
2264 of_dev_put(dev->mal_dev);
2266 of_dev_put(dev->zmii_dev);
2268 of_dev_put(dev->rgmii_dev);
2270 of_dev_put(dev->mdio_dev);
2272 of_dev_put(dev->tah_dev);
/* OF bus notifier: wake any probe waiting in emac_wait_deps() whenever
 * a driver binds, so the dependencies can be re-checked. */
2275 static int __devinit emac_of_bus_notify(struct notifier_block *nb,
2276 unsigned long action, void *data)
2278 /* We are only interested in device addition */
2279 if (action == BUS_NOTIFY_BOUND_DRIVER)
2280 wake_up_all(&emac_probe_wait);
/* Notifier registered around the dependency wait in emac_wait_deps() */
2284 static struct notifier_block emac_of_bus_notifier __devinitdata = {
2285 .notifier_call = emac_of_bus_notify
/* Wait (with timeout) for all companion devices this EMAC depends on —
 * MAL, ZMII, RGMII, TAH, MDIO and the previous EMAC in the boot list —
 * to be probed, using a bus notifier to re-check on each driver bind.
 * On success, stores the resolved of_devices in the instance. */
2288 static int __devinit emac_wait_deps(struct emac_instance *dev)
2290 struct emac_depentry deps[EMAC_DEP_COUNT];
2293 memset(&deps, 0, sizeof(deps));
2295 deps[EMAC_DEP_MAL_IDX].phandle = dev->mal_ph;
2296 deps[EMAC_DEP_ZMII_IDX].phandle = dev->zmii_ph;
2297 deps[EMAC_DEP_RGMII_IDX].phandle = dev->rgmii_ph;
2299 deps[EMAC_DEP_TAH_IDX].phandle = dev->tah_ph;
2301 deps[EMAC_DEP_MDIO_IDX].phandle = dev->mdio_ph;
/* 0xffffffff is a sentinel: "previous EMAC in boot order", resolved
 * specially in emac_check_deps() */
2302 if (dev->blist && dev->blist > emac_boot_list)
2303 deps[EMAC_DEP_PREV_IDX].phandle = 0xffffffffu;
2304 bus_register_notifier(&of_platform_bus_type, &emac_of_bus_notifier);
2305 wait_event_timeout(emac_probe_wait,
2306 emac_check_deps(dev, deps),
2307 EMAC_PROBE_DEP_TIMEOUT);
2308 bus_unregister_notifier(&of_platform_bus_type, &emac_of_bus_notifier);
2309 err = emac_check_deps(dev, deps) ? 0 : -ENODEV;
/* Release node refs always; device refs only on failure (on success
 * they are kept and stored below, dropped later by emac_put_deps()) */
2310 for (i = 0; i < EMAC_DEP_COUNT; i++) {
2312 of_node_put(deps[i].node);
2313 if (err && deps[i].ofdev)
2314 of_dev_put(deps[i].ofdev);
2317 dev->mal_dev = deps[EMAC_DEP_MAL_IDX].ofdev;
2318 dev->zmii_dev = deps[EMAC_DEP_ZMII_IDX].ofdev;
2319 dev->rgmii_dev = deps[EMAC_DEP_RGMII_IDX].ofdev;
2320 dev->tah_dev = deps[EMAC_DEP_TAH_IDX].ofdev;
2321 dev->mdio_dev = deps[EMAC_DEP_MDIO_IDX].ofdev;
2323 if (deps[EMAC_DEP_PREV_IDX].ofdev)
2324 of_dev_put(deps[EMAC_DEP_PREV_IDX].ofdev);
/* Read a u32 device-tree property into *val.  When @fatal, a missing
 * or undersized property is logged as an error (callers treat it as a
 * probe failure); otherwise callers fall back to a default. */
2328 static int __devinit emac_read_uint_prop(struct device_node *np, const char *name,
2329 u32 *val, int fatal)
2332 const u32 *prop = of_get_property(np, name, &len);
2333 if (prop == NULL || len < sizeof(u32)) {
2335 printk(KERN_ERR "%s: missing %s property\n",
2336 np->full_name, name);
/* Probe-time PHY setup: handle the PHY-less configuration, scan the
 * MDIO bus for a PHY (honouring phy_address/phy_map restrictions and
 * the global busy_phy_map), apply the 440GX clock-source workaround
 * around the scan, then program initial autoneg or forced link
 * parameters. */
2343 static int __devinit emac_init_phy(struct emac_instance *dev)
2345 struct device_node *np = dev->ofdev->node;
2346 struct net_device *ndev = dev->ndev;
2350 dev->phy.dev = ndev;
2351 dev->phy.mode = dev->phy_mode;
2353 /* PHY-less configuration.
2354 * XXX I probably should move these settings to the dev tree
2356 if (dev->phy_address == 0xffffffff && dev->phy_map == 0xffffffff) {
2359 /* PHY-less configuration.
2360 * XXX I probably should move these settings to the dev tree
2362 dev->phy.address = -1;
2363 dev->phy.features = SUPPORTED_100baseT_Full | SUPPORTED_MII;
/* Serialize PHY scanning across EMAC instances (busy_phy_map is a
 * driver-global) */
2369 mutex_lock(&emac_phy_map_lock);
2370 phy_map = dev->phy_map | busy_phy_map;
2372 DBG(dev, "PHY maps %08x %08x" NL, dev->phy_map, busy_phy_map);
2374 dev->phy.mdio_read = emac_mdio_read;
2375 dev->phy.mdio_write = emac_mdio_write;
2377 /* Enable internal clock source */
2378 #ifdef CONFIG_PPC_DCR_NATIVE
2379 if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
2380 dcri_clrset(SDR0, SDR0_MFR, 0, SDR0_MFR_ECS);
2382 /* PHY clock workaround */
2383 emac_rx_clk_tx(dev);
2385 /* Enable internal clock source on 440GX*/
2386 #ifdef CONFIG_PPC_DCR_NATIVE
2387 if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
2388 dcri_clrset(SDR0, SDR0_MFR, 0, SDR0_MFR_ECS);
2390 /* Configure EMAC with defaults so we can at least use MDIO
2391 * This is needed mostly for 440GX
2393 if (emac_phy_gpcs(dev->phy.mode)) {
2395 * Make GPCS PHY address equal to EMAC index.
2396 * We probably should take into account busy_phy_map
2397 * and/or phy_map here.
2399 * Note that the busy_phy_map is currently global
2400 * while it should probably be per-ASIC...
2402 dev->phy.address = dev->cell_index;
2405 emac_configure(dev);
/* A fixed phy-address property restricts the scan to that address */
2407 if (dev->phy_address != 0xffffffff)
2408 phy_map = ~(1 << dev->phy_address);
/* Scan all 32 MDIO addresses not excluded by phy_map */
2410 for (i = 0; i < 0x20; phy_map >>= 1, ++i)
2411 if (!(phy_map & 1)) {
2413 busy_phy_map |= 1 << i;
2415 /* Quick check if there is a PHY at the address */
2416 r = emac_mdio_read(dev->ndev, i, MII_BMCR);
2417 if (r == 0xffff || r < 0)
2419 if (!emac_mii_phy_probe(&dev->phy, i))
2423 /* Enable external clock source */
2424 #ifdef CONFIG_PPC_DCR_NATIVE
2425 if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
2426 dcri_clrset(SDR0, SDR0_MFR, SDR0_MFR_ECS, 0);
2428 mutex_unlock(&emac_phy_map_lock);
2430 printk(KERN_WARNING "%s: can't find PHY!\n", np->full_name);
2435 if (dev->phy.def->ops->init)
2436 dev->phy.def->ops->init(&dev->phy);
2438 /* Disable any PHY features not supported by the platform */
2439 dev->phy.def->features &= ~dev->phy_feat_exc;
2441 /* Setup initial link parameters */
2442 if (dev->phy.features & SUPPORTED_Autoneg) {
2443 adv = dev->phy.features;
2444 if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x))
2445 adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
2446 /* Restart autonegotiation */
2447 dev->phy.def->ops->setup_aneg(&dev->phy, adv);
2449 u32 f = dev->phy.def->features;
2450 int speed = SPEED_10, fd = DUPLEX_HALF;
2452 /* Select highest supported speed/duplex */
2453 if (f & SUPPORTED_1000baseT_Full) {
2456 } else if (f & SUPPORTED_1000baseT_Half)
2458 else if (f & SUPPORTED_100baseT_Full) {
2461 } else if (f & SUPPORTED_100baseT_Half)
2463 else if (f & SUPPORTED_10baseT_Full)
2466 /* Force link parameters */
2467 dev->phy.def->ops->setup_forced(&dev->phy, speed, fd);
2472 static int __devinit emac_init_config(struct emac_instance *dev)
2474 struct device_node *np = dev->ofdev->node;
2477 const char *pm, *phy_modes[] = {
2479 [PHY_MODE_MII] = "mii",
2480 [PHY_MODE_RMII] = "rmii",
2481 [PHY_MODE_SMII] = "smii",
2482 [PHY_MODE_RGMII] = "rgmii",
2483 [PHY_MODE_TBI] = "tbi",
2484 [PHY_MODE_GMII] = "gmii",
2485 [PHY_MODE_RTBI] = "rtbi",
2486 [PHY_MODE_SGMII] = "sgmii",
2489 /* Read config from device-tree */
2490 if (emac_read_uint_prop(np, "mal-device", &dev->mal_ph, 1))
2492 if (emac_read_uint_prop(np, "mal-tx-channel", &dev->mal_tx_chan, 1))
2494 if (emac_read_uint_prop(np, "mal-rx-channel", &dev->mal_rx_chan, 1))
2496 if (emac_read_uint_prop(np, "cell-index", &dev->cell_index, 1))
2498 if (emac_read_uint_prop(np, "max-frame-size", &dev->max_mtu, 0))
2499 dev->max_mtu = 1500;
2500 if (emac_read_uint_prop(np, "rx-fifo-size", &dev->rx_fifo_size, 0))
2501 dev->rx_fifo_size = 2048;
2502 if (emac_read_uint_prop(np, "tx-fifo-size", &dev->tx_fifo_size, 0))
2503 dev->tx_fifo_size = 2048;
2504 if (emac_read_uint_prop(np, "rx-fifo-size-gige", &dev->rx_fifo_size_gige, 0))
2505 dev->rx_fifo_size_gige = dev->rx_fifo_size;
2506 if (emac_read_uint_prop(np, "tx-fifo-size-gige", &dev->tx_fifo_size_gige, 0))
2507 dev->tx_fifo_size_gige = dev->tx_fifo_size;
2508 if (emac_read_uint_prop(np, "phy-address", &dev->phy_address, 0))
2509 dev->phy_address = 0xffffffff;
2510 if (emac_read_uint_prop(np, "phy-map", &dev->phy_map, 0))
2511 dev->phy_map = 0xffffffff;
2512 if (emac_read_uint_prop(np->parent, "clock-frequency", &dev->opb_bus_freq, 1))
2514 if (emac_read_uint_prop(np, "tah-device", &dev->tah_ph, 0))
2516 if (emac_read_uint_prop(np, "tah-channel", &dev->tah_port, 0))
2518 if (emac_read_uint_prop(np, "mdio-device", &dev->mdio_ph, 0))
2520 if (emac_read_uint_prop(np, "zmii-device", &dev->zmii_ph, 0))
2522 if (emac_read_uint_prop(np, "zmii-channel", &dev->zmii_port, 0))
2523 dev->zmii_port = 0xffffffff;;
2524 if (emac_read_uint_prop(np, "rgmii-device", &dev->rgmii_ph, 0))
2526 if (emac_read_uint_prop(np, "rgmii-channel", &dev->rgmii_port, 0))
2527 dev->rgmii_port = 0xffffffff;;
2528 if (emac_read_uint_prop(np, "fifo-entry-size", &dev->fifo_entry_size, 0))
2529 dev->fifo_entry_size = 16;
2530 if (emac_read_uint_prop(np, "mal-burst-size", &dev->mal_burst_size, 0))
2531 dev->mal_burst_size = 256;
2533 /* PHY mode needs some decoding */
2534 dev->phy_mode = PHY_MODE_NA;
2535 pm = of_get_property(np, "phy-mode", &plen);
2538 for (i = 0; i < ARRAY_SIZE(phy_modes); i++)
2539 if (!strcasecmp(pm, phy_modes[i])) {
2545 /* Backward compat with non-final DT */
2546 if (dev->phy_mode == PHY_MODE_NA && pm != NULL && plen == 4) {
2547 u32 nmode = *(const u32 *)pm;
2548 if (nmode > PHY_MODE_NA && nmode <= PHY_MODE_SGMII)
2549 dev->phy_mode = nmode;
2552 /* Check EMAC version */
2553 if (of_device_is_compatible(np, "ibm,emac4")) {
2554 dev->features |= EMAC_FTR_EMAC4;
2555 if (of_device_is_compatible(np, "ibm,emac-440gx"))
2556 dev->features |= EMAC_FTR_440GX_PHY_CLK_FIX;
2558 if (of_device_is_compatible(np, "ibm,emac-440ep") ||
2559 of_device_is_compatible(np, "ibm,emac-440gr"))
2560 dev->features |= EMAC_FTR_440EP_PHY_CLK_FIX;
2563 /* Fixup some feature bits based on the device tree */
2564 if (of_get_property(np, "has-inverted-stacr-oc", NULL))
2565 dev->features |= EMAC_FTR_STACR_OC_INVERT;
2566 if (of_get_property(np, "has-new-stacr-staopc", NULL))
2567 dev->features |= EMAC_FTR_HAS_NEW_STACR;
2569 /* CAB lacks the appropriate properties */
2570 if (of_device_is_compatible(np, "ibm,emac-axon"))
2571 dev->features |= EMAC_FTR_HAS_NEW_STACR |
2572 EMAC_FTR_STACR_OC_INVERT;
2574 /* Enable TAH/ZMII/RGMII features as found */
2575 if (dev->tah_ph != 0) {
2576 #ifdef CONFIG_IBM_NEW_EMAC_TAH
2577 dev->features |= EMAC_FTR_HAS_TAH;
2579 printk(KERN_ERR "%s: TAH support not enabled !\n",
2585 if (dev->zmii_ph != 0) {
2586 #ifdef CONFIG_IBM_NEW_EMAC_ZMII
2587 dev->features |= EMAC_FTR_HAS_ZMII;
2589 printk(KERN_ERR "%s: ZMII support not enabled !\n",
2595 if (dev->rgmii_ph != 0) {
2596 #ifdef CONFIG_IBM_NEW_EMAC_RGMII
2597 dev->features |= EMAC_FTR_HAS_RGMII;
2599 printk(KERN_ERR "%s: RGMII support not enabled !\n",
2605 /* Read MAC-address */
2606 p = of_get_property(np, "local-mac-address", NULL);
2608 printk(KERN_ERR "%s: Can't find local-mac-address property\n",
2612 memcpy(dev->ndev->dev_addr, p, 6);
2614 DBG(dev, "features : 0x%08x / 0x%08x\n", dev->features, EMAC_FTRS_POSSIBLE);
2615 DBG(dev, "tx_fifo_size : %d (%d gige)\n", dev->tx_fifo_size, dev->tx_fifo_size_gige);
2616 DBG(dev, "rx_fifo_size : %d (%d gige)\n", dev->rx_fifo_size, dev->rx_fifo_size_gige);
2617 DBG(dev, "max_mtu : %d\n", dev->max_mtu);
2618 DBG(dev, "OPB freq : %d\n", dev->opb_bus_freq);
/*
 * emac_probe - bind one EMAC device-tree node to this driver.
 *
 * Allocates the net_device, reads the configuration from the device
 * tree, maps the IRQs and the register block, waits for the dependent
 * devices (MAL, and optional MDIO/ZMII/RGMII/TAH), registers the
 * channels with the MAL, locates the PHY, fills in the net_device
 * callbacks and finally registers the net device.  On failure the
 * goto ladder at the bottom unwinds everything in reverse order of
 * acquisition.
 */
2623 static int __devinit emac_probe(struct of_device *ofdev,
2624 const struct of_device_id *match)
2626 struct net_device *ndev;
2627 struct emac_instance *dev;
2628 struct device_node *np = ofdev->node;
2629 struct device_node **blist = NULL;
2632 /* Skip unused/unwired EMACS. We leave the check for an unused
2633 * property here for now, but new flat device trees should set a
2634 * status property to "disabled" instead.
2636 if (of_get_property(np, "unused", NULL) || !of_device_is_available(np))
/* Remember our slot in the boot list (if any) so it can be cleared
 * should this probe fail -- see the cleanup at the bottom. */
2639 /* Find ourselves in the bootlist if we are there */
2640 for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
2641 if (emac_boot_list[i] == np)
2642 blist = &emac_boot_list[i];
2644 /* Allocate our net_device structure */
2646 ndev = alloc_etherdev(sizeof(struct emac_instance));
2648 printk(KERN_ERR "%s: could not allocate ethernet device!\n",
2652 dev = netdev_priv(ndev);
2656 SET_NETDEV_DEV(ndev, &ofdev->dev);
2658 /* Initialize some embedded data structures */
2659 mutex_init(&dev->mdio_lock);
2660 mutex_init(&dev->link_lock);
2661 spin_lock_init(&dev->lock);
2662 INIT_WORK(&dev->reset_work, emac_reset_work);
2664 /* Init various config data based on device-tree */
2665 err = emac_init_config(dev);
2669 /* Get interrupts. EMAC irq is mandatory, WOL irq is optional */
2670 dev->emac_irq = irq_of_parse_and_map(np, 0);
2671 dev->wol_irq = irq_of_parse_and_map(np, 1);
2672 if (dev->emac_irq == NO_IRQ) {
2673 printk(KERN_ERR "%s: Can't map main interrupt\n", np->full_name);
2676 ndev->irq = dev->emac_irq;
/* Map the EMAC register block described by the DT "reg" property */
2679 if (of_address_to_resource(np, 0, &dev->rsrc_regs)) {
2680 printk(KERN_ERR "%s: Can't get registers address\n",
2684 // TODO : request_mem_region
2685 dev->emacp = ioremap(dev->rsrc_regs.start, sizeof(struct emac_regs));
2686 if (dev->emacp == NULL) {
2687 printk(KERN_ERR "%s: Can't map device registers!\n",
2693 /* Wait for dependent devices */
2694 err = emac_wait_deps(dev);
2697 "%s: Timeout waiting for dependent devices\n",
2699 /* display more info about what's missing ? */
2702 dev->mal = dev_get_drvdata(&dev->mal_dev->dev);
2703 if (dev->mdio_dev != NULL)
2704 dev->mdio_instance = dev_get_drvdata(&dev->mdio_dev->dev);
2706 /* Register with MAL */
2707 dev->commac.ops = &emac_commac_ops;
2708 dev->commac.dev = dev;
2709 dev->commac.tx_chan_mask = MAL_CHAN_MASK(dev->mal_tx_chan);
2710 dev->commac.rx_chan_mask = MAL_CHAN_MASK(dev->mal_rx_chan);
2711 err = mal_register_commac(dev->mal, &dev->commac);
2713 printk(KERN_ERR "%s: failed to register with mal %s!\n",
2714 np->full_name, dev->mal_dev->node->full_name);
/* RX skb/sync sizes are derived from the current MTU */
2717 dev->rx_skb_size = emac_rx_skb_size(ndev->mtu);
2718 dev->rx_sync_size = emac_rx_sync_size(ndev->mtu);
2720 /* Get pointers to BD rings */
2722 dev->mal->bd_virt + mal_tx_bd_offset(dev->mal, dev->mal_tx_chan);
2724 dev->mal->bd_virt + mal_rx_bd_offset(dev->mal, dev->mal_rx_chan);
2726 DBG(dev, "tx_desc %p" NL, dev->tx_desc);
2727 DBG(dev, "rx_desc %p" NL, dev->rx_desc);
/* Clear the descriptor rings and the skb shadow arrays before use */
2730 memset(dev->tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor));
2731 memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor));
2732 memset(dev->tx_skb, 0, NUM_TX_BUFF * sizeof(struct sk_buff *));
2733 memset(dev->rx_skb, 0, NUM_RX_BUFF * sizeof(struct sk_buff *));
2735 /* Attach to ZMII, if needed */
2736 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII) &&
2737 (err = zmii_attach(dev->zmii_dev, dev->zmii_port, &dev->phy_mode)) != 0)
2738 goto err_unreg_commac;
2740 /* Attach to RGMII, if needed */
2741 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII) &&
2742 (err = rgmii_attach(dev->rgmii_dev, dev->rgmii_port, dev->phy_mode)) != 0)
2743 goto err_detach_zmii;
2745 /* Attach to TAH, if needed */
2746 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
2747 (err = tah_attach(dev->tah_dev, dev->tah_port)) != 0)
2748 goto err_detach_rgmii;
2750 /* Set some link defaults before we can find out real parameters */
2751 dev->phy.speed = SPEED_100;
2752 dev->phy.duplex = DUPLEX_FULL;
2753 dev->phy.autoneg = AUTONEG_DISABLE;
2754 dev->phy.pause = dev->phy.asym_pause = 0;
2755 dev->stop_timeout = STOP_TIMEOUT_100;
2756 INIT_DELAYED_WORK(&dev->link_work, emac_link_timer);
2758 /* Find PHY if any */
2759 err = emac_init_phy(dev);
2761 goto err_detach_tah;
2763 /* Fill in the driver function table */
2764 ndev->open = &emac_open;
2766 ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
2767 ndev->tx_timeout = &emac_tx_timeout;
2768 ndev->watchdog_timeo = 5 * HZ;
2769 ndev->stop = &emac_close;
2770 ndev->get_stats = &emac_stats;
2771 ndev->set_multicast_list = &emac_set_multicast_list;
2772 ndev->do_ioctl = &emac_ioctl;
/* GigE-capable PHY modes use the scatter/gather xmit path and allow
 * MTU changes; others get the plain xmit handler */
2773 if (emac_phy_supports_gige(dev->phy_mode)) {
2774 ndev->hard_start_xmit = &emac_start_xmit_sg;
2775 ndev->change_mtu = &emac_change_mtu;
2776 dev->commac.ops = &emac_commac_sg_ops;
2778 ndev->hard_start_xmit = &emac_start_xmit;
2780 SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);
/* Link is considered down until the link timer says otherwise */
2782 netif_carrier_off(ndev);
2783 netif_stop_queue(ndev);
2785 err = register_netdev(ndev);
2787 printk(KERN_ERR "%s: failed to register net device (%d)!\n",
2788 np->full_name, err);
2789 goto err_detach_tah;
2792 /* Set our drvdata last as we don't want them visible until we are
2796 dev_set_drvdata(&ofdev->dev, dev);
2798 /* There's a new kid in town ! Let's tell everybody */
2799 wake_up_all(&emac_probe_wait);
2803 "%s: EMAC-%d %s, MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
2804 ndev->name, dev->cell_index, np->full_name,
2805 ndev->dev_addr[0], ndev->dev_addr[1], ndev->dev_addr[2],
2806 ndev->dev_addr[3], ndev->dev_addr[4], ndev->dev_addr[5]);
2808 if (dev->phy.address >= 0)
2809 printk("%s: found %s PHY (0x%02x)\n", ndev->name,
2810 dev->phy.def->name, dev->phy.address);
2812 emac_dbg_register(dev);
/* Error unwind: release everything in reverse order of acquisition */
2817 /* I have a bad feeling about this ... */
2820 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2821 tah_detach(dev->tah_dev, dev->tah_port);
2823 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2824 rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
2826 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2827 zmii_detach(dev->zmii_dev, dev->zmii_port);
2829 mal_unregister_commac(dev->mal, &dev->commac);
2833 iounmap(dev->emacp);
2835 if (dev->wol_irq != NO_IRQ)
2836 irq_dispose_mapping(dev->wol_irq);
2837 if (dev->emac_irq != NO_IRQ)
2838 irq_dispose_mapping(dev->emac_irq);
2842 /* if we were on the bootlist, remove us as we won't show up and
2843 * wake up all waiters to notify them in case they were waiting
2848 wake_up_all(&emac_probe_wait);
/*
 * emac_remove - tear down one EMAC instance (mirror of emac_probe).
 *
 * Unregisters the net device, flushes pending scheduled work,
 * detaches the optional TAH/RGMII/ZMII helpers, unregisters the
 * channels from the MAL and releases the register mapping and the
 * IRQ mappings.
 */
2853 static int __devexit emac_remove(struct of_device *ofdev)
2855 struct emac_instance *dev = dev_get_drvdata(&ofdev->dev);
2857 DBG(dev, "remove" NL);
2859 dev_set_drvdata(&ofdev->dev, NULL);
2861 unregister_netdev(dev->ndev);
/* Make sure no reset/link work item is still in flight */
2863 flush_scheduled_work();
2865 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2866 tah_detach(dev->tah_dev, dev->tah_port);
2867 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2868 rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
2869 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2870 zmii_detach(dev->zmii_dev, dev->zmii_port);
2872 mal_unregister_commac(dev->mal, &dev->commac);
2875 emac_dbg_unregister(dev);
2876 iounmap(dev->emacp);
2878 if (dev->wol_irq != NO_IRQ)
2879 irq_dispose_mapping(dev->wol_irq);
2880 if (dev->emac_irq != NO_IRQ)
2881 irq_dispose_mapping(dev->emac_irq);
2888 /* XXX Features in here should be replaced by properties... */
/* Device-tree match table: handles both the classic EMAC and the
 * EMAC4 variant (feature bits are refined later in emac_init_config) */
2889 static struct of_device_id emac_match[] =
2893 .compatible = "ibm,emac",
2897 .compatible = "ibm,emac4",
/* of_platform driver glue: binds nodes matching emac_match to the
 * probe/remove routines above */
2902 static struct of_platform_driver emac_driver = {
2904 .match_table = emac_match,
2906 .probe = emac_probe,
2907 .remove = emac_remove,
/*
 * emac_make_bootlist - scan the whole device tree for EMAC nodes and
 * collect them (up to EMAC_BOOT_LIST_SIZE) into emac_boot_list,
 * sorted by their "cell-index" property so instances come up in a
 * stable, hardware-defined order.  Each stored node holds a reference
 * (of_node_get) that is dropped in emac_exit().
 */
2910 static void __init emac_make_bootlist(void)
2912 struct device_node *np = NULL;
2913 int j, max, i = 0, k;
2914 int cell_indices[EMAC_BOOT_LIST_SIZE];
/* Walk every node, keeping those that match this driver and are not
 * marked "unused" */
2917 while((np = of_find_all_nodes(np)) != NULL) {
2920 if (of_match_node(emac_match, np) == NULL)
2922 if (of_get_property(np, "unused", NULL))
2924 idx = of_get_property(np, "cell-index", NULL);
2927 cell_indices[i] = *idx;
2928 emac_boot_list[i++] = of_node_get(np);
2929 if (i >= EMAC_BOOT_LIST_SIZE) {
2936 /* Bubble sort them (doh, what a creative algorithm :-) */
2937 for (i = 0; max > 1 && (i < (max - 1)); i++)
2938 for (j = i; j < max; j++) {
2939 if (cell_indices[i] > cell_indices[j]) {
2940 np = emac_boot_list[i];
2941 emac_boot_list[i] = emac_boot_list[j];
2942 emac_boot_list[j] = np;
2943 k = cell_indices[i];
2944 cell_indices[i] = cell_indices[j];
2945 cell_indices[j] = k;
/*
 * emac_init - module entry point: announce the driver, build the
 * boot-ordered EMAC list, initialize the submodules and register the
 * of_platform driver.
 */
2950 static int __init emac_init(void)
2954 printk(KERN_INFO DRV_DESC ", version " DRV_VERSION "\n");
2956 /* Init debug stuff */
2959 /* Build EMAC boot list */
2960 emac_make_bootlist();
2962 /* Init submodules */
2975 rc = of_register_platform_driver(&emac_driver);
/*
 * emac_exit - module exit: unregister the platform driver and drop
 * the device-node references taken by emac_make_bootlist().
 */
2993 static void __exit emac_exit(void)
2997 of_unregister_platform_driver(&emac_driver);
3005 /* Destroy EMAC boot list */
3006 for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
3007 if (emac_boot_list[i])
3008 of_node_put(emac_boot_list[i]);
/* Standard module entry/exit hooks */
3011 module_init(emac_init);
3012 module_exit(emac_exit);