2 * drivers/net/ibm_newemac/core.c
4 * Driver for PowerPC 4xx on-chip ethernet controller.
6 * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
7 * <benh@kernel.crashing.org>
9 * Based on the arch/ppc version of the driver:
11 * Copyright (c) 2004, 2005 Zultys Technologies.
12 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
14 * Based on original work by
15 * Matt Porter <mporter@kernel.crashing.org>
16 * (c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
17 * Armin Kuster <akuster@mvista.com>
18 * Johnnie Peters <jpeters@mvista.com>
20 * This program is free software; you can redistribute it and/or modify it
21 * under the terms of the GNU General Public License as published by the
22 * Free Software Foundation; either version 2 of the License, or (at your
23 * option) any later version.
27 #include <linux/sched.h>
28 #include <linux/string.h>
29 #include <linux/errno.h>
30 #include <linux/delay.h>
31 #include <linux/types.h>
32 #include <linux/pci.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/crc32.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #include <linux/bitops.h>
39 #include <linux/workqueue.h>
42 #include <asm/processor.h>
45 #include <asm/uaccess.h>
47 #include <asm/dcr-regs.h>
52 * Lack of dma_unmap_???? calls is intentional.
54 * API-correct usage requires additional support state information to be
55 * maintained for every RX and TX buffer descriptor (BD). Unfortunately, due to
56 * EMAC design (e.g. TX buffer passed from network stack can be split into
57 * several BDs, dma_map_single/dma_map_page can be used to map particular BD),
58 * maintaining such information will add additional overhead.
59 * Current DMA API implementation for 4xx processors only ensures cache coherency
60 * and dma_unmap_???? routines are empty and are likely to stay this way.
61 * I decided to omit dma_unmap_??? calls because I don't want to add additional
62 * complexity just for the sake of following some abstract API, when it doesn't
63 * add any real benefit to the driver. I understand that this decision maybe
64 * controversial, but I really tried to make code API-correct and efficient
65 * at the same time and didn't come up with code I liked :(. --ebs
68 #define DRV_NAME "emac"
69 #define DRV_VERSION "3.54"
70 #define DRV_DESC "PPC 4xx OCP EMAC driver"
72 MODULE_DESCRIPTION(DRV_DESC);
74 ("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>");
75 MODULE_LICENSE("GPL");
78 * PPC64 doesn't (yet) have a cacheable_memcpy
81 #define cacheable_memcpy(d,s,n) memcpy((d),(s),(n))
84 /* minimum number of free TX descriptors required to wake up TX process */
85 #define EMAC_TX_WAKEUP_THRESH (NUM_TX_BUFF / 4)
87 /* If packet size is less than this number, we allocate small skb and copy packet
88 * contents into it instead of just sending original big skb up
90 #define EMAC_RX_COPY_THRESH CONFIG_IBM_NEW_EMAC_RX_COPY_THRESHOLD
92 /* Since multiple EMACs share MDIO lines in various ways, we need
93 * to avoid re-using the same PHY ID in cases where the arch didn't
94 * setup precise phy_map entries
96 * XXX This is something that needs to be reworked as we can have multiple
97 * EMAC "sets" (multiple ASICs containing several EMACs) though we can
98 * probably require in that case to have explicit PHY IDs in the device-tree
100 static u32 busy_phy_map;
101 static DEFINE_MUTEX(emac_phy_map_lock);
103 /* This is the wait queue used to wait on any event related to probe, that
104 * is discovery of MALs, other EMACs, ZMII/RGMIIs, etc...
106 static DECLARE_WAIT_QUEUE_HEAD(emac_probe_wait);
108 /* Having stable interface names is a doomed idea. However, it would be nice
109 * if we didn't have completely random interface names at boot too :-) It's
110 * just a matter of making everybody's life easier. Since we are doing
111 * threaded probing, it's a bit harder though. The base idea here is that
112 * we make up a list of all emacs in the device-tree before we register the
113 * driver. Every emac will then wait for the previous one in the list to
114 * initialize before itself. We should also keep that list ordered by
116 * That list is only 4 entries long, meaning that additional EMACs don't
117 * get ordering guarantees unless EMAC_BOOT_LIST_SIZE is increased.
120 #define EMAC_BOOT_LIST_SIZE 4
121 static struct device_node *emac_boot_list[EMAC_BOOT_LIST_SIZE];
123 /* How long should I wait for dependent devices ? */
124 #define EMAC_PROBE_DEP_TIMEOUT (HZ * 5)
126 /* I don't want to litter system log with timeout errors
127 * when we have brain-damaged PHY.
/* Report a PHY/MAC timeout. On parts with a known PHY clock erratum
 * (440GX/460EX/440EP) timeouts are expected, so the message is demoted to a
 * debug print; otherwise it is a rate-limited KERN_ERR.
 * NOTE(review): source view is sampled — the 'error' parameter line and
 * braces are not visible here; verify against the full file. */
129 static inline void emac_report_timeout_error(struct emac_instance *dev,
132 if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX |
133 EMAC_FTR_460EX_PHY_CLK_FIX |
134 EMAC_FTR_440EP_PHY_CLK_FIX))
135 DBG(dev, "%s" NL, error);
136 else if (net_ratelimit())
137 printk(KERN_ERR "%s: %s\n", dev->ndev->name, error);
140 /* EMAC PHY clock workaround:
141 * 440EP/440GR has more sane SDR0_MFR register implementation than 440GX,
142 * which allows controlling each EMAC clock
/* 440EP PHY clock workaround: switch the EMAC RX clock to the TX clock by
 * setting the per-EMAC ECS bit in SDR0_MFR (native DCR access only). */
144 static inline void emac_rx_clk_tx(struct emac_instance *dev)
146 #ifdef CONFIG_PPC_DCR_NATIVE
147 if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
148 dcri_clrset(SDR0, SDR0_MFR,
149 0, SDR0_MFR_ECS >> dev->cell_index);
/* Undo emac_rx_clk_tx(): clear the per-EMAC ECS bit in SDR0_MFR so the RX
 * clock returns to its default source (440EP workaround, DCR-native only). */
153 static inline void emac_rx_clk_default(struct emac_instance *dev)
155 #ifdef CONFIG_PPC_DCR_NATIVE
156 if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
157 dcri_clrset(SDR0, SDR0_MFR,
158 SDR0_MFR_ECS >> dev->cell_index, 0);
162 /* PHY polling intervals */
163 #define PHY_POLL_LINK_ON HZ
164 #define PHY_POLL_LINK_OFF (HZ / 5)
166 /* Graceful stop timeouts in us.
167 * We should allow up to 1 frame time (full-duplex, ignoring collisions)
169 #define STOP_TIMEOUT_10 1230
170 #define STOP_TIMEOUT_100 124
171 #define STOP_TIMEOUT_1000 13
172 #define STOP_TIMEOUT_1000_JUMBO 73
174 static unsigned char default_mcast_addr[] = {
175 0x01, 0x80, 0xC2, 0x00, 0x00, 0x01
178 /* Please, keep in sync with struct ibm_emac_stats/ibm_emac_error_stats */
179 static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = {
180 "rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum",
181 "tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom",
182 "rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu",
183 "rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet",
184 "rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error",
185 "rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range",
186 "rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun",
187 "rx_bad_packet", "rx_runt_packet", "rx_short_event",
188 "rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long",
189 "rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors",
190 "tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral",
191 "tx_bd_excessive_collisions", "tx_bd_late_collision",
192 "tx_bd_multple_collisions", "tx_bd_single_collision",
193 "tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe",
197 static irqreturn_t emac_irq(int irq, void *dev_instance);
198 static void emac_clean_tx_ring(struct emac_instance *dev);
199 static void __emac_set_multicast_list(struct emac_instance *dev);
/* True if the PHY interface mode is gigabit-capable (GMII/RGMII/TBI/RTBI). */
201 static inline int emac_phy_supports_gige(int phy_mode)
203 return phy_mode == PHY_MODE_GMII ||
204 phy_mode == PHY_MODE_RGMII ||
205 phy_mode == PHY_MODE_TBI ||
206 phy_mode == PHY_MODE_RTBI;
/* True if the PHY mode uses the internal GPCS (TBI/RTBI serial modes). */
209 static inline int emac_phy_gpcs(int phy_mode)
211 return phy_mode == PHY_MODE_TBI ||
212 phy_mode == PHY_MODE_RTBI;
/* Enable the TX side of the MAC: set MR0[TXE] if it is not already set. */
215 static inline void emac_tx_enable(struct emac_instance *dev)
217 struct emac_regs __iomem *p = dev->emacp;
220 DBG(dev, "tx_enable" NL);
222 r = in_be32(&p->mr0);
223 if (!(r & EMAC_MR0_TXE))
224 out_be32(&p->mr0, r | EMAC_MR0_TXE);
/* Disable TX: clear MR0[TXE], then poll MR0[TXI] (TX idle) for up to
 * dev->stop_timeout iterations; report a timeout error if it never idles. */
227 static void emac_tx_disable(struct emac_instance *dev)
229 struct emac_regs __iomem *p = dev->emacp;
232 DBG(dev, "tx_disable" NL);
234 r = in_be32(&p->mr0);
235 if (r & EMAC_MR0_TXE) {
236 int n = dev->stop_timeout;
237 out_be32(&p->mr0, r & ~EMAC_MR0_TXE);
238 while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n) {
243 emac_report_timeout_error(dev, "TX disable timeout");
/* Enable RX (MR0[RXE]) unless the channel is administratively stopped
 * (MAL_COMMAC_RX_STOPPED). If a previous async disable is still draining
 * (RXI not yet set), wait for it first with the usual stop_timeout budget. */
247 static void emac_rx_enable(struct emac_instance *dev)
249 struct emac_regs __iomem *p = dev->emacp;
252 if (unlikely(test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags)))
255 DBG(dev, "rx_enable" NL);
257 r = in_be32(&p->mr0);
258 if (!(r & EMAC_MR0_RXE)) {
259 if (unlikely(!(r & EMAC_MR0_RXI))) {
260 /* Wait if previous async disable is still in progress */
261 int n = dev->stop_timeout;
/* NOTE(review): due to precedence, r below receives the *masked* value
 * (in_be32(...) & EMAC_MR0_RXI), not the raw MR0 contents — confirm the
 * later "r | EMAC_MR0_RXE" write is intended to use that value. */
262 while (!(r = in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
267 emac_report_timeout_error(dev,
268 "RX disable timeout");
270 out_be32(&p->mr0, r | EMAC_MR0_RXE);
/* Synchronous RX disable: clear MR0[RXE] and poll MR0[RXI] (RX idle) up to
 * dev->stop_timeout iterations, reporting a timeout if it never idles. */
276 static void emac_rx_disable(struct emac_instance *dev)
278 struct emac_regs __iomem *p = dev->emacp;
281 DBG(dev, "rx_disable" NL);
283 r = in_be32(&p->mr0);
284 if (r & EMAC_MR0_RXE) {
285 int n = dev->stop_timeout;
286 out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
287 while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
292 emac_report_timeout_error(dev, "RX disable timeout");
/* Quiesce the net stack side of the device: take tx/addr locks (the locked
 * section's contents are not visible in this sampled view), refresh
 * trans_start so the watchdog doesn't fire, disable NAPI polling via the
 * MAL commac, and stop the TX queue. */
296 static inline void emac_netif_stop(struct emac_instance *dev)
298 netif_tx_lock_bh(dev->ndev);
299 netif_addr_lock(dev->ndev);
301 netif_addr_unlock(dev->ndev);
302 netif_tx_unlock_bh(dev->ndev);
303 dev->ndev->trans_start = jiffies; /* prevent tx timeout */
304 mal_poll_disable(dev->mal, &dev->commac);
305 netif_tx_disable(dev->ndev);
/* Counterpart of emac_netif_stop(): under the tx/addr locks, flush any
 * multicast-list update deferred while stopped, then wake the TX queue and
 * re-enable MAL polling. */
308 static inline void emac_netif_start(struct emac_instance *dev)
310 netif_tx_lock_bh(dev->ndev);
311 netif_addr_lock(dev->ndev);
313 if (dev->mcast_pending && netif_running(dev->ndev))
314 __emac_set_multicast_list(dev);
315 netif_addr_unlock(dev->ndev);
316 netif_tx_unlock_bh(dev->ndev);
318 netif_wake_queue(dev->ndev);
320 /* NOTE: unconditional netif_wake_queue is only appropriate
321 * so long as all callers are assured to have free tx slots
322 * (taken from tg3... though the case where that is wrong is
323 * not terribly harmful)
325 mal_poll_enable(dev->mal, &dev->commac);
/* Fire-and-forget RX disable: clear MR0[RXE] without waiting for RXI.
 * emac_rx_enable() handles waiting out an in-flight async disable. */
328 static inline void emac_rx_disable_async(struct emac_instance *dev)
330 struct emac_regs __iomem *p = dev->emacp;
333 DBG(dev, "rx_disable_async" NL);
335 r = in_be32(&p->mr0);
336 if (r & EMAC_MR0_RXE)
337 out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
/* Soft-reset the MAC core. Unless a prior reset already failed, RX/TX are
 * stopped first (40x erratum). On 460EX the internal clock source is
 * selected around the reset (SDR0_ETH_CFG ECS bit) and the external source
 * restored afterwards. Sets/clears dev->reset_failed and reports a timeout
 * if MR0[SRST] never self-clears. Return value lines are not visible in
 * this sampled view. */
340 static int emac_reset(struct emac_instance *dev)
342 struct emac_regs __iomem *p = dev->emacp;
345 DBG(dev, "reset" NL);
347 if (!dev->reset_failed) {
348 /* 40x erratum suggests stopping RX channel before reset,
351 emac_rx_disable(dev);
352 emac_tx_disable(dev);
355 #ifdef CONFIG_PPC_DCR_NATIVE
356 /* Enable internal clock source */
357 if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX))
358 dcri_clrset(SDR0, SDR0_ETH_CFG,
359 0, SDR0_ETH_CFG_ECS << dev->cell_index);
362 out_be32(&p->mr0, EMAC_MR0_SRST);
363 while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n)
366 #ifdef CONFIG_PPC_DCR_NATIVE
367 /* Enable external clock source */
368 if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX))
369 dcri_clrset(SDR0, SDR0_ETH_CFG,
370 SDR0_ETH_CFG_ECS << dev->cell_index, 0);
374 dev->reset_failed = 0;
377 emac_report_timeout_error(dev, "reset timeout");
378 dev->reset_failed = 1;
/* Program the multicast group-address hash table (GAHT): build a shadow
 * copy in gaht_temp by hashing each entry of the device mc_list
 * (CRC -> slot -> register/bit), then write all GAHT registers at once. */
383 static void emac_hash_mc(struct emac_instance *dev)
385 const int regs = EMAC_XAHT_REGS(dev);
386 u32 *gaht_base = emac_gaht_base(dev);
388 struct dev_mc_list *dmi;
391 DBG(dev, "hash_mc %d" NL, dev->ndev->mc_count);
393 memset(gaht_temp, 0, sizeof (gaht_temp));
395 for (dmi = dev->ndev->mc_list; dmi; dmi = dmi->next) {
397 DBG2(dev, "mc %02x:%02x:%02x:%02x:%02x:%02x" NL,
398 dmi->dmi_addr[0], dmi->dmi_addr[1], dmi->dmi_addr[2],
399 dmi->dmi_addr[3], dmi->dmi_addr[4], dmi->dmi_addr[5]);
401 slot = EMAC_XAHT_CRC_TO_SLOT(dev, ether_crc(ETH_ALEN, dmi->dmi_addr));
402 reg = EMAC_XAHT_SLOT_TO_REG(dev, slot);
403 mask = EMAC_XAHT_SLOT_TO_MASK(dev, slot);
405 gaht_temp[reg] |= mask;
408 for (i = 0; i < regs; i++)
409 out_be32(gaht_base + i, gaht_temp[i]);
/* Translate netdev IFF_* flags into an RMR (receive mode register) value:
 * base bits always set; promisc / all-multi (or hash-table overflow) /
 * plain multicast each add their mode bit (the added-bit lines are not
 * visible in this sampled view). */
412 static inline u32 emac_iff2rmr(struct net_device *ndev)
414 struct emac_instance *dev = netdev_priv(ndev);
417 r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE;
419 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
424 if (ndev->flags & IFF_PROMISC)
426 else if (ndev->flags & IFF_ALLMULTI ||
427 (ndev->mc_count > EMAC_XAHT_SLOTS(dev)))
429 else if (ndev->mc_count > 0)
/* Classic-EMAC variant: build the base MR1 value from the TX and RX FIFO
 * sizes (switch arms are partially missing in this sampled view). */
435 static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
437 u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC_MR1_TR0_MULT;
439 DBG2(dev, "__emac_calc_base_mr1" NL);
443 ret |= EMAC_MR1_TFS_2K;
/* NOTE(review): this warning reports tx_size but says "Rx FIFO" —
 * looks like a copy/paste mislabel; should likely read "Tx FIFO". */
446 printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
447 dev->ndev->name, tx_size);
452 ret |= EMAC_MR1_RFS_16K;
455 ret |= EMAC_MR1_RFS_4K;
458 printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
459 dev->ndev->name, rx_size);
/* EMAC4 variant: base MR1 including OPB clock ratio (OBCI) plus TX/RX FIFO
 * size encodings (switch arms partially missing in this sampled view). */
465 static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
467 u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC4_MR1_TR |
468 EMAC4_MR1_OBCI(dev->opb_bus_freq / 1000000);
470 DBG2(dev, "__emac4_calc_base_mr1" NL);
474 ret |= EMAC4_MR1_TFS_4K;
477 ret |= EMAC4_MR1_TFS_2K;
/* NOTE(review): same mislabel as __emac_calc_base_mr1 — reports tx_size
 * but the text says "Rx FIFO"; should likely read "Tx FIFO". */
480 printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
481 dev->ndev->name, tx_size);
486 ret |= EMAC4_MR1_RFS_16K;
489 ret |= EMAC4_MR1_RFS_4K;
492 ret |= EMAC4_MR1_RFS_2K;
495 printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
496 dev->ndev->name, rx_size);
/* Dispatch to the EMAC4 or classic MR1 base calculation per feature flag. */
502 static u32 emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
504 return emac_has_feature(dev, EMAC_FTR_EMAC4) ?
505 __emac4_calc_base_mr1(dev, tx_size, rx_size) :
506 __emac_calc_base_mr1(dev, tx_size, rx_size);
/* Encode the TX request threshold: size in 64-byte units minus one, shifted
 * into the EMAC4 or classic TRTR field position. */
509 static inline u32 emac_calc_trtr(struct emac_instance *dev, unsigned int size)
511 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
512 return ((size >> 6) - 1) << EMAC_TRTR_SHIFT_EMAC4;
514 return ((size >> 6) - 1) << EMAC_TRTR_SHIFT;
/* Pack RX FIFO low/high water marks into the RWMR layout; EMAC4 uses wider
 * fields (10-bit high at bit 6) than the classic core (9-bit at bit 7). */
517 static inline u32 emac_calc_rwmr(struct emac_instance *dev,
518 unsigned int low, unsigned int high)
520 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
521 return (low << 22) | ( (high & 0x3ff) << 6);
523 return (low << 23) | ( (high & 0x1ff) << 7);
/* Full MAC (re)configuration after reset or link change: choose FIFO sizes
 * and MR1 bits from link state/speed/duplex, program MAC address, VTPID,
 * receive mode, FIFO thresholds, pause watermarks and interrupt enables,
 * and kick the RGMII/ZMII speed settings. Several lines (variable decls,
 * switch labels, some else branches) are missing from this sampled view —
 * verify any edit against the complete file. */
526 static int emac_configure(struct emac_instance *dev)
528 struct emac_regs __iomem *p = dev->emacp;
529 struct net_device *ndev = dev->ndev;
530 int tx_size, rx_size, link = netif_carrier_ok(dev->ndev);
533 DBG(dev, "configure" NL);
536 out_be32(&p->mr1, in_be32(&p->mr1)
537 | EMAC_MR1_FDE | EMAC_MR1_ILE);
539 } else if (emac_reset(dev) < 0)
542 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
543 tah_reset(dev->tah_dev);
545 DBG(dev, " link = %d duplex = %d, pause = %d, asym_pause = %d\n",
546 link, dev->phy.duplex, dev->phy.pause, dev->phy.asym_pause);
548 /* Default fifo sizes */
549 tx_size = dev->tx_fifo_size;
550 rx_size = dev->rx_fifo_size;
552 /* No link, force loopback */
554 mr1 = EMAC_MR1_FDE | EMAC_MR1_ILE;
556 /* Check for full duplex */
557 else if (dev->phy.duplex == DUPLEX_FULL)
558 mr1 |= EMAC_MR1_FDE | EMAC_MR1_MWSW_001;
560 /* Adjust fifo sizes, mr1 and timeouts based on link speed */
561 dev->stop_timeout = STOP_TIMEOUT_10;
562 switch (dev->phy.speed) {
564 if (emac_phy_gpcs(dev->phy.mode)) {
565 mr1 |= EMAC_MR1_MF_1000GPCS |
566 EMAC_MR1_MF_IPPA(dev->phy.address);
568 /* Put some arbitrary OUI, Manuf & Rev IDs so we can
569 * identify this GPCS PHY later.
571 out_be32(&p->u1.emac4.ipcr, 0xdeadbeef);
573 mr1 |= EMAC_MR1_MF_1000;
575 /* Extended fifo sizes */
576 tx_size = dev->tx_fifo_size_gige;
577 rx_size = dev->rx_fifo_size_gige;
579 if (dev->ndev->mtu > ETH_DATA_LEN) {
580 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
581 mr1 |= EMAC4_MR1_JPSM;
583 mr1 |= EMAC_MR1_JPSM;
584 dev->stop_timeout = STOP_TIMEOUT_1000_JUMBO;
586 dev->stop_timeout = STOP_TIMEOUT_1000;
589 mr1 |= EMAC_MR1_MF_100;
590 dev->stop_timeout = STOP_TIMEOUT_100;
592 default: /* make gcc happy */
596 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
597 rgmii_set_speed(dev->rgmii_dev, dev->rgmii_port,
599 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
600 zmii_set_speed(dev->zmii_dev, dev->zmii_port, dev->phy.speed);
602 /* on 40x erratum forces us to NOT use integrated flow control,
603 * let's hope it works on 44x ;)
605 if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x) &&
606 dev->phy.duplex == DUPLEX_FULL) {
608 mr1 |= EMAC_MR1_EIFC | EMAC_MR1_APP;
609 else if (dev->phy.asym_pause)
613 /* Add base settings & fifo sizes & program MR1 */
614 mr1 |= emac_calc_base_mr1(dev, tx_size, rx_size);
615 out_be32(&p->mr1, mr1);
617 /* Set individual MAC address */
618 out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
619 out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
620 (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
623 /* VLAN Tag Protocol ID */
624 out_be32(&p->vtpid, 0x8100);
626 /* Receive mode register */
627 r = emac_iff2rmr(ndev);
628 if (r & EMAC_RMR_MAE)
630 out_be32(&p->rmr, r);
632 /* FIFOs thresholds */
633 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
634 r = EMAC4_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
635 tx_size / 2 / dev->fifo_entry_size);
637 r = EMAC_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
638 tx_size / 2 / dev->fifo_entry_size);
639 out_be32(&p->tmr1, r);
640 out_be32(&p->trtr, emac_calc_trtr(dev, tx_size / 2));
642 /* PAUSE frame is sent when RX FIFO reaches its high-water mark,
643 there should be still enough space in FIFO to allow the our link
644 partner time to process this frame and also time to send PAUSE
647 Here is the worst case scenario for the RX FIFO "headroom"
648 (from "The Switch Book") (100Mbps, without preamble, inter-frame gap):
650 1) One maximum-length frame on TX 1522 bytes
651 2) One PAUSE frame time 64 bytes
652 3) PAUSE frame decode time allowance 64 bytes
653 4) One maximum-length frame on RX 1522 bytes
654 5) Round-trip propagation delay of the link (100Mb) 15 bytes
658 I chose to set high-water mark to RX_FIFO_SIZE / 4 (1024 bytes)
659 low-water mark to RX_FIFO_SIZE / 8 (512 bytes)
661 r = emac_calc_rwmr(dev, rx_size / 8 / dev->fifo_entry_size,
662 rx_size / 4 / dev->fifo_entry_size);
663 out_be32(&p->rwmr, r);
665 /* Set PAUSE timer to the maximum */
666 out_be32(&p->ptr, 0xffff);
669 r = EMAC_ISR_OVR | EMAC_ISR_BP | EMAC_ISR_SE |
670 EMAC_ISR_ALE | EMAC_ISR_BFCS | EMAC_ISR_PTLE | EMAC_ISR_ORE |
671 EMAC_ISR_IRE | EMAC_ISR_TE;
672 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
673 r |= EMAC4_ISR_TXPE | EMAC4_ISR_RXPE /* | EMAC4_ISR_TXUE |
675 out_be32(&p->iser, r);
677 /* We need to take GPCS PHY out of isolate mode after EMAC reset */
678 if (emac_phy_gpcs(dev->phy.mode))
679 emac_mii_reset_phy(&dev->phy);
/* Stop the netif side, reconfigure the MAC, and restart the netif side.
 * (The body between emac_configure() success check and restart is not
 * fully visible in this sampled view.) */
684 static void emac_reinitialize(struct emac_instance *dev)
686 DBG(dev, "reinitialize" NL);
688 emac_netif_stop(dev);
689 if (!emac_configure(dev)) {
693 emac_netif_start(dev);
/* Heavyweight TX recovery: disable MAC TX and the MAL TX channel, drop all
 * queued TX buffers, reset ring indices, then re-enable the TX channel. */
696 static void emac_full_tx_reset(struct emac_instance *dev)
698 DBG(dev, "full_tx_reset" NL);
700 emac_tx_disable(dev);
701 mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
702 emac_clean_tx_ring(dev);
703 dev->tx_cnt = dev->tx_slot = dev->ack_slot = 0;
707 mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
/* Workqueue handler scheduled from emac_tx_timeout(): perform a full TX
 * reset under link_lock with the interface quiesced. */
712 static void emac_reset_work(struct work_struct *work)
714 struct emac_instance *dev = container_of(work, struct emac_instance, reset_work);
716 DBG(dev, "reset_work" NL);
718 mutex_lock(&dev->link_lock);
720 emac_netif_stop(dev);
721 emac_full_tx_reset(dev);
722 emac_netif_start(dev);
724 mutex_unlock(&dev->link_lock);
/* netdev watchdog callback: defer recovery to emac_reset_work() since the
 * full reset sleeps (mutex) and cannot run in this context. */
727 static void emac_tx_timeout(struct net_device *ndev)
729 struct emac_instance *dev = netdev_priv(ndev);
731 DBG(dev, "tx_timeout" NL);
733 schedule_work(&dev->reset_work);
/* Test STACR[OC] for MDIO completion; on cores with inverted OC semantics
 * the result is flipped (the flip line is not visible in this view). */
737 static inline int emac_phy_done(struct emac_instance *dev, u32 stacr)
739 int done = !!(stacr & EMAC_STACR_OC);
741 if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
/* Read one PHY register over MDIO. Serialized by mdio_lock; claims the
 * ZMII/RGMII MDIO bus, waits for the STA to go idle, issues the read via
 * STACR, polls for completion, checks PHYE for a PHY error, then extracts
 * the 16-bit data field. Returns the register value or a negative errno
 * (-ETIMEDOUT on poll timeout). Some intermediate lines (udelay loop
 * bodies, error-path gotos) are missing from this sampled view. */
747 static int __emac_mdio_read(struct emac_instance *dev, u8 id, u8 reg)
749 struct emac_regs __iomem *p = dev->emacp;
751 int n, err = -ETIMEDOUT;
753 mutex_lock(&dev->mdio_lock);
755 DBG2(dev, "mdio_read(%02x,%02x)" NL, id, reg);
757 /* Enable proper MDIO port */
758 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
759 zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
760 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
761 rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);
763 /* Wait for management interface to become idle */
765 while (!emac_phy_done(dev, in_be32(&p->stacr))) {
768 DBG2(dev, " -> timeout wait idle\n");
773 /* Issue read command */
774 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
775 r = EMAC4_STACR_BASE(dev->opb_bus_freq);
777 r = EMAC_STACR_BASE(dev->opb_bus_freq);
778 if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
780 if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
781 r |= EMACX_STACR_STAC_READ;
783 r |= EMAC_STACR_STAC_READ;
784 r |= (reg & EMAC_STACR_PRA_MASK)
785 | ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT);
786 out_be32(&p->stacr, r);
788 /* Wait for read to complete */
790 while (!emac_phy_done(dev, (r = in_be32(&p->stacr)))) {
793 DBG2(dev, " -> timeout wait complete\n");
798 if (unlikely(r & EMAC_STACR_PHYE)) {
799 DBG(dev, "mdio_read(%02x, %02x) failed" NL, id, reg);
804 r = ((r >> EMAC_STACR_PHYD_SHIFT) & EMAC_STACR_PHYD_MASK);
806 DBG2(dev, "mdio_read -> %04x" NL, r);
809 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
810 rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
811 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
812 zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
813 mutex_unlock(&dev->mdio_lock);
815 return err == 0 ? r : err;
/* Write one PHY register over MDIO. Mirror of __emac_mdio_read(): take
 * mdio_lock, claim the ZMII/RGMII MDIO bus, wait for STA idle, compose and
 * issue the STACR write command (PHY id, register, 16-bit data), poll for
 * completion, release the bus. No value is returned; timeouts only emit
 * debug output. Some intermediate lines are missing from this sampled
 * view. */
818 static void __emac_mdio_write(struct emac_instance *dev, u8 id, u8 reg,
821 struct emac_regs __iomem *p = dev->emacp;
823 int n, err = -ETIMEDOUT;
825 mutex_lock(&dev->mdio_lock);
827 DBG2(dev, "mdio_write(%02x,%02x,%04x)" NL, id, reg, val);
829 /* Enable proper MDIO port */
830 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
831 zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
832 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
833 rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);
835 /* Wait for management interface to be idle */
837 while (!emac_phy_done(dev, in_be32(&p->stacr))) {
840 DBG2(dev, " -> timeout wait idle\n");
845 /* Issue write command */
846 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
847 r = EMAC4_STACR_BASE(dev->opb_bus_freq);
849 r = EMAC_STACR_BASE(dev->opb_bus_freq);
850 if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
852 if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
853 r |= EMACX_STACR_STAC_WRITE;
855 r |= EMAC_STACR_STAC_WRITE;
856 r |= (reg & EMAC_STACR_PRA_MASK) |
857 ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT) |
858 (val << EMAC_STACR_PHYD_SHIFT);
859 out_be32(&p->stacr, r);
861 /* Wait for write to complete */
863 while (!emac_phy_done(dev, in_be32(&p->stacr))) {
866 DBG2(dev, " -> timeout wait complete\n");
872 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
873 rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
874 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
875 zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
876 mutex_unlock(&dev->mdio_lock);
/* mii_if wrapper: route the MDIO read through the designated MDIO-owning
 * EMAC instance (shared-MDIO setups) or this instance itself. */
879 static int emac_mdio_read(struct net_device *ndev, int id, int reg)
881 struct emac_instance *dev = netdev_priv(ndev);
884 res = __emac_mdio_read(dev->mdio_instance ? dev->mdio_instance : dev,
/* mii_if wrapper: route the MDIO write through the MDIO-owning EMAC
 * instance (shared-MDIO setups) or this instance itself. */
889 static void emac_mdio_write(struct net_device *ndev, int id, int reg, int val)
891 struct emac_instance *dev = netdev_priv(ndev);
893 __emac_mdio_write(dev->mdio_instance ? dev->mdio_instance : dev,
894 (u8) id, (u8) reg, (u16) val);
/* Apply the current multicast configuration: compute the new RMR, stop RX
 * (avoiding a full EMAC reset — see the MWSW_001 note below), rehash the
 * multicast table when MAE is requested, and write RMR. Clears
 * mcast_pending. Must be called with appropriate locking by callers. */
898 static void __emac_set_multicast_list(struct emac_instance *dev)
900 struct emac_regs __iomem *p = dev->emacp;
901 u32 rmr = emac_iff2rmr(dev->ndev);
903 DBG(dev, "__multicast %08x" NL, rmr);
905 /* I decided to relax register access rules here to avoid
908 * There is a real problem with EMAC4 core if we use MWSW_001 bit
909 * in MR1 register and do a full EMAC reset.
910 * One TX BD status update is delayed and, after EMAC reset, it
911 * never happens, resulting in TX hung (it'll be recovered by TX
912 * timeout handler eventually, but this is just gross).
913 * So we either have to do full TX reset or try to cheat here :)
915 * The only required change is to RX mode register, so I *think* all
916 * we need is just to stop RX channel. This seems to work on all
919 * If we need the full reset, we might just trigger the workqueue
920 * and do it async... a bit nasty but should work --BenH
922 dev->mcast_pending = 0;
923 emac_rx_disable(dev);
924 if (rmr & EMAC_RMR_MAE)
926 out_be32(&p->rmr, rmr);
/* netdev set_multicast_list hook: if the device is quiesced, record the
 * change as pending (flushed by emac_netif_start()); otherwise apply it
 * immediately. The branch condition line is not visible in this view. */
931 static void emac_set_multicast_list(struct net_device *ndev)
933 struct emac_instance *dev = netdev_priv(ndev);
935 DBG(dev, "multicast" NL);
937 BUG_ON(!netif_running(dev->ndev));
940 dev->mcast_pending = 1;
943 __emac_set_multicast_list(dev);
/* Rebuild the RX ring for a new MTU: quiesce RX, drop any partial
 * scatter-gather skb, mark all BDs empty (dropping unprocessed packets),
 * reallocate skbs only when the new size is larger, toggle the MR1 jumbo
 * bit via a full reset when crossing ETH_DATA_LEN, reprogram the MAL
 * channel buffer size, then restart everything. Error-path and return
 * lines are missing from this sampled view. */
946 static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu)
948 int rx_sync_size = emac_rx_sync_size(new_mtu);
949 int rx_skb_size = emac_rx_skb_size(new_mtu);
952 mutex_lock(&dev->link_lock);
953 emac_netif_stop(dev);
954 emac_rx_disable(dev);
955 mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);
957 if (dev->rx_sg_skb) {
958 ++dev->estats.rx_dropped_resize;
959 dev_kfree_skb(dev->rx_sg_skb);
960 dev->rx_sg_skb = NULL;
963 /* Make a first pass over RX ring and mark BDs ready, dropping
964 * non-processed packets on the way. We need this as a separate pass
965 * to simplify error recovery in the case of allocation failure later.
967 for (i = 0; i < NUM_RX_BUFF; ++i) {
968 if (dev->rx_desc[i].ctrl & MAL_RX_CTRL_FIRST)
969 ++dev->estats.rx_dropped_resize;
971 dev->rx_desc[i].data_len = 0;
972 dev->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY |
973 (i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
976 /* Reallocate RX ring only if bigger skb buffers are required */
977 if (rx_skb_size <= dev->rx_skb_size)
980 /* Second pass, allocate new skbs */
981 for (i = 0; i < NUM_RX_BUFF; ++i) {
982 struct sk_buff *skb = alloc_skb(rx_skb_size, GFP_ATOMIC);
988 BUG_ON(!dev->rx_skb[i]);
989 dev_kfree_skb(dev->rx_skb[i]);
/* "+ 2" keeps the IP header word-aligned; the DMA address is offset to
 * match the skb_reserve above. */
991 skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
992 dev->rx_desc[i].data_ptr =
993 dma_map_single(&dev->ofdev->dev, skb->data - 2, rx_sync_size,
994 DMA_FROM_DEVICE) + 2;
995 dev->rx_skb[i] = skb;
998 /* Check if we need to change "Jumbo" bit in MR1 */
999 if ((new_mtu > ETH_DATA_LEN) ^ (dev->ndev->mtu > ETH_DATA_LEN)) {
1000 /* This is to prevent starting RX channel in emac_rx_enable() */
1001 set_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
1003 dev->ndev->mtu = new_mtu;
1004 emac_full_tx_reset(dev);
1007 mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(new_mtu));
1010 clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
1012 mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
1013 emac_rx_enable(dev);
1014 emac_netif_start(dev);
1015 mutex_unlock(&dev->link_lock);
1020 /* Process ctx, rtnl_lock semaphore */
/* netdev change_mtu hook (called under rtnl_lock): validate the range,
 * resize the RX ring only if the skb bucket size actually changes, then
 * record the new MTU and derived skb/sync sizes. */
1021 static int emac_change_mtu(struct net_device *ndev, int new_mtu)
1023 struct emac_instance *dev = netdev_priv(ndev);
1026 if (new_mtu < EMAC_MIN_MTU || new_mtu > dev->max_mtu)
1029 DBG(dev, "change_mtu(%d)" NL, new_mtu);
1031 if (netif_running(ndev)) {
1032 /* Check if we really need to reinitalize RX ring */
1033 if (emac_rx_skb_size(ndev->mtu) != emac_rx_skb_size(new_mtu))
1034 ret = emac_resize_rx_ring(dev, new_mtu);
1038 ndev->mtu = new_mtu;
1039 dev->rx_skb_size = emac_rx_skb_size(new_mtu);
1040 dev->rx_sync_size = emac_rx_sync_size(new_mtu);
/* Free every queued TX skb and zero its descriptor; skbs still marked
 * READY (never transmitted) are counted as tx_dropped. */
1046 static void emac_clean_tx_ring(struct emac_instance *dev)
1050 for (i = 0; i < NUM_TX_BUFF; ++i) {
1051 if (dev->tx_skb[i]) {
1052 dev_kfree_skb(dev->tx_skb[i]);
1053 dev->tx_skb[i] = NULL;
1054 if (dev->tx_desc[i].ctrl & MAL_TX_CTRL_READY)
1055 ++dev->estats.tx_dropped;
1057 dev->tx_desc[i].ctrl = 0;
1058 dev->tx_desc[i].data_ptr = 0;
/* Free every RX ring skb and clear its descriptor, plus any in-progress
 * scatter-gather skb. */
1062 static void emac_clean_rx_ring(struct emac_instance *dev)
1066 for (i = 0; i < NUM_RX_BUFF; ++i)
1067 if (dev->rx_skb[i]) {
1068 dev->rx_desc[i].ctrl = 0;
1069 dev_kfree_skb(dev->rx_skb[i]);
1070 dev->rx_skb[i] = NULL;
1071 dev->rx_desc[i].data_ptr = 0;
1074 if (dev->rx_sg_skb) {
1075 dev_kfree_skb(dev->rx_sg_skb);
1076 dev->rx_sg_skb = NULL;
/* Allocate and DMA-map one RX skb for ring slot 'slot' and mark the BD
 * empty (with WRAP on the last slot). The "+ 2" offset keeps the IP
 * header word-aligned. Returns nonzero on allocation failure (the failure
 * branch is not visible in this sampled view). */
1080 static inline int emac_alloc_rx_skb(struct emac_instance *dev, int slot,
1083 struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags);
1087 dev->rx_skb[slot] = skb;
1088 dev->rx_desc[slot].data_len = 0;
1090 skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
1091 dev->rx_desc[slot].data_ptr =
1092 dma_map_single(&dev->ofdev->dev, skb->data - 2, dev->rx_sync_size,
1093 DMA_FROM_DEVICE) + 2;
1095 dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
1096 (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
/* Log the current link state: speed/duplex/pause when the carrier is up,
 * a plain "link is down" otherwise. */
1101 static void emac_print_link_status(struct emac_instance *dev)
1103 if (netif_carrier_ok(dev->ndev))
1104 printk(KERN_INFO "%s: link is up, %d %s%s\n",
1105 dev->ndev->name, dev->phy.speed,
1106 dev->phy.duplex == DUPLEX_FULL ? "FDX" : "HDX",
1107 dev->phy.pause ? ", pause enabled" :
1108 dev->phy.asym_pause ? ", asymmetric pause enabled" : "");
1110 printk(KERN_INFO "%s: link is down\n", dev->ndev->name);
1113 /* Process ctx, rtnl_lock semaphore */
/* netdev open hook (rtnl_lock held): request the error IRQ, populate the
 * RX ring, reset ring state, then under link_lock start PHY link polling
 * (or force carrier on for PHY-less setups), subscribe the default
 * multicast address (needed for PAUSE frames), configure the MAC and MAL
 * channels, and bring the interface up. The error unwind at the end frees
 * the RX ring and IRQ. Some lines (goto labels, return statements) are
 * missing from this sampled view. */
1114 static int emac_open(struct net_device *ndev)
1116 struct emac_instance *dev = netdev_priv(ndev);
1119 DBG(dev, "open" NL);
1121 /* Setup error IRQ handler */
1122 err = request_irq(dev->emac_irq, emac_irq, 0, "EMAC", dev);
1124 printk(KERN_ERR "%s: failed to request IRQ %d\n",
1125 ndev->name, dev->emac_irq);
1129 /* Allocate RX ring */
1130 for (i = 0; i < NUM_RX_BUFF; ++i)
1131 if (emac_alloc_rx_skb(dev, i, GFP_KERNEL)) {
1132 printk(KERN_ERR "%s: failed to allocate RX ring\n",
1137 dev->tx_cnt = dev->tx_slot = dev->ack_slot = dev->rx_slot = 0;
1138 clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
1139 dev->rx_sg_skb = NULL;
1141 mutex_lock(&dev->link_lock);
1144 /* Start PHY polling now.
1146 if (dev->phy.address >= 0) {
1147 int link_poll_interval;
1148 if (dev->phy.def->ops->poll_link(&dev->phy)) {
1149 dev->phy.def->ops->read_link(&dev->phy);
1150 emac_rx_clk_default(dev);
1151 netif_carrier_on(dev->ndev);
1152 link_poll_interval = PHY_POLL_LINK_ON;
1154 emac_rx_clk_tx(dev);
1155 netif_carrier_off(dev->ndev);
1156 link_poll_interval = PHY_POLL_LINK_OFF;
1158 dev->link_polling = 1;
1160 schedule_delayed_work(&dev->link_work, link_poll_interval);
1161 emac_print_link_status(dev);
1163 netif_carrier_on(dev->ndev);
1165 /* Required for Pause packet support in EMAC */
1166 dev_mc_add(ndev, default_mcast_addr, sizeof(default_mcast_addr), 1);
1168 emac_configure(dev);
1169 mal_poll_add(dev->mal, &dev->commac);
1170 mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
1171 mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(ndev->mtu));
1172 mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
1173 emac_tx_enable(dev);
1174 emac_rx_enable(dev);
1175 emac_netif_start(dev);
1177 mutex_unlock(&dev->link_lock);
1181 emac_clean_rx_ring(dev);
1182 free_irq(dev->emac_irq, dev);
/* Compare the link parameters currently programmed in MR1 (duplex, speed,
 * pause/asym-pause from EIFC/APP) against the PHY-reported state; nonzero
 * means the MAC needs reconfiguring. Several switch arms and assignments
 * are missing from this sampled view. */
1189 static int emac_link_differs(struct emac_instance *dev)
1191 u32 r = in_be32(&dev->emacp->mr1);
1193 int duplex = r & EMAC_MR1_FDE ? DUPLEX_FULL : DUPLEX_HALF;
1194 int speed, pause, asym_pause;
1196 if (r & EMAC_MR1_MF_1000)
1198 else if (r & EMAC_MR1_MF_100)
1203 switch (r & (EMAC_MR1_EIFC | EMAC_MR1_APP)) {
1204 case (EMAC_MR1_EIFC | EMAC_MR1_APP):
1213 pause = asym_pause = 0;
1215 return speed != dev->phy.speed || duplex != dev->phy.duplex ||
1216 pause != dev->phy.pause || asym_pause != dev->phy.asym_pause;
/* Delayed-work PHY poller. On link-up transition: restore default RX
 * clock, read fresh link parameters, assert carrier and do a full TX
 * reset; on link-down transition: switch RX clock to TX, drop carrier and
 * reinitialize. Reschedules itself at PHY_POLL_LINK_ON/OFF intervals.
 * Runs under link_lock. */
1220 static void emac_link_timer(struct work_struct *work)
1222 struct emac_instance *dev =
1223 container_of((struct delayed_work *)work,
1224 struct emac_instance, link_work);
1225 int link_poll_interval;
1227 mutex_lock(&dev->link_lock);
1228 DBG2(dev, "link timer" NL);
1233 if (dev->phy.def->ops->poll_link(&dev->phy)) {
1234 if (!netif_carrier_ok(dev->ndev)) {
1235 emac_rx_clk_default(dev);
1236 /* Get new link parameters */
1237 dev->phy.def->ops->read_link(&dev->phy);
1239 netif_carrier_on(dev->ndev);
1240 emac_netif_stop(dev);
1241 emac_full_tx_reset(dev);
1242 emac_netif_start(dev);
1243 emac_print_link_status(dev);
1245 link_poll_interval = PHY_POLL_LINK_ON;
1247 if (netif_carrier_ok(dev->ndev)) {
1248 emac_rx_clk_tx(dev);
1249 netif_carrier_off(dev->ndev);
1250 netif_tx_disable(dev->ndev);
1251 emac_reinitialize(dev);
1252 emac_print_link_status(dev);
1254 link_poll_interval = PHY_POLL_LINK_OFF;
1256 schedule_delayed_work(&dev->link_work, link_poll_interval);
1258 mutex_unlock(&dev->link_lock);
/*
 * emac_force_link_update - force the link poll to run soon.
 * Drops carrier, cancels any pending link work, and (if polling is
 * still enabled) reschedules it with the link-off interval so new
 * PHY settings take effect quickly.
 */
1261 static void emac_force_link_update(struct emac_instance *dev)
1263 netif_carrier_off(dev->ndev);
1265 if (dev->link_polling) {
1266 cancel_rearming_delayed_work(&dev->link_work);
1267 if (dev->link_polling)
1268 schedule_delayed_work(&dev->link_work, PHY_POLL_LINK_OFF);
/* Process ctx, rtnl_lock semaphore */
/*
 * emac_close - net_device ->stop() handler.
 * Stops link polling (only when a PHY is attached), quiesces the
 * interface under link_lock, disables RX/TX on both EMAC and MAL,
 * detaches from MAL polling, drains the rings and releases the IRQ.
 */
1273 static int emac_close(struct net_device *ndev)
1275 struct emac_instance *dev = netdev_priv(ndev);
1277 DBG(dev, "close" NL);
1279 if (dev->phy.address >= 0) {
1280 dev->link_polling = 0;
1281 cancel_rearming_delayed_work(&dev->link_work);
1283 mutex_lock(&dev->link_lock);
1284 emac_netif_stop(dev);
1286 mutex_unlock(&dev->link_lock);
1288 emac_rx_disable(dev);
1289 emac_tx_disable(dev);
1290 mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);
1291 mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
1292 mal_poll_del(dev->mal, &dev->commac);
1294 emac_clean_tx_ring(dev);
1295 emac_clean_rx_ring(dev);
1297 free_irq(dev->emac_irq, dev);
/*
 * emac_tx_csum - return TX descriptor control bits for HW checksum.
 * If the chip has a TAH and the skb asks for partial checksum, count
 * the packet and request TAH checksum generation via the BD control
 * word; otherwise (elided here) no extra bits are set.
 */
1302 static inline u16 emac_tx_csum(struct emac_instance *dev,
1303 struct sk_buff *skb)
1305 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
1306 (skb->ip_summed == CHECKSUM_PARTIAL)) {
1307 ++dev->stats.tx_packets_csum;
1308 return EMAC_TX_CTRL_TAH_CSUM;
/*
 * emac_xmit_finish - common tail of the xmit paths.
 * Kicks the transmitter via TMR0 (EMAC4 vs. classic layout), stops the
 * TX queue when the ring becomes full, and updates trans_start plus
 * software TX counters.
 */
1313 static inline int emac_xmit_finish(struct emac_instance *dev, int len)
1315 struct emac_regs __iomem *p = dev->emacp;
1316 struct net_device *ndev = dev->ndev;
1318 /* Send the packet out. If the if makes a significant perf
1319 * difference, then we can store the TMR0 value in "dev"
1322 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
1323 out_be32(&p->tmr0, EMAC4_TMR0_XMIT);
1325 out_be32(&p->tmr0, EMAC_TMR0_XMIT);
1327 if (unlikely(++dev->tx_cnt == NUM_TX_BUFF)) {
1328 netif_stop_queue(ndev);
1329 DBG2(dev, "stopped TX queue" NL);
1332 ndev->trans_start = jiffies;
1333 ++dev->stats.tx_packets;
1334 dev->stats.tx_bytes += len;
/*
 * emac_start_xmit - simple (non-SG) hard_start_xmit path.
 * Claims the next TX slot, DMA-maps the linear skb data, fills the
 * buffer descriptor (wrapping at ring end) and hands off to
 * emac_xmit_finish().
 */
1340 static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1342 struct emac_instance *dev = netdev_priv(ndev);
1343 unsigned int len = skb->len;
1346 u16 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
1347 MAL_TX_CTRL_LAST | emac_tx_csum(dev, skb);
1349 slot = dev->tx_slot++;
1350 if (dev->tx_slot == NUM_TX_BUFF) {
1352 ctrl |= MAL_TX_CTRL_WRAP;
1355 DBG2(dev, "xmit(%u) %d" NL, len, slot);
1357 dev->tx_skb[slot] = skb;
1358 dev->tx_desc[slot].data_ptr = dma_map_single(&dev->ofdev->dev,
1361 dev->tx_desc[slot].data_len = (u16) len;
1363 dev->tx_desc[slot].ctrl = ctrl;
1365 return emac_xmit_finish(dev, len);
/*
 * emac_xmit_split - split one DMA region into MAL_MAX_TX_SIZE chunks.
 * Walks the TX ring from 'slot', writing one BD per chunk; the LAST
 * bit is set only on the final chunk (when 'last' is true), WRAP at
 * the ring end.  The skb pointer is attached elsewhere, so tx_skb is
 * cleared for these slots.  Returns the last slot used (loop structure
 * partially elided in this excerpt).
 */
1368 static inline int emac_xmit_split(struct emac_instance *dev, int slot,
1369 u32 pd, int len, int last, u16 base_ctrl)
1372 u16 ctrl = base_ctrl;
1373 int chunk = min(len, MAL_MAX_TX_SIZE);
1376 slot = (slot + 1) % NUM_TX_BUFF;
1379 ctrl |= MAL_TX_CTRL_LAST;
1380 if (slot == NUM_TX_BUFF - 1)
1381 ctrl |= MAL_TX_CTRL_WRAP;
1383 dev->tx_skb[slot] = NULL;
1384 dev->tx_desc[slot].data_ptr = pd;
1385 dev->tx_desc[slot].data_len = (u16) chunk;
1386 dev->tx_desc[slot].ctrl = ctrl;
/* Tx lock BH disabled (SG version for TAH equipped EMACs) */
/*
 * emac_start_xmit_sg - scatter-gather hard_start_xmit path.
 * Small linear skbs go through the fast emac_start_xmit() path.
 * Otherwise the head and each page fragment are DMA-mapped and split
 * into MAL-sized chunks via emac_xmit_split().  The skb is attached to
 * the LAST slot so it is not freed before the whole frame completes.
 * If the optimistic slot estimate turns out wrong, the already-written
 * BDs are undone and the queue is stopped (undo path partially elided
 * in this excerpt).
 */
1398 static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
1400 struct emac_instance *dev = netdev_priv(ndev);
1401 int nr_frags = skb_shinfo(skb)->nr_frags;
1402 int len = skb->len, chunk;
1407 /* This is common "fast" path */
1408 if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE))
1409 return emac_start_xmit(skb, ndev);
1411 len -= skb->data_len;
1413 /* Note, this is only an *estimation*, we can still run out of empty
1414 * slots because of the additional fragmentation into
1415 * MAL_MAX_TX_SIZE-sized chunks
1417 if (unlikely(dev->tx_cnt + nr_frags + mal_tx_chunks(len) > NUM_TX_BUFF))
1420 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
1421 emac_tx_csum(dev, skb);
1422 slot = dev->tx_slot;
1425 dev->tx_skb[slot] = NULL;
1426 chunk = min(len, MAL_MAX_TX_SIZE);
1427 dev->tx_desc[slot].data_ptr = pd =
1428 dma_map_single(&dev->ofdev->dev, skb->data, len, DMA_TO_DEVICE);
1429 dev->tx_desc[slot].data_len = (u16) chunk;
1432 slot = emac_xmit_split(dev, slot, pd + chunk, len, !nr_frags,
1435 for (i = 0; i < nr_frags; ++i) {
1436 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
1439 if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
1442 pd = dma_map_page(&dev->ofdev->dev, frag->page, frag->page_offset, len,
1445 slot = emac_xmit_split(dev, slot, pd, len, i == nr_frags - 1,
1449 DBG2(dev, "xmit_sg(%u) %d - %d" NL, skb->len, dev->tx_slot, slot);
1451 /* Attach skb to the last slot so we don't release it too early */
1452 dev->tx_skb[slot] = skb;
1454 /* Send the packet out */
1455 if (dev->tx_slot == NUM_TX_BUFF - 1)
1456 ctrl |= MAL_TX_CTRL_WRAP;
1458 dev->tx_desc[dev->tx_slot].ctrl = ctrl;
1459 dev->tx_slot = (slot + 1) % NUM_TX_BUFF;
1461 return emac_xmit_finish(dev, skb->len);
1464 /* Well, too bad. Our previous estimation was overly optimistic.
1467 while (slot != dev->tx_slot) {
1468 dev->tx_desc[slot].ctrl = 0;
1471 slot = NUM_TX_BUFF - 1;
1473 ++dev->estats.tx_undo;
1476 netif_stop_queue(ndev);
1477 DBG2(dev, "stopped TX queue" NL);
/*
 * emac_parse_tx_error - decode a bad TX BD status word into the
 * per-condition error counters in dev->estats.
 */
1482 static void emac_parse_tx_error(struct emac_instance *dev, u16 ctrl)
1484 struct emac_error_stats *st = &dev->estats;
1486 DBG(dev, "BD TX error %04x" NL, ctrl);
1489 if (ctrl & EMAC_TX_ST_BFCS)
1490 ++st->tx_bd_bad_fcs;
1491 if (ctrl & EMAC_TX_ST_LCS)
1492 ++st->tx_bd_carrier_loss;
1493 if (ctrl & EMAC_TX_ST_ED)
1494 ++st->tx_bd_excessive_deferral;
1495 if (ctrl & EMAC_TX_ST_EC)
1496 ++st->tx_bd_excessive_collisions;
1497 if (ctrl & EMAC_TX_ST_LC)
1498 ++st->tx_bd_late_collision;
1499 if (ctrl & EMAC_TX_ST_MC)
1500 ++st->tx_bd_multple_collisions;
1501 if (ctrl & EMAC_TX_ST_SC)
1502 ++st->tx_bd_single_collision;
1503 if (ctrl & EMAC_TX_ST_UR)
1504 ++st->tx_bd_underrun;
1505 if (ctrl & EMAC_TX_ST_SQE)
/*
 * emac_poll_tx - MAL TX completion callback.
 * Under the TX lock (BH disabled), reaps completed descriptors
 * starting at ack_slot, frees skbs, records BD errors, and wakes the
 * queue once occupancy drops below EMAC_TX_WAKEUP_THRESH.  The bad-BD
 * mask depends on whether a TAH is present.
 */
1509 static void emac_poll_tx(void *param)
1511 struct emac_instance *dev = param;
1514 DBG2(dev, "poll_tx, %d %d" NL, dev->tx_cnt, dev->ack_slot);
1516 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
1517 bad_mask = EMAC_IS_BAD_TX_TAH;
1519 bad_mask = EMAC_IS_BAD_TX;
1521 netif_tx_lock_bh(dev->ndev);
1524 int slot = dev->ack_slot, n = 0;
1526 ctrl = dev->tx_desc[slot].ctrl;
1527 if (!(ctrl & MAL_TX_CTRL_READY)) {
1528 struct sk_buff *skb = dev->tx_skb[slot];
1533 dev->tx_skb[slot] = NULL;
1535 slot = (slot + 1) % NUM_TX_BUFF;
1537 if (unlikely(ctrl & bad_mask))
1538 emac_parse_tx_error(dev, ctrl);
1544 dev->ack_slot = slot;
1545 if (netif_queue_stopped(dev->ndev) &&
1546 dev->tx_cnt < EMAC_TX_WAKEUP_THRESH)
1547 netif_wake_queue(dev->ndev);
1549 DBG2(dev, "tx %d pkts" NL, n);
1552 netif_tx_unlock_bh(dev->ndev);
/*
 * emac_recycle_rx_skb - give an RX skb back to the hardware.
 * Re-maps the buffer (data - 2 keeps the IP header aligned) and marks
 * the BD EMPTY again, with WRAP on the last ring slot.
 */
1555 static inline void emac_recycle_rx_skb(struct emac_instance *dev, int slot,
1558 struct sk_buff *skb = dev->rx_skb[slot];
1560 DBG2(dev, "recycle %d %d" NL, slot, len);
1563 dma_map_single(&dev->ofdev->dev, skb->data - 2,
1564 EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE);
1566 dev->rx_desc[slot].data_len = 0;
1568 dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
1569 (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
/*
 * emac_parse_rx_error - decode a bad RX BD status word into the
 * per-condition error counters in dev->estats.
 */
1572 static void emac_parse_rx_error(struct emac_instance *dev, u16 ctrl)
1574 struct emac_error_stats *st = &dev->estats;
1576 DBG(dev, "BD RX error %04x" NL, ctrl);
1579 if (ctrl & EMAC_RX_ST_OE)
1580 ++st->rx_bd_overrun;
1581 if (ctrl & EMAC_RX_ST_BP)
1582 ++st->rx_bd_bad_packet;
1583 if (ctrl & EMAC_RX_ST_RP)
1584 ++st->rx_bd_runt_packet;
1585 if (ctrl & EMAC_RX_ST_SE)
1586 ++st->rx_bd_short_event;
1587 if (ctrl & EMAC_RX_ST_AE)
1588 ++st->rx_bd_alignment_error;
1589 if (ctrl & EMAC_RX_ST_BFCS)
1590 ++st->rx_bd_bad_fcs;
1591 if (ctrl & EMAC_RX_ST_PTL)
1592 ++st->rx_bd_packet_too_long;
1593 if (ctrl & EMAC_RX_ST_ORE)
1594 ++st->rx_bd_out_of_range;
1595 if (ctrl & EMAC_RX_ST_IRE)
1596 ++st->rx_bd_in_range;
/*
 * emac_rx_csum - mark skb checksum-verified when the TAH reported a
 * clean packet (ctrl == 0) and a TAH device is present.  Compiled out
 * without CONFIG_IBM_NEW_EMAC_TAH.
 */
1599 static inline void emac_rx_csum(struct emac_instance *dev,
1600 struct sk_buff *skb, u16 ctrl)
1602 #ifdef CONFIG_IBM_NEW_EMAC_TAH
1603 if (!ctrl && dev->tah_dev) {
1604 skb->ip_summed = CHECKSUM_UNNECESSARY;
1605 ++dev->stats.rx_packets_csum;
/*
 * emac_rx_sg_append - append a continuation BD to the in-progress
 * multi-descriptor RX packet (dev->rx_sg_skb).  Drops the whole packet
 * when it would exceed rx_skb_size; in all paths the slot's skb is
 * recycled back to the hardware.  Return value lines are elided in
 * this excerpt.
 */
1610 static inline int emac_rx_sg_append(struct emac_instance *dev, int slot)
1612 if (likely(dev->rx_sg_skb != NULL)) {
1613 int len = dev->rx_desc[slot].data_len;
1614 int tot_len = dev->rx_sg_skb->len + len;
1616 if (unlikely(tot_len + 2 > dev->rx_skb_size)) {
1617 ++dev->estats.rx_dropped_mtu;
1618 dev_kfree_skb(dev->rx_sg_skb);
1619 dev->rx_sg_skb = NULL;
1621 cacheable_memcpy(skb_tail_pointer(dev->rx_sg_skb),
1622 dev->rx_skb[slot]->data, len);
1623 skb_put(dev->rx_sg_skb, len);
1624 emac_recycle_rx_skb(dev, slot, len);
1628 emac_recycle_rx_skb(dev, slot, 0);
1632 /* NAPI poll context */
/*
 * emac_poll_rx - NAPI RX poll handler.
 * Consumes up to 'budget' ready BDs: single-BD frames are either
 * copied (short frames below EMAC_RX_COPY_THRESH) or handed up with a
 * replacement skb allocated; multi-BD frames are accumulated in
 * rx_sg_skb.  Bad BDs bump error stats and are recycled.  On exit it
 * also restarts a stopped RX channel, dropping any partial SG packet.
 * Several loop-control and OOM lines are elided in this excerpt.
 */
1633 static int emac_poll_rx(void *param, int budget)
1635 struct emac_instance *dev = param;
1636 int slot = dev->rx_slot, received = 0;
1638 DBG2(dev, "poll_rx(%d)" NL, budget);
1641 while (budget > 0) {
1643 struct sk_buff *skb;
1644 u16 ctrl = dev->rx_desc[slot].ctrl;
1646 if (ctrl & MAL_RX_CTRL_EMPTY)
1649 skb = dev->rx_skb[slot];
1651 len = dev->rx_desc[slot].data_len;
1653 if (unlikely(!MAL_IS_SINGLE_RX(ctrl)))
1656 ctrl &= EMAC_BAD_RX_MASK;
1657 if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
1658 emac_parse_rx_error(dev, ctrl);
1659 ++dev->estats.rx_dropped_error;
1660 emac_recycle_rx_skb(dev, slot, 0);
1665 if (len < ETH_HLEN) {
1666 ++dev->estats.rx_dropped_stack;
1667 emac_recycle_rx_skb(dev, slot, len);
1671 if (len && len < EMAC_RX_COPY_THRESH) {
1672 struct sk_buff *copy_skb =
1673 alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2, GFP_ATOMIC);
1674 if (unlikely(!copy_skb))
1677 skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2);
1678 cacheable_memcpy(copy_skb->data - 2, skb->data - 2,
1680 emac_recycle_rx_skb(dev, slot, len);
1682 } else if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC)))
1687 skb->dev = dev->ndev;
1688 skb->protocol = eth_type_trans(skb, dev->ndev);
1689 emac_rx_csum(dev, skb, ctrl);
1691 if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
1692 ++dev->estats.rx_dropped_stack;
1694 ++dev->stats.rx_packets;
1696 dev->stats.rx_bytes += len;
1697 slot = (slot + 1) % NUM_RX_BUFF;
1702 if (ctrl & MAL_RX_CTRL_FIRST) {
1703 BUG_ON(dev->rx_sg_skb);
1704 if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) {
1705 DBG(dev, "rx OOM %d" NL, slot);
1706 ++dev->estats.rx_dropped_oom;
1707 emac_recycle_rx_skb(dev, slot, 0);
1709 dev->rx_sg_skb = skb;
1712 } else if (!emac_rx_sg_append(dev, slot) &&
1713 (ctrl & MAL_RX_CTRL_LAST)) {
1715 skb = dev->rx_sg_skb;
1716 dev->rx_sg_skb = NULL;
1718 ctrl &= EMAC_BAD_RX_MASK;
1719 if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
1720 emac_parse_rx_error(dev, ctrl);
1721 ++dev->estats.rx_dropped_error;
1729 DBG(dev, "rx OOM %d" NL, slot);
1730 /* Drop the packet and recycle skb */
1731 ++dev->estats.rx_dropped_oom;
1732 emac_recycle_rx_skb(dev, slot, 0);
1737 DBG2(dev, "rx %d BDs" NL, received);
1738 dev->rx_slot = slot;
1741 if (unlikely(budget && test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags))) {
1743 if (!(dev->rx_desc[slot].ctrl & MAL_RX_CTRL_EMPTY)) {
1744 DBG2(dev, "rx restart" NL);
1749 if (dev->rx_sg_skb) {
1750 DBG2(dev, "dropping partial rx packet" NL);
1751 ++dev->estats.rx_dropped_error;
1752 dev_kfree_skb(dev->rx_sg_skb);
1753 dev->rx_sg_skb = NULL;
1756 clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
1757 mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
1758 emac_rx_enable(dev);
1764 /* NAPI poll context */
/* emac_peek_rx - true when the next RX BD holds a completed frame. */
1765 static int emac_peek_rx(void *param)
1767 struct emac_instance *dev = param;
1769 return !(dev->rx_desc[dev->rx_slot].ctrl & MAL_RX_CTRL_EMPTY);
1772 /* NAPI poll context */
/*
 * emac_peek_rx_sg - SG variant of emac_peek_rx: scan forward from
 * rx_slot until an EMPTY BD (nothing complete) or a LAST BD (a whole
 * frame is available); the full-ring wrap check guards against an
 * endless scan.  Loop/return lines are elided in this excerpt.
 */
1773 static int emac_peek_rx_sg(void *param)
1775 struct emac_instance *dev = param;
1777 int slot = dev->rx_slot;
1779 u16 ctrl = dev->rx_desc[slot].ctrl;
1780 if (ctrl & MAL_RX_CTRL_EMPTY)
1782 else if (ctrl & MAL_RX_CTRL_LAST)
1785 slot = (slot + 1) % NUM_RX_BUFF;
1787 /* I'm just being paranoid here :) */
1788 if (unlikely(slot == dev->rx_slot))
/* emac_rxde - RX descriptor error callback: count and disable RX async. */
1794 static void emac_rxde(void *param)
1796 struct emac_instance *dev = param;
1798 ++dev->estats.rx_stopped;
1799 emac_rx_disable_async(dev);
/*
 * emac_irq - EMAC error interrupt handler.
 * Reads and acknowledges ISR (write-1-to-clear), then bumps the
 * matching error statistic for each asserted bit.  Runs under
 * dev->lock.  Some counter lines are elided in this excerpt.
 */
1803 static irqreturn_t emac_irq(int irq, void *dev_instance)
1805 struct emac_instance *dev = dev_instance;
1806 struct emac_regs __iomem *p = dev->emacp;
1807 struct emac_error_stats *st = &dev->estats;
1810 spin_lock(&dev->lock);
1812 isr = in_be32(&p->isr);
1813 out_be32(&p->isr, isr);
1815 DBG(dev, "isr = %08x" NL, isr);
1817 if (isr & EMAC4_ISR_TXPE)
1819 if (isr & EMAC4_ISR_RXPE)
1821 if (isr & EMAC4_ISR_TXUE)
1823 if (isr & EMAC4_ISR_RXOE)
1824 ++st->rx_fifo_overrun;
1825 if (isr & EMAC_ISR_OVR)
1827 if (isr & EMAC_ISR_BP)
1828 ++st->rx_bad_packet;
1829 if (isr & EMAC_ISR_RP)
1830 ++st->rx_runt_packet;
1831 if (isr & EMAC_ISR_SE)
1832 ++st->rx_short_event;
1833 if (isr & EMAC_ISR_ALE)
1834 ++st->rx_alignment_error;
1835 if (isr & EMAC_ISR_BFCS)
1837 if (isr & EMAC_ISR_PTLE)
1838 ++st->rx_packet_too_long;
1839 if (isr & EMAC_ISR_ORE)
1840 ++st->rx_out_of_range;
1841 if (isr & EMAC_ISR_IRE)
1843 if (isr & EMAC_ISR_SQE)
1845 if (isr & EMAC_ISR_TE)
1848 spin_unlock(&dev->lock);
/*
 * emac_stats - net_device ->get_stats() handler.
 * Folds the driver's detailed 64-bit stats (dev->stats/dev->estats)
 * into the legacy net_device_stats structure under dev->lock, so the
 * snapshot is consistent with the IRQ path.
 */
1853 static struct net_device_stats *emac_stats(struct net_device *ndev)
1855 struct emac_instance *dev = netdev_priv(ndev);
1856 struct emac_stats *st = &dev->stats;
1857 struct emac_error_stats *est = &dev->estats;
1858 struct net_device_stats *nst = &dev->nstats;
1859 unsigned long flags;
1861 DBG2(dev, "stats" NL);
1863 /* Compute "legacy" statistics */
1864 spin_lock_irqsave(&dev->lock, flags);
1865 nst->rx_packets = (unsigned long)st->rx_packets;
1866 nst->rx_bytes = (unsigned long)st->rx_bytes;
1867 nst->tx_packets = (unsigned long)st->tx_packets;
1868 nst->tx_bytes = (unsigned long)st->tx_bytes;
1869 nst->rx_dropped = (unsigned long)(est->rx_dropped_oom +
1870 est->rx_dropped_error +
1871 est->rx_dropped_resize +
1872 est->rx_dropped_mtu);
1873 nst->tx_dropped = (unsigned long)est->tx_dropped;
1875 nst->rx_errors = (unsigned long)est->rx_bd_errors;
1876 nst->rx_fifo_errors = (unsigned long)(est->rx_bd_overrun +
1877 est->rx_fifo_overrun +
1879 nst->rx_frame_errors = (unsigned long)(est->rx_bd_alignment_error +
1880 est->rx_alignment_error);
1881 nst->rx_crc_errors = (unsigned long)(est->rx_bd_bad_fcs +
1883 nst->rx_length_errors = (unsigned long)(est->rx_bd_runt_packet +
1884 est->rx_bd_short_event +
1885 est->rx_bd_packet_too_long +
1886 est->rx_bd_out_of_range +
1887 est->rx_bd_in_range +
1888 est->rx_runt_packet +
1889 est->rx_short_event +
1890 est->rx_packet_too_long +
1891 est->rx_out_of_range +
1894 nst->tx_errors = (unsigned long)(est->tx_bd_errors + est->tx_errors);
1895 nst->tx_fifo_errors = (unsigned long)(est->tx_bd_underrun +
1897 nst->tx_carrier_errors = (unsigned long)est->tx_bd_carrier_loss;
1898 nst->collisions = (unsigned long)(est->tx_bd_excessive_deferral +
1899 est->tx_bd_excessive_collisions +
1900 est->tx_bd_late_collision +
1901 est->tx_bd_multple_collisions);
1902 spin_unlock_irqrestore(&dev->lock, flags);
/* MAL callback vectors: plain and scatter-gather (TAH) variants differ
 * only in the peek_rx hook. */
1906 static struct mal_commac_ops emac_commac_ops = {
1907 .poll_tx = &emac_poll_tx,
1908 .poll_rx = &emac_poll_rx,
1909 .peek_rx = &emac_peek_rx,
1913 static struct mal_commac_ops emac_commac_sg_ops = {
1914 .poll_tx = &emac_poll_tx,
1915 .poll_rx = &emac_poll_rx,
1916 .peek_rx = &emac_peek_rx_sg,
1920 /* Ethtool support */
/*
 * emac_ethtool_get_settings - report PHY capabilities plus the
 * current negotiated settings; the mutable fields are read under
 * link_lock.
 */
1921 static int emac_ethtool_get_settings(struct net_device *ndev,
1922 struct ethtool_cmd *cmd)
1924 struct emac_instance *dev = netdev_priv(ndev);
1926 cmd->supported = dev->phy.features;
1927 cmd->port = PORT_MII;
1928 cmd->phy_address = dev->phy.address;
1930 dev->phy.address >= 0 ? XCVR_EXTERNAL : XCVR_INTERNAL;
1932 mutex_lock(&dev->link_lock);
1933 cmd->advertising = dev->phy.advertising;
1934 cmd->autoneg = dev->phy.autoneg;
1935 cmd->speed = dev->phy.speed;
1936 cmd->duplex = dev->phy.duplex;
1937 mutex_unlock(&dev->link_lock);
/*
 * emac_ethtool_set_settings - validate and apply new link settings.
 * Rejects requests without an attached PHY, unsupported
 * speed/duplex combinations (forced mode) or autoneg when the PHY
 * lacks it; then programs the PHY under link_lock and forces a link
 * re-poll.  Error-return lines are elided in this excerpt.
 */
1942 static int emac_ethtool_set_settings(struct net_device *ndev,
1943 struct ethtool_cmd *cmd)
1945 struct emac_instance *dev = netdev_priv(ndev);
1946 u32 f = dev->phy.features;
1948 DBG(dev, "set_settings(%d, %d, %d, 0x%08x)" NL,
1949 cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising);
1951 /* Basic sanity checks */
1952 if (dev->phy.address < 0)
1954 if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
1956 if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
1958 if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL)
1961 if (cmd->autoneg == AUTONEG_DISABLE) {
1962 switch (cmd->speed) {
1964 if (cmd->duplex == DUPLEX_HALF
1965 && !(f & SUPPORTED_10baseT_Half))
1967 if (cmd->duplex == DUPLEX_FULL
1968 && !(f & SUPPORTED_10baseT_Full))
1972 if (cmd->duplex == DUPLEX_HALF
1973 && !(f & SUPPORTED_100baseT_Half))
1975 if (cmd->duplex == DUPLEX_FULL
1976 && !(f & SUPPORTED_100baseT_Full))
1980 if (cmd->duplex == DUPLEX_HALF
1981 && !(f & SUPPORTED_1000baseT_Half))
1983 if (cmd->duplex == DUPLEX_FULL
1984 && !(f & SUPPORTED_1000baseT_Full))
1991 mutex_lock(&dev->link_lock);
1992 dev->phy.def->ops->setup_forced(&dev->phy, cmd->speed,
1994 mutex_unlock(&dev->link_lock);
1997 if (!(f & SUPPORTED_Autoneg))
2000 mutex_lock(&dev->link_lock);
2001 dev->phy.def->ops->setup_aneg(&dev->phy,
2002 (cmd->advertising & f) |
2003 (dev->phy.advertising &
2005 ADVERTISED_Asym_Pause)));
2006 mutex_unlock(&dev->link_lock);
2008 emac_force_link_update(dev);
/* Report the fixed RX/TX ring sizes (not tunable at runtime). */
2013 static void emac_ethtool_get_ringparam(struct net_device *ndev,
2014 struct ethtool_ringparam *rp)
2016 rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF;
2017 rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF;
/*
 * emac_ethtool_get_pauseparam - report pause/flow-control state from
 * the cached PHY settings, under link_lock.  Some assignment lines are
 * elided in this excerpt.
 */
2020 static void emac_ethtool_get_pauseparam(struct net_device *ndev,
2021 struct ethtool_pauseparam *pp)
2023 struct emac_instance *dev = netdev_priv(ndev);
2025 mutex_lock(&dev->link_lock);
2026 if ((dev->phy.features & SUPPORTED_Autoneg) &&
2027 (dev->phy.advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause)))
2030 if (dev->phy.duplex == DUPLEX_FULL) {
2032 pp->rx_pause = pp->tx_pause = 1;
2033 else if (dev->phy.asym_pause)
2036 mutex_unlock(&dev->link_lock);
/* RX checksum offload is available exactly when a TAH is attached. */
2039 static u32 emac_ethtool_get_rx_csum(struct net_device *ndev)
2041 struct emac_instance *dev = netdev_priv(ndev);
2043 return dev->tah_dev != NULL;
/* Size of this EMAC's register dump: subheader + EMAC4 or classic
 * register block depending on the feature bit. */
2046 static int emac_get_regs_len(struct emac_instance *dev)
2048 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
2049 return sizeof(struct emac_ethtool_regs_subhdr) +
2050 EMAC4_ETHTOOL_REGS_SIZE(dev);
2052 return sizeof(struct emac_ethtool_regs_subhdr) +
2053 EMAC_ETHTOOL_REGS_SIZE(dev);
/*
 * emac_ethtool_get_regs_len - total dump size: header + EMAC + MAL,
 * plus ZMII/RGMII/TAH sections when those companion devices exist.
 */
2056 static int emac_ethtool_get_regs_len(struct net_device *ndev)
2058 struct emac_instance *dev = netdev_priv(ndev);
2061 size = sizeof(struct emac_ethtool_regs_hdr) +
2062 emac_get_regs_len(dev) + mal_get_regs_len(dev->mal);
2063 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2064 size += zmii_get_regs_len(dev->zmii_dev);
2065 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2066 size += rgmii_get_regs_len(dev->rgmii_dev);
2067 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2068 size += tah_get_regs_len(dev->tah_dev);
/*
 * emac_dump_regs - write this EMAC's subheader + raw MMIO register
 * copy into 'buf'; returns the position just past the written data.
 */
2073 static void *emac_dump_regs(struct emac_instance *dev, void *buf)
2075 struct emac_ethtool_regs_subhdr *hdr = buf;
2077 hdr->index = dev->cell_index;
2078 if (emac_has_feature(dev, EMAC_FTR_EMAC4)) {
2079 hdr->version = EMAC4_ETHTOOL_REGS_VER;
2080 memcpy_fromio(hdr + 1, dev->emacp, EMAC4_ETHTOOL_REGS_SIZE(dev));
2081 return ((void *)(hdr + 1) + EMAC4_ETHTOOL_REGS_SIZE(dev));
2083 hdr->version = EMAC_ETHTOOL_REGS_VER;
2084 memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE(dev));
2085 return ((void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE(dev));
/*
 * emac_ethtool_get_regs - assemble the full register dump: file header
 * with a component bitmap, then MAL, EMAC, and optional
 * ZMII/RGMII/TAH sections appended in order.
 */
2089 static void emac_ethtool_get_regs(struct net_device *ndev,
2090 struct ethtool_regs *regs, void *buf)
2092 struct emac_instance *dev = netdev_priv(ndev);
2093 struct emac_ethtool_regs_hdr *hdr = buf;
2095 hdr->components = 0;
2098 buf = mal_dump_regs(dev->mal, buf);
2099 buf = emac_dump_regs(dev, buf);
2100 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) {
2101 hdr->components |= EMAC_ETHTOOL_REGS_ZMII;
2102 buf = zmii_dump_regs(dev->zmii_dev, buf);
2104 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) {
2105 hdr->components |= EMAC_ETHTOOL_REGS_RGMII;
2106 buf = rgmii_dump_regs(dev->rgmii_dev, buf);
2108 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH)) {
2109 hdr->components |= EMAC_ETHTOOL_REGS_TAH;
2110 buf = tah_dump_regs(dev->tah_dev, buf);
/*
 * emac_ethtool_nway_reset - restart autonegotiation.
 * Requires an attached PHY with autoneg currently enabled; restarts
 * aneg with the cached advertising mask and forces a link update.
 */
2114 static int emac_ethtool_nway_reset(struct net_device *ndev)
2116 struct emac_instance *dev = netdev_priv(ndev);
2119 DBG(dev, "nway_reset" NL);
2121 if (dev->phy.address < 0)
2124 mutex_lock(&dev->link_lock);
2125 if (!dev->phy.autoneg) {
2130 dev->phy.def->ops->setup_aneg(&dev->phy, dev->phy.advertising);
2132 mutex_unlock(&dev->link_lock);
2133 emac_force_link_update(dev);
/* Number of u64 counters exported via ethtool -S. */
2137 static int emac_ethtool_get_stats_count(struct net_device *ndev)
2139 return EMAC_ETHTOOL_STATS_COUNT;
/* Copy the statistics key names for the ETH_SS_STATS string set. */
2142 static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset,
2145 if (stringset == ETH_SS_STATS)
2146 memcpy(buf, &emac_stats_keys, sizeof(emac_stats_keys));
/* Export dev->stats followed by dev->estats as a flat u64 array. */
2149 static void emac_ethtool_get_ethtool_stats(struct net_device *ndev,
2150 struct ethtool_stats *estats,
2153 struct emac_instance *dev = netdev_priv(ndev);
2155 memcpy(tmp_stats, &dev->stats, sizeof(dev->stats));
2156 tmp_stats += sizeof(dev->stats) / sizeof(u64);
2157 memcpy(tmp_stats, &dev->estats, sizeof(dev->estats));
/*
 * emac_ethtool_get_drvinfo - fill driver name/version and a bus_info
 * string built from the cell index and the OF node path.
 */
2160 static void emac_ethtool_get_drvinfo(struct net_device *ndev,
2161 struct ethtool_drvinfo *info)
2163 struct emac_instance *dev = netdev_priv(ndev);
2165 strcpy(info->driver, "ibm_emac");
2166 strcpy(info->version, DRV_VERSION);
2167 info->fw_version[0] = '\0';
2168 sprintf(info->bus_info, "PPC 4xx EMAC-%d %s",
2169 dev->cell_index, dev->ofdev->node->full_name);
2170 info->n_stats = emac_ethtool_get_stats_count(ndev);
2171 info->regdump_len = emac_ethtool_get_regs_len(ndev);
/* ethtool operations table wiring the handlers above. */
2174 static const struct ethtool_ops emac_ethtool_ops = {
2175 .get_settings = emac_ethtool_get_settings,
2176 .set_settings = emac_ethtool_set_settings,
2177 .get_drvinfo = emac_ethtool_get_drvinfo,
2179 .get_regs_len = emac_ethtool_get_regs_len,
2180 .get_regs = emac_ethtool_get_regs,
2182 .nway_reset = emac_ethtool_nway_reset,
2184 .get_ringparam = emac_ethtool_get_ringparam,
2185 .get_pauseparam = emac_ethtool_get_pauseparam,
2187 .get_rx_csum = emac_ethtool_get_rx_csum,
2189 .get_strings = emac_ethtool_get_strings,
2190 .get_stats_count = emac_ethtool_get_stats_count,
2191 .get_ethtool_stats = emac_ethtool_get_ethtool_stats,
2193 .get_link = ethtool_op_get_link,
2194 .get_tx_csum = ethtool_op_get_tx_csum,
2195 .get_sg = ethtool_op_get_sg,
/*
 * emac_ioctl - private MII-style ioctls: report the PHY address, and
 * read/write MII registers (write requires CAP_NET_ADMIN).  Requires
 * an attached PHY.  Return lines are elided in this excerpt.
 */
2198 static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
2200 struct emac_instance *dev = netdev_priv(ndev);
2201 uint16_t *data = (uint16_t *) & rq->ifr_ifru;
2203 DBG(dev, "ioctl %08x" NL, cmd);
2205 if (dev->phy.address < 0)
2210 case SIOCDEVPRIVATE:
2211 data[0] = dev->phy.address;
2214 case SIOCDEVPRIVATE + 1:
2215 data[3] = emac_mdio_read(ndev, dev->phy.address, data[1]);
2219 case SIOCDEVPRIVATE + 2:
2220 if (!capable(CAP_NET_ADMIN))
2222 emac_mdio_write(ndev, dev->phy.address, data[1], data[2]);
/* One tracked probe dependency (companion device referenced by
 * phandle), plus fixed indices into the dependency array. */
2229 struct emac_depentry {
2231 struct device_node *node;
2232 struct of_device *ofdev;
2236 #define EMAC_DEP_MAL_IDX 0
2237 #define EMAC_DEP_ZMII_IDX 1
2238 #define EMAC_DEP_RGMII_IDX 2
2239 #define EMAC_DEP_TAH_IDX 3
2240 #define EMAC_DEP_MDIO_IDX 4
2241 #define EMAC_DEP_PREV_IDX 5
2242 #define EMAC_DEP_COUNT 6
/*
 * emac_check_deps - resolve each dependency step by step
 * (phandle -> node -> of_device -> drvdata) and count how many are
 * fully bound; returns true only when all EMAC_DEP_COUNT are ready.
 * The "previous EMAC" entry is special-cased via the boot list.
 */
2244 static int __devinit emac_check_deps(struct emac_instance *dev,
2245 struct emac_depentry *deps)
2248 struct device_node *np;
2250 for (i = 0; i < EMAC_DEP_COUNT; i++) {
2251 /* no dependency on that item, allright */
2252 if (deps[i].phandle == 0) {
2256 /* special case for blist as the dependency might go away */
2257 if (i == EMAC_DEP_PREV_IDX) {
2258 np = *(dev->blist - 1);
2260 deps[i].phandle = 0;
2264 if (deps[i].node == NULL)
2265 deps[i].node = of_node_get(np);
2267 if (deps[i].node == NULL)
2268 deps[i].node = of_find_node_by_phandle(deps[i].phandle);
2269 if (deps[i].node == NULL)
2271 if (deps[i].ofdev == NULL)
2272 deps[i].ofdev = of_find_device_by_node(deps[i].node);
2273 if (deps[i].ofdev == NULL)
2275 if (deps[i].drvdata == NULL)
2276 deps[i].drvdata = dev_get_drvdata(&deps[i].ofdev->dev);
2277 if (deps[i].drvdata != NULL)
2280 return (there == EMAC_DEP_COUNT);
/* Release the of_device references taken on companion devices. */
2283 static void emac_put_deps(struct emac_instance *dev)
2286 of_dev_put(dev->mal_dev);
2288 of_dev_put(dev->zmii_dev);
2290 of_dev_put(dev->rgmii_dev);
2292 of_dev_put(dev->mdio_dev);
2294 of_dev_put(dev->tah_dev);
/* Bus notifier: wake waiters whenever a driver binds, so probes
 * blocked in emac_wait_deps() re-check their dependencies. */
2297 static int __devinit emac_of_bus_notify(struct notifier_block *nb,
2298 unsigned long action, void *data)
2300 /* We are only intereted in device addition */
2301 if (action == BUS_NOTIFY_BOUND_DRIVER)
2302 wake_up_all(&emac_probe_wait);
/* Registered around the dependency wait in emac_wait_deps(). */
2306 static struct notifier_block emac_of_bus_notifier __devinitdata = {
2307 .notifier_call = emac_of_bus_notify
/*
 * emac_wait_deps - wait (with timeout) until all companion devices
 * named in the device tree are probed, using the bus notifier to
 * re-check on each driver bind.  On success the resolved of_device
 * pointers are stored in dev; on timeout the references are dropped.
 */
2310 static int __devinit emac_wait_deps(struct emac_instance *dev)
2312 struct emac_depentry deps[EMAC_DEP_COUNT];
2315 memset(&deps, 0, sizeof(deps));
2317 deps[EMAC_DEP_MAL_IDX].phandle = dev->mal_ph;
2318 deps[EMAC_DEP_ZMII_IDX].phandle = dev->zmii_ph;
2319 deps[EMAC_DEP_RGMII_IDX].phandle = dev->rgmii_ph;
2321 deps[EMAC_DEP_TAH_IDX].phandle = dev->tah_ph;
2323 deps[EMAC_DEP_MDIO_IDX].phandle = dev->mdio_ph;
2324 if (dev->blist && dev->blist > emac_boot_list)
2325 deps[EMAC_DEP_PREV_IDX].phandle = 0xffffffffu;
2326 bus_register_notifier(&of_platform_bus_type, &emac_of_bus_notifier);
2327 wait_event_timeout(emac_probe_wait,
2328 emac_check_deps(dev, deps),
2329 EMAC_PROBE_DEP_TIMEOUT);
2330 bus_unregister_notifier(&of_platform_bus_type, &emac_of_bus_notifier);
2331 err = emac_check_deps(dev, deps) ? 0 : -ENODEV;
2332 for (i = 0; i < EMAC_DEP_COUNT; i++) {
2334 of_node_put(deps[i].node);
2335 if (err && deps[i].ofdev)
2336 of_dev_put(deps[i].ofdev);
2339 dev->mal_dev = deps[EMAC_DEP_MAL_IDX].ofdev;
2340 dev->zmii_dev = deps[EMAC_DEP_ZMII_IDX].ofdev;
2341 dev->rgmii_dev = deps[EMAC_DEP_RGMII_IDX].ofdev;
2342 dev->tah_dev = deps[EMAC_DEP_TAH_IDX].ofdev;
2343 dev->mdio_dev = deps[EMAC_DEP_MDIO_IDX].ofdev;
2345 if (deps[EMAC_DEP_PREV_IDX].ofdev)
2346 of_dev_put(deps[EMAC_DEP_PREV_IDX].ofdev);
/*
 * emac_read_uint_prop - read a u32 device-tree property into *val.
 * Missing/short properties are an error only when 'fatal' is set
 * (then a message is printed).  Success path elided in this excerpt.
 */
2350 static int __devinit emac_read_uint_prop(struct device_node *np, const char *name,
2351 u32 *val, int fatal)
2354 const u32 *prop = of_get_property(np, name, &len);
2355 if (prop == NULL || len < sizeof(u32)) {
2357 printk(KERN_ERR "%s: missing %s property\n",
2358 np->full_name, name);
/*
 * emac_init_phy - find and initialize the PHY at probe time.
 * Handles the PHY-less case (fixed 100/full), enables the 440GX
 * internal clock workaround during the scan, probes each candidate
 * MDIO address (phy_map / phy_address narrow the search, busy_phy_map
 * is claimed under emac_phy_map_lock), then programs initial link
 * parameters: autoneg with pause advertising when supported, otherwise
 * the highest forced speed/duplex.  Several assignments and returns
 * are elided in this excerpt - verify against the full source.
 */
2365 static int __devinit emac_init_phy(struct emac_instance *dev)
2367 struct device_node *np = dev->ofdev->node;
2368 struct net_device *ndev = dev->ndev;
2372 dev->phy.dev = ndev;
2373 dev->phy.mode = dev->phy_mode;
2375 /* PHY-less configuration.
2376 * XXX I probably should move these settings to the dev tree
2378 if (dev->phy_address == 0xffffffff && dev->phy_map == 0xffffffff) {
2381 /* PHY-less configuration.
2382 * XXX I probably should move these settings to the dev tree
2384 dev->phy.address = -1;
2385 dev->phy.features = SUPPORTED_100baseT_Full | SUPPORTED_MII;
2391 mutex_lock(&emac_phy_map_lock);
2392 phy_map = dev->phy_map | busy_phy_map;
2394 DBG(dev, "PHY maps %08x %08x" NL, dev->phy_map, busy_phy_map);
2396 dev->phy.mdio_read = emac_mdio_read;
2397 dev->phy.mdio_write = emac_mdio_write;
2399 /* Enable internal clock source */
2400 #ifdef CONFIG_PPC_DCR_NATIVE
2401 if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
2402 dcri_clrset(SDR0, SDR0_MFR, 0, SDR0_MFR_ECS);
2404 /* PHY clock workaround */
2405 emac_rx_clk_tx(dev);
2407 /* Enable internal clock source on 440GX*/
2408 #ifdef CONFIG_PPC_DCR_NATIVE
2409 if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
2410 dcri_clrset(SDR0, SDR0_MFR, 0, SDR0_MFR_ECS);
2412 /* Configure EMAC with defaults so we can at least use MDIO
2413 * This is needed mostly for 440GX
2415 if (emac_phy_gpcs(dev->phy.mode)) {
2417 * Make GPCS PHY address equal to EMAC index.
2418 * We probably should take into account busy_phy_map
2419 * and/or phy_map here.
2421 * Note that the busy_phy_map is currently global
2422 * while it should probably be per-ASIC...
2424 dev->phy.address = dev->cell_index;
2427 emac_configure(dev);
2429 if (dev->phy_address != 0xffffffff)
2430 phy_map = ~(1 << dev->phy_address);
2432 for (i = 0; i < 0x20; phy_map >>= 1, ++i)
2433 if (!(phy_map & 1)) {
2435 busy_phy_map |= 1 << i;
2437 /* Quick check if there is a PHY at the address */
2438 r = emac_mdio_read(dev->ndev, i, MII_BMCR);
2439 if (r == 0xffff || r < 0)
2441 if (!emac_mii_phy_probe(&dev->phy, i))
2445 /* Enable external clock source */
2446 #ifdef CONFIG_PPC_DCR_NATIVE
2447 if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
2448 dcri_clrset(SDR0, SDR0_MFR, SDR0_MFR_ECS, 0);
2450 mutex_unlock(&emac_phy_map_lock);
2452 printk(KERN_WARNING "%s: can't find PHY!\n", np->full_name);
2457 if (dev->phy.def->ops->init)
2458 dev->phy.def->ops->init(&dev->phy);
2460 /* Disable any PHY features not supported by the platform */
2461 dev->phy.def->features &= ~dev->phy_feat_exc;
2463 /* Setup initial link parameters */
2464 if (dev->phy.features & SUPPORTED_Autoneg) {
2465 adv = dev->phy.features;
2466 if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x))
2467 adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
2468 /* Restart autonegotiation */
2469 dev->phy.def->ops->setup_aneg(&dev->phy, adv);
2471 u32 f = dev->phy.def->features;
2472 int speed = SPEED_10, fd = DUPLEX_HALF;
2474 /* Select highest supported speed/duplex */
2475 if (f & SUPPORTED_1000baseT_Full) {
2478 } else if (f & SUPPORTED_1000baseT_Half)
2480 else if (f & SUPPORTED_100baseT_Full) {
2483 } else if (f & SUPPORTED_100baseT_Half)
2485 else if (f & SUPPORTED_10baseT_Full)
2488 /* Force link parameters */
2489 dev->phy.def->ops->setup_forced(&dev->phy, speed, fd);
/*
 * emac_init_config - read per-EMAC configuration from the device tree.
 *
 * Properties read with a final argument of 1 to emac_read_uint_prop()
 * are mandatory (mal-device, mal-tx-channel, mal-rx-channel, cell-index,
 * parent clock-frequency); optional properties fall back to the defaults
 * assigned in the matching "if" bodies below (max-frame-size -> 1500,
 * fifo sizes -> 2048, fifo-entry-size -> 16, mal-burst-size -> 256,
 * phy-address/phy-map -> 0xffffffff meaning "none/any").
 * NOTE(review): the error-return paths of the mandatory reads are not
 * visible in this excerpt -- presumably they bail out with an error code;
 * confirm against the full source.
 */
2494 static int __devinit emac_init_config(struct emac_instance *dev)
2496 struct device_node *np = dev->ofdev->node;
/* String table indexed by PHY_MODE_* for decoding the "phy-mode" property */
2499 const char *pm, *phy_modes[] = {
2501 [PHY_MODE_MII] = "mii",
2502 [PHY_MODE_RMII] = "rmii",
2503 [PHY_MODE_SMII] = "smii",
2504 [PHY_MODE_RGMII] = "rgmii",
2505 [PHY_MODE_TBI] = "tbi",
2506 [PHY_MODE_GMII] = "gmii",
2507 [PHY_MODE_RTBI] = "rtbi",
2508 [PHY_MODE_SGMII] = "sgmii",
2511 /* Read config from device-tree */
2512 if (emac_read_uint_prop(np, "mal-device", &dev->mal_ph, 1))
2514 if (emac_read_uint_prop(np, "mal-tx-channel", &dev->mal_tx_chan, 1))
2516 if (emac_read_uint_prop(np, "mal-rx-channel", &dev->mal_rx_chan, 1))
2518 if (emac_read_uint_prop(np, "cell-index", &dev->cell_index, 1))
2520 if (emac_read_uint_prop(np, "max-frame-size", &dev->max_mtu, 0))
2521 dev->max_mtu = 1500;
2522 if (emac_read_uint_prop(np, "rx-fifo-size", &dev->rx_fifo_size, 0))
2523 dev->rx_fifo_size = 2048;
2524 if (emac_read_uint_prop(np, "tx-fifo-size", &dev->tx_fifo_size, 0))
2525 dev->tx_fifo_size = 2048;
/* Gige FIFO sizes default to the 10/100 values when not given explicitly */
2526 if (emac_read_uint_prop(np, "rx-fifo-size-gige", &dev->rx_fifo_size_gige, 0))
2527 dev->rx_fifo_size_gige = dev->rx_fifo_size;
2528 if (emac_read_uint_prop(np, "tx-fifo-size-gige", &dev->tx_fifo_size_gige, 0))
2529 dev->tx_fifo_size_gige = dev->tx_fifo_size;
2530 if (emac_read_uint_prop(np, "phy-address", &dev->phy_address, 0))
2531 dev->phy_address = 0xffffffff;
2532 if (emac_read_uint_prop(np, "phy-map", &dev->phy_map, 0))
2533 dev->phy_map = 0xffffffff;
/* OPB bus frequency comes from the parent (bus) node, and is mandatory */
2534 if (emac_read_uint_prop(np->parent, "clock-frequency", &dev->opb_bus_freq, 1))
2536 if (emac_read_uint_prop(np, "tah-device", &dev->tah_ph, 0))
2538 if (emac_read_uint_prop(np, "tah-channel", &dev->tah_port, 0))
2540 if (emac_read_uint_prop(np, "mdio-device", &dev->mdio_ph, 0))
2542 if (emac_read_uint_prop(np, "zmii-device", &dev->zmii_ph, 0))
2544 if (emac_read_uint_prop(np, "zmii-channel", &dev->zmii_port, 0))
2545 dev->zmii_port = 0xffffffff; /* was "0xffffffff;;" -- dropped stray empty statement */
2546 if (emac_read_uint_prop(np, "rgmii-device", &dev->rgmii_ph, 0))
2548 if (emac_read_uint_prop(np, "rgmii-channel", &dev->rgmii_port, 0))
2549 dev->rgmii_port = 0xffffffff; /* was "0xffffffff;;" -- dropped stray empty statement */
2550 if (emac_read_uint_prop(np, "fifo-entry-size", &dev->fifo_entry_size, 0))
2551 dev->fifo_entry_size = 16;
2552 if (emac_read_uint_prop(np, "mal-burst-size", &dev->mal_burst_size, 0))
2553 dev->mal_burst_size = 256;
2555 /* PHY mode needs some decoding */
2556 dev->phy_mode = PHY_MODE_NA;
2557 pm = of_get_property(np, "phy-mode", &plen);
/* Match the property string against the phy_modes[] table above */
2560 for (i = 0; i < ARRAY_SIZE(phy_modes); i++)
2561 if (!strcasecmp(pm, phy_modes[i])) {
2567 /* Backward compat with non-final DT */
/* Old device trees encoded phy-mode as a raw 4-byte PHY_MODE_* number */
2568 if (dev->phy_mode == PHY_MODE_NA && pm != NULL && plen == 4) {
2569 u32 nmode = *(const u32 *)pm;
2570 if (nmode > PHY_MODE_NA && nmode <= PHY_MODE_SGMII)
2571 dev->phy_mode = nmode;
2574 /* Check EMAC version */
2575 if (of_device_is_compatible(np, "ibm,emac4sync")) {
2576 dev->features |= (EMAC_FTR_EMAC4 | EMAC_FTR_EMAC4SYNC);
2577 if (of_device_is_compatible(np, "ibm,emac-460ex") ||
2578 of_device_is_compatible(np, "ibm,emac-460gt"))
2579 dev->features |= EMAC_FTR_460EX_PHY_CLK_FIX;
2580 } else if (of_device_is_compatible(np, "ibm,emac4")) {
2581 dev->features |= EMAC_FTR_EMAC4;
2582 if (of_device_is_compatible(np, "ibm,emac-440gx"))
2583 dev->features |= EMAC_FTR_440GX_PHY_CLK_FIX;
2585 if (of_device_is_compatible(np, "ibm,emac-440ep") ||
2586 of_device_is_compatible(np, "ibm,emac-440gr"))
2587 dev->features |= EMAC_FTR_440EP_PHY_CLK_FIX;
2590 /* Fixup some feature bits based on the device tree */
2591 if (of_get_property(np, "has-inverted-stacr-oc", NULL))
2592 dev->features |= EMAC_FTR_STACR_OC_INVERT;
2593 if (of_get_property(np, "has-new-stacr-staopc", NULL))
2594 dev->features |= EMAC_FTR_HAS_NEW_STACR;
2596 /* CAB lacks the appropriate properties */
2597 if (of_device_is_compatible(np, "ibm,emac-axon"))
2598 dev->features |= EMAC_FTR_HAS_NEW_STACR |
2599 EMAC_FTR_STACR_OC_INVERT;
2601 /* Enable TAH/ZMII/RGMII features as found */
/* Each feature is only usable if its support module was compiled in;
 * otherwise we complain loudly rather than silently mis-configuring. */
2602 if (dev->tah_ph != 0) {
2603 #ifdef CONFIG_IBM_NEW_EMAC_TAH
2604 dev->features |= EMAC_FTR_HAS_TAH;
2606 printk(KERN_ERR "%s: TAH support not enabled !\n",
2612 if (dev->zmii_ph != 0) {
2613 #ifdef CONFIG_IBM_NEW_EMAC_ZMII
2614 dev->features |= EMAC_FTR_HAS_ZMII;
2616 printk(KERN_ERR "%s: ZMII support not enabled !\n",
2622 if (dev->rgmii_ph != 0) {
2623 #ifdef CONFIG_IBM_NEW_EMAC_RGMII
2624 dev->features |= EMAC_FTR_HAS_RGMII;
2626 printk(KERN_ERR "%s: RGMII support not enabled !\n",
2632 /* Read MAC-address */
2633 p = of_get_property(np, "local-mac-address", NULL);
2635 printk(KERN_ERR "%s: Can't find local-mac-address property\n",
2639 memcpy(dev->ndev->dev_addr, p, 6);
2641 /* IAHT and GAHT filter parameterization */
/* Hash-table geometry differs between emac4sync and older cores */
2642 if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) {
2643 dev->xaht_slots_shift = EMAC4SYNC_XAHT_SLOTS_SHIFT;
2644 dev->xaht_width_shift = EMAC4SYNC_XAHT_WIDTH_SHIFT;
2646 dev->xaht_slots_shift = EMAC4_XAHT_SLOTS_SHIFT;
2647 dev->xaht_width_shift = EMAC4_XAHT_WIDTH_SHIFT;
2650 DBG(dev, "features : 0x%08x / 0x%08x\n", dev->features, EMAC_FTRS_POSSIBLE);
2651 DBG(dev, "tx_fifo_size : %d (%d gige)\n", dev->tx_fifo_size, dev->tx_fifo_size_gige);
2652 DBG(dev, "rx_fifo_size : %d (%d gige)\n", dev->rx_fifo_size, dev->rx_fifo_size_gige);
2653 DBG(dev, "max_mtu : %d\n", dev->max_mtu);
2654 DBG(dev, "OPB freq : %d\n", dev->opb_bus_freq);
/*
 * emac_probe - bind one EMAC device-tree node to the driver.
 *
 * Visible sequence: skip unused/disabled nodes, locate our slot in
 * emac_boot_list, alloc_etherdev(), init locks and the reset work item,
 * emac_init_config(), map IRQs and the register block, wait for dependent
 * devices (MAL, MDIO, ...), register with MAL, clear the BD rings, attach
 * ZMII/RGMII/TAH according to the feature bits, set link defaults, init
 * the PHY, fill in the net_device ops and register_netdev().  Error paths
 * unwind in reverse order through the goto labels near the bottom.
 * NOTE(review): several intermediate lines (returns, labels, closing
 * braces) are elided in this excerpt.
 */
2659 static int __devinit emac_probe(struct of_device *ofdev,
2660 const struct of_device_id *match)
2662 struct net_device *ndev;
2663 struct emac_instance *dev;
2664 struct device_node *np = ofdev->node;
2665 struct device_node **blist = NULL;
2668 /* Skip unused/unwired EMACS. We leave the check for an unused
2669 * property here for now, but new flat device trees should set a
2670 * status property to "disabled" instead.
2672 if (of_get_property(np, "unused", NULL) || !of_device_is_available(np))
2675 /* Find ourselves in the bootlist if we are there */
2676 for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
2677 if (emac_boot_list[i] == np)
2678 blist = &emac_boot_list[i];
2680 /* Allocate our net_device structure */
2682 ndev = alloc_etherdev(sizeof(struct emac_instance));
2684 printk(KERN_ERR "%s: could not allocate ethernet device!\n",
2688 dev = netdev_priv(ndev);
2692 SET_NETDEV_DEV(ndev, &ofdev->dev);
2694 /* Initialize some embedded data structures */
2695 mutex_init(&dev->mdio_lock);
2696 mutex_init(&dev->link_lock);
2697 spin_lock_init(&dev->lock);
2698 INIT_WORK(&dev->reset_work, emac_reset_work);
2700 /* Init various config data based on device-tree */
2701 err = emac_init_config(dev);
2705 /* Get interrupts. EMAC irq is mandatory, WOL irq is optional */
2706 dev->emac_irq = irq_of_parse_and_map(np, 0);
2707 dev->wol_irq = irq_of_parse_and_map(np, 1);
2708 if (dev->emac_irq == NO_IRQ) {
2709 printk(KERN_ERR "%s: Can't map main interrupt\n", np->full_name);
2712 ndev->irq = dev->emac_irq;
/* Map the EMAC register block; resource 0 of the device node */
2715 if (of_address_to_resource(np, 0, &dev->rsrc_regs)) {
2716 printk(KERN_ERR "%s: Can't get registers address\n",
2720 // TODO : request_mem_region
2721 dev->emacp = ioremap(dev->rsrc_regs.start,
2722 dev->rsrc_regs.end - dev->rsrc_regs.start + 1);
2723 if (dev->emacp == NULL) {
2724 printk(KERN_ERR "%s: Can't map device registers!\n",
2730 /* Wait for dependent devices */
2731 err = emac_wait_deps(dev);
2734 "%s: Timeout waiting for dependent devices\n",
2736 /* display more info about what's missing ? */
2739 dev->mal = dev_get_drvdata(&dev->mal_dev->dev);
2740 if (dev->mdio_dev != NULL)
2741 dev->mdio_instance = dev_get_drvdata(&dev->mdio_dev->dev);
2743 /* Register with MAL */
2744 dev->commac.ops = &emac_commac_ops;
2745 dev->commac.dev = dev;
2746 dev->commac.tx_chan_mask = MAL_CHAN_MASK(dev->mal_tx_chan);
2747 dev->commac.rx_chan_mask = MAL_CHAN_MASK(dev->mal_rx_chan);
2748 err = mal_register_commac(dev->mal, &dev->commac);
2750 printk(KERN_ERR "%s: failed to register with mal %s!\n",
2751 np->full_name, dev->mal_dev->node->full_name);
2754 dev->rx_skb_size = emac_rx_skb_size(ndev->mtu);
2755 dev->rx_sync_size = emac_rx_sync_size(ndev->mtu);
2757 /* Get pointers to BD rings */
2759 dev->mal->bd_virt + mal_tx_bd_offset(dev->mal, dev->mal_tx_chan);
2761 dev->mal->bd_virt + mal_rx_bd_offset(dev->mal, dev->mal_rx_chan);
2763 DBG(dev, "tx_desc %p" NL, dev->tx_desc);
2764 DBG(dev, "rx_desc %p" NL, dev->rx_desc);
/* Start with clean descriptor rings and empty skb tracking arrays */
2767 memset(dev->tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor));
2768 memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor));
2769 memset(dev->tx_skb, 0, NUM_TX_BUFF * sizeof(struct sk_buff *));
2770 memset(dev->rx_skb, 0, NUM_RX_BUFF * sizeof(struct sk_buff *));
2772 /* Attach to ZMII, if needed */
2773 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII) &&
2774 (err = zmii_attach(dev->zmii_dev, dev->zmii_port, &dev->phy_mode)) != 0)
2775 goto err_unreg_commac;
2777 /* Attach to RGMII, if needed */
2778 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII) &&
2779 (err = rgmii_attach(dev->rgmii_dev, dev->rgmii_port, dev->phy_mode)) != 0)
2780 goto err_detach_zmii;
2782 /* Attach to TAH, if needed */
2783 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
2784 (err = tah_attach(dev->tah_dev, dev->tah_port)) != 0)
2785 goto err_detach_rgmii;
2787 /* Set some link defaults before we can find out real parameters */
2788 dev->phy.speed = SPEED_100;
2789 dev->phy.duplex = DUPLEX_FULL;
2790 dev->phy.autoneg = AUTONEG_DISABLE;
2791 dev->phy.pause = dev->phy.asym_pause = 0;
2792 dev->stop_timeout = STOP_TIMEOUT_100;
2793 INIT_DELAYED_WORK(&dev->link_work, emac_link_timer);
2795 /* Find PHY if any */
2796 err = emac_init_phy(dev);
2798 goto err_detach_tah;
2800 /* Fill in the driver function table */
2801 ndev->open = &emac_open;
2803 ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
2804 ndev->tx_timeout = &emac_tx_timeout;
2805 ndev->watchdog_timeo = 5 * HZ;
2806 ndev->stop = &emac_close;
2807 ndev->get_stats = &emac_stats;
2808 ndev->set_multicast_list = &emac_set_multicast_list;
2809 ndev->do_ioctl = &emac_ioctl;
/* Gige-capable PHY modes get the scatter-gather xmit path and MTU changes */
2810 if (emac_phy_supports_gige(dev->phy_mode)) {
2811 ndev->hard_start_xmit = &emac_start_xmit_sg;
2812 ndev->change_mtu = &emac_change_mtu;
2813 dev->commac.ops = &emac_commac_sg_ops;
2815 ndev->hard_start_xmit = &emac_start_xmit;
2817 SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);
2819 netif_carrier_off(ndev);
2820 netif_stop_queue(ndev);
2822 err = register_netdev(ndev);
2824 printk(KERN_ERR "%s: failed to register net device (%d)!\n",
2825 np->full_name, err);
2826 goto err_detach_tah;
2829 /* Set our drvdata last as we don't want them visible until we are
2833 dev_set_drvdata(&ofdev->dev, dev);
2835 /* There's a new kid in town ! Let's tell everybody */
2836 wake_up_all(&emac_probe_wait);
2840 "%s: EMAC-%d %s, MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
2841 ndev->name, dev->cell_index, np->full_name,
2842 ndev->dev_addr[0], ndev->dev_addr[1], ndev->dev_addr[2],
2843 ndev->dev_addr[3], ndev->dev_addr[4], ndev->dev_addr[5]);
2845 if (dev->phy.address >= 0)
2846 printk("%s: found %s PHY (0x%02x)\n", ndev->name,
2847 dev->phy.def->name, dev->phy.address);
2849 emac_dbg_register(dev);
/* Error unwinding: each label undoes one successful setup step above */
2854 /* I have a bad feeling about this ... */
2857 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2858 tah_detach(dev->tah_dev, dev->tah_port);
2860 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2861 rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
2863 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2864 zmii_detach(dev->zmii_dev, dev->zmii_port);
2866 mal_unregister_commac(dev->mal, &dev->commac);
2870 iounmap(dev->emacp);
2872 if (dev->wol_irq != NO_IRQ)
2873 irq_dispose_mapping(dev->wol_irq);
2874 if (dev->emac_irq != NO_IRQ)
2875 irq_dispose_mapping(dev->emac_irq);
2879 /* if we were on the bootlist, remove us as we won't show up and
2880 * wake up all waiters to notify them in case they were waiting
2885 wake_up_all(&emac_probe_wait);
/*
 * emac_remove - tear down an EMAC instance on driver unbind.
 *
 * Mirrors emac_probe() in reverse: clear drvdata, unregister the
 * net_device, flush pending work (reset/link timers), detach
 * TAH/RGMII/ZMII, unregister from MAL, unmap registers and dispose of
 * the IRQ mappings.  Final free of the net_device is not visible in
 * this excerpt.
 */
2890 static int __devexit emac_remove(struct of_device *ofdev)
2892 struct emac_instance *dev = dev_get_drvdata(&ofdev->dev);
2894 DBG(dev, "remove" NL);
2896 dev_set_drvdata(&ofdev->dev, NULL);
2898 unregister_netdev(dev->ndev);
/* Make sure reset_work / link_work are not still running against us */
2900 flush_scheduled_work();
2902 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2903 tah_detach(dev->tah_dev, dev->tah_port);
2904 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2905 rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
2906 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2907 zmii_detach(dev->zmii_dev, dev->zmii_port);
2909 mal_unregister_commac(dev->mal, &dev->commac);
2912 emac_dbg_unregister(dev);
2913 iounmap(dev->emacp);
2915 if (dev->wol_irq != NO_IRQ)
2916 irq_dispose_mapping(dev->wol_irq);
2917 if (dev->emac_irq != NO_IRQ)
2918 irq_dispose_mapping(dev->emac_irq);
2925 /* XXX Features in here should be replaced by properties... */
/* Device-tree compatible strings this driver binds against; also used by
 * emac_make_bootlist() via of_match_node(). */
2926 static struct of_device_id emac_match[] =
2930 .compatible = "ibm,emac",
2934 .compatible = "ibm,emac4",
2938 .compatible = "ibm,emac4sync",
/* OF platform driver glue: ties the match table to probe/remove */
2943 static struct of_platform_driver emac_driver = {
2945 .match_table = emac_match,
2947 .probe = emac_probe,
2948 .remove = emac_remove,
/*
 * emac_make_bootlist - collect all matching, in-use EMAC nodes and sort
 * them into emac_boot_list[] by their "cell-index" property.
 *
 * NOTE(review): the assignment of 'max' (count of collected nodes) is
 * not visible in this excerpt -- presumably max = i after the scan loop;
 * confirm against the full source.
 */
2951 static void __init emac_make_bootlist(void)
2953 struct device_node *np = NULL;
2954 int j, max, i = 0, k;
2955 int cell_indices[EMAC_BOOT_LIST_SIZE];
/* Walk every node in the tree, keeping those matching emac_match[]
 * and not explicitly marked "unused" */
2958 while((np = of_find_all_nodes(np)) != NULL) {
2961 if (of_match_node(emac_match, np) == NULL)
2963 if (of_get_property(np, "unused", NULL))
2965 idx = of_get_property(np, "cell-index", NULL);
2968 cell_indices[i] = *idx;
/* of_node_get(): hold a reference; released later in emac_exit() */
2969 emac_boot_list[i++] = of_node_get(np);
2970 if (i >= EMAC_BOOT_LIST_SIZE) {
2977 /* Bubble sort them (doh, what a creative algorithm :-) */
/* Selection-style in-place sort of both arrays, keyed on cell index */
2978 for (i = 0; max > 1 && (i < (max - 1)); i++)
2979 for (j = i; j < max; j++) {
2980 if (cell_indices[i] > cell_indices[j]) {
2981 np = emac_boot_list[i];
2982 emac_boot_list[i] = emac_boot_list[j];
2983 emac_boot_list[j] = np;
2984 k = cell_indices[i];
2985 cell_indices[i] = cell_indices[j];
2986 cell_indices[j] = k;
/*
 * emac_init - module entry point: print banner, build the boot list,
 * initialize submodules (elided here) and register the OF platform
 * driver.  Error handling between these steps is not visible in this
 * excerpt.
 */
2991 static int __init emac_init(void)
2995 printk(KERN_INFO DRV_DESC ", version " DRV_VERSION "\n");
2997 /* Init debug stuff */
3000 /* Build EMAC boot list */
3001 emac_make_bootlist();
3003 /* Init submodules */
3016 rc = of_register_platform_driver(&emac_driver);
/*
 * emac_exit - module exit: unregister the driver and drop the node
 * references taken by emac_make_bootlist().
 */
3034 static void __exit emac_exit(void)
3038 of_unregister_platform_driver(&emac_driver);
3046 /* Destroy EMAC boot list */
3047 for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
3048 if (emac_boot_list[i])
/* Balances the of_node_get() done when the list was built */
3049 of_node_put(emac_boot_list[i]);
/* Standard kernel module entry/exit hooks */
3052 module_init(emac_init);
3053 module_exit(emac_exit);