/*
 * drivers/net/ibm_newemac/core.c
 *
 * Driver for PowerPC 4xx on-chip ethernet controller.
 *
 * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
 *                <benh@kernel.crashing.org>
 *
 * Based on the arch/ppc version of the driver:
 *
 * Copyright (c) 2004, 2005 Zultys Technologies.
 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
 *
 * Based on original work by
 *      Matt Porter <mporter@kernel.crashing.org>
 *      (c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
 *      Armin Kuster <akuster@mvista.com>
 *      Johnnie Peters <jpeters@mvista.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/bitops.h>
#include <linux/workqueue.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/uaccess.h>
#include <asm/dcr.h>
#include <asm/dcr-regs.h>

#include "core.h"
/*
 * Lack of dma_unmap_???? calls is intentional.
 *
 * API-correct usage requires additional support state information to be
 * maintained for every RX and TX buffer descriptor (BD). Unfortunately, due to
 * EMAC design (e.g. TX buffer passed from network stack can be split into
 * several BDs, dma_map_single/dma_map_page can be used to map particular BD),
 * maintaining such information will add additional overhead.
 * Current DMA API implementation for 4xx processors only ensures cache coherency
 * and dma_unmap_???? routines are empty and are likely to stay this way.
 * I decided to omit dma_unmap_???? calls because I don't want to add additional
 * complexity just for the sake of following some abstract API, when it doesn't
 * add any real benefit to the driver. I understand that this decision may be
 * controversial, but I really tried to make code API-correct and efficient
 * at the same time and didn't come up with code I liked :(. --ebs
 */
#define DRV_NAME        "emac"
#define DRV_VERSION     "3.54"
#define DRV_DESC        "PPC 4xx OCP EMAC driver"

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR
    ("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>");
MODULE_LICENSE("GPL");
/*
 * PPC64 doesn't (yet) have a cacheable_memcpy
 */
#ifdef CONFIG_PPC64
#define cacheable_memcpy(d,s,n) memcpy((d),(s),(n))
#endif
/* minimum number of free TX descriptors required to wake up TX process */
#define EMAC_TX_WAKEUP_THRESH		(NUM_TX_BUFF / 4)

/* If packet size is less than this number, we allocate a small skb and copy
 * the packet contents into it instead of just sending the original big skb up
 */
#define EMAC_RX_COPY_THRESH		CONFIG_IBM_NEW_EMAC_RX_COPY_THRESHOLD
/* Since multiple EMACs share MDIO lines in various ways, we need
 * to avoid re-using the same PHY ID in cases where the arch didn't
 * set up precise phy_map entries.
 *
 * XXX This is something that needs to be reworked as we can have multiple
 * EMAC "sets" (multiple ASICs containing several EMACs), though in that
 * case we can probably require explicit PHY IDs in the device-tree.
 */
static u32 busy_phy_map;
static DEFINE_MUTEX(emac_phy_map_lock);
/* This is the wait queue used to wait on any event related to probe, that
 * is discovery of MALs, other EMACs, ZMII/RGMIIs, etc...
 */
static DECLARE_WAIT_QUEUE_HEAD(emac_probe_wait);
/* Having stable interface names is a doomed idea. However, it would be nice
 * if we didn't have completely random interface names at boot too :-) It's
 * just a matter of making everybody's life easier. Since we are doing
 * threaded probing, it's a bit harder though. The base idea here is that
 * we make up a list of all emacs in the device-tree before we register the
 * driver. Every emac will then wait for the previous one in the list to
 * initialize before itself. We should also keep that list ordered by
 * cell_index.
 * That list is only 4 entries long, meaning that additional EMACs don't
 * get ordering guarantees unless EMAC_BOOT_LIST_SIZE is increased.
 */
#define EMAC_BOOT_LIST_SIZE	4
static struct device_node *emac_boot_list[EMAC_BOOT_LIST_SIZE];
/* How long should I wait for dependent devices? */
#define EMAC_PROBE_DEP_TIMEOUT	(HZ * 5)
/* I don't want to litter the system log with timeout errors
 * when we have a brain-damaged PHY.
 */
static inline void emac_report_timeout_error(struct emac_instance *dev,
					     const char *error)
{
	if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX |
				  EMAC_FTR_460EX_PHY_CLK_FIX |
				  EMAC_FTR_440EP_PHY_CLK_FIX))
		DBG(dev, "%s" NL, error);
	else if (net_ratelimit())
		printk(KERN_ERR "%s: %s\n", dev->ndev->name, error);
}
/* EMAC PHY clock workaround:
 * 440EP/440GR has a saner SDR0_MFR register implementation than 440GX,
 * which allows controlling each EMAC clock
 */
static inline void emac_rx_clk_tx(struct emac_instance *dev)
{
#ifdef CONFIG_PPC_DCR_NATIVE
	if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
		dcri_clrset(SDR0, SDR0_MFR,
			    0, SDR0_MFR_ECS >> dev->cell_index);
#endif
}

static inline void emac_rx_clk_default(struct emac_instance *dev)
{
#ifdef CONFIG_PPC_DCR_NATIVE
	if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
		dcri_clrset(SDR0, SDR0_MFR,
			    SDR0_MFR_ECS >> dev->cell_index, 0);
#endif
}
/* PHY polling intervals */
#define PHY_POLL_LINK_ON	HZ
#define PHY_POLL_LINK_OFF	(HZ / 5)

/* Graceful stop timeouts in us.
 * We should allow up to 1 frame time (full-duplex, ignoring collisions)
 */
#define STOP_TIMEOUT_10		1230
#define STOP_TIMEOUT_100	124
#define STOP_TIMEOUT_1000	13
#define STOP_TIMEOUT_1000_JUMBO	73
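
/* A hedged derivation of the values above (our reading of the constants,
 * not from the original comment): a maximum-length 1518-byte frame plus
 * 8 bytes of preamble/SFD and a 12-byte inter-frame gap is 1538 bytes =
 * 12304 bit-times, i.e. ~1230us at 10Mbit/s, ~123us at 100Mbit/s (rounded
 * up to 124) and ~12.3us at 1Gbit/s (rounded up to 13). The jumbo value
 * corresponds to a ~9000-byte frame: 9038 bytes = 72304 bit-times ~= 73us
 * at 1Gbit/s.
 */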
static unsigned char default_mcast_addr[] = {
	0x01, 0x80, 0xC2, 0x00, 0x00, 0x01
};
/* Please, keep in sync with struct ibm_emac_stats/ibm_emac_error_stats */
static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = {
	"rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum",
	"tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom",
	"rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu",
	"rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet",
	"rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error",
	"rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range",
	"rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun",
	"rx_bad_packet", "rx_runt_packet", "rx_short_event",
	"rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long",
	"rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors",
	"tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral",
	"tx_bd_excessive_collisions", "tx_bd_late_collision",
	"tx_bd_multple_collisions", "tx_bd_single_collision",
	"tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe",
	"tx_errors"
};
static irqreturn_t emac_irq(int irq, void *dev_instance);
static void emac_clean_tx_ring(struct emac_instance *dev);
static void __emac_set_multicast_list(struct emac_instance *dev);
static inline int emac_phy_supports_gige(int phy_mode)
{
	return phy_mode == PHY_MODE_GMII ||
		phy_mode == PHY_MODE_RGMII ||
		phy_mode == PHY_MODE_SGMII ||
		phy_mode == PHY_MODE_TBI ||
		phy_mode == PHY_MODE_RTBI;
}

static inline int emac_phy_gpcs(int phy_mode)
{
	return phy_mode == PHY_MODE_SGMII ||
		phy_mode == PHY_MODE_TBI ||
		phy_mode == PHY_MODE_RTBI;
}
static inline void emac_tx_enable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "tx_enable" NL);

	r = in_be32(&p->mr0);
	if (!(r & EMAC_MR0_TXE))
		out_be32(&p->mr0, r | EMAC_MR0_TXE);
}

static void emac_tx_disable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "tx_disable" NL);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_TXE) {
		int n = dev->stop_timeout;
		out_be32(&p->mr0, r & ~EMAC_MR0_TXE);
		while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n) {
			udelay(1);
			--n;
		}
		if (unlikely(!n))
			emac_report_timeout_error(dev, "TX disable timeout");
	}
}
static void emac_rx_enable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	if (unlikely(test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags)))
		goto out;

	DBG(dev, "rx_enable" NL);

	r = in_be32(&p->mr0);
	if (!(r & EMAC_MR0_RXE)) {
		if (unlikely(!(r & EMAC_MR0_RXI))) {
			/* Wait if previous async disable is still in progress */
			int n = dev->stop_timeout;
			while (!(r = in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
				udelay(1);
				--n;
			}
			if (unlikely(!n))
				emac_report_timeout_error(dev,
							  "RX disable timeout");
		}
		out_be32(&p->mr0, r | EMAC_MR0_RXE);
	}
 out:
	;
}

static void emac_rx_disable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "rx_disable" NL);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_RXE) {
		int n = dev->stop_timeout;
		out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
		while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
			udelay(1);
			--n;
		}
		if (unlikely(!n))
			emac_report_timeout_error(dev, "RX disable timeout");
	}
}
static inline void emac_netif_stop(struct emac_instance *dev)
{
	netif_tx_lock_bh(dev->ndev);
	netif_addr_lock(dev->ndev);
	dev->no_mcast = 1;
	netif_addr_unlock(dev->ndev);
	netif_tx_unlock_bh(dev->ndev);
	dev->ndev->trans_start = jiffies;	/* prevent tx timeout */
	mal_poll_disable(dev->mal, &dev->commac);
	netif_tx_disable(dev->ndev);
}

static inline void emac_netif_start(struct emac_instance *dev)
{
	netif_tx_lock_bh(dev->ndev);
	netif_addr_lock(dev->ndev);
	dev->no_mcast = 0;
	if (dev->mcast_pending && netif_running(dev->ndev))
		__emac_set_multicast_list(dev);
	netif_addr_unlock(dev->ndev);
	netif_tx_unlock_bh(dev->ndev);

	netif_wake_queue(dev->ndev);

	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (taken from tg3... though the case where that is wrong is
	 *  not terribly harmful)
	 */
	mal_poll_enable(dev->mal, &dev->commac);
}
static inline void emac_rx_disable_async(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "rx_disable_async" NL);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_RXE)
		out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
}
static int emac_reset(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	int n = 20;

	DBG(dev, "reset" NL);

	if (!dev->reset_failed) {
		/* 40x erratum suggests stopping RX channel before reset,
		 * we stop TX as well
		 */
		emac_rx_disable(dev);
		emac_tx_disable(dev);
	}

#ifdef CONFIG_PPC_DCR_NATIVE
	/* Enable internal clock source */
	if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX))
		dcri_clrset(SDR0, SDR0_ETH_CFG,
			    0, SDR0_ETH_CFG_ECS << dev->cell_index);
#endif

	out_be32(&p->mr0, EMAC_MR0_SRST);
	while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n)
		--n;

#ifdef CONFIG_PPC_DCR_NATIVE
	/* Enable external clock source */
	if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX))
		dcri_clrset(SDR0, SDR0_ETH_CFG,
			    SDR0_ETH_CFG_ECS << dev->cell_index, 0);
#endif

	if (n) {
		dev->reset_failed = 0;
		return 0;
	} else {
		emac_report_timeout_error(dev, "reset timeout");
		dev->reset_failed = 1;
		return -ETIMEDOUT;
	}
}
static void emac_hash_mc(struct emac_instance *dev)
{
	const int regs = EMAC_XAHT_REGS(dev);
	u32 *gaht_base = emac_gaht_base(dev);
	u32 gaht_temp[regs];
	struct dev_mc_list *dmi;
	int i;

	DBG(dev, "hash_mc %d" NL, dev->ndev->mc_count);

	memset(gaht_temp, 0, sizeof (gaht_temp));

	for (dmi = dev->ndev->mc_list; dmi; dmi = dmi->next) {
		int slot, reg, mask;
		DBG2(dev, "mc %02x:%02x:%02x:%02x:%02x:%02x" NL,
		     dmi->dmi_addr[0], dmi->dmi_addr[1], dmi->dmi_addr[2],
		     dmi->dmi_addr[3], dmi->dmi_addr[4], dmi->dmi_addr[5]);

		slot = EMAC_XAHT_CRC_TO_SLOT(dev, ether_crc(ETH_ALEN, dmi->dmi_addr));
		reg = EMAC_XAHT_SLOT_TO_REG(dev, slot);
		mask = EMAC_XAHT_SLOT_TO_MASK(dev, slot);

		gaht_temp[reg] |= mask;
	}

	for (i = 0; i < regs; i++)
		out_be32(gaht_base + i, gaht_temp[i]);
}
static inline u32 emac_iff2rmr(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	u32 r;

	r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE;

	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r |= EMAC4_RMR_BASE;
	else
		r |= EMAC_RMR_BASE;

	if (ndev->flags & IFF_PROMISC)
		r |= EMAC_RMR_PME;
	else if (ndev->flags & IFF_ALLMULTI ||
		 (ndev->mc_count > EMAC_XAHT_SLOTS(dev)))
		r |= EMAC_RMR_PMME;
	else if (ndev->mc_count > 0)
		r |= EMAC_RMR_MAE;

	return r;
}
static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
{
	u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC_MR1_TR0_MULT;

	DBG2(dev, "__emac_calc_base_mr1" NL);

	switch (tx_size) {
	case 2048:
		ret |= EMAC_MR1_TFS_2K;
		break;
	default:
		printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
		       dev->ndev->name, tx_size);
	}

	switch (rx_size) {
	case 16384:
		ret |= EMAC_MR1_RFS_16K;
		break;
	case 4096:
		ret |= EMAC_MR1_RFS_4K;
		break;
	default:
		printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
		       dev->ndev->name, rx_size);
	}

	return ret;
}
static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
{
	u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC4_MR1_TR |
		EMAC4_MR1_OBCI(dev->opb_bus_freq / 1000000);

	DBG2(dev, "__emac4_calc_base_mr1" NL);

	switch (tx_size) {
	case 4096:
		ret |= EMAC4_MR1_TFS_4K;
		break;
	case 2048:
		ret |= EMAC4_MR1_TFS_2K;
		break;
	default:
		printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
		       dev->ndev->name, tx_size);
	}

	switch (rx_size) {
	case 16384:
		ret |= EMAC4_MR1_RFS_16K;
		break;
	case 4096:
		ret |= EMAC4_MR1_RFS_4K;
		break;
	case 2048:
		ret |= EMAC4_MR1_RFS_2K;
		break;
	default:
		printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
		       dev->ndev->name, rx_size);
	}

	return ret;
}

static u32 emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
{
	return emac_has_feature(dev, EMAC_FTR_EMAC4) ?
		__emac4_calc_base_mr1(dev, tx_size, rx_size) :
		__emac_calc_base_mr1(dev, tx_size, rx_size);
}
static inline u32 emac_calc_trtr(struct emac_instance *dev, unsigned int size)
{
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		return ((size >> 6) - 1) << EMAC_TRTR_SHIFT_EMAC4;
	else
		return ((size >> 6) - 1) << EMAC_TRTR_SHIFT;
}

static inline u32 emac_calc_rwmr(struct emac_instance *dev,
				 unsigned int low, unsigned int high)
{
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		return (low << 22) | ((high & 0x3ff) << 6);
	else
		return (low << 23) | ((high & 0x1ff) << 7);
}
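
/* Hedged worked example of the two encodings above: on EMAC4, a TX
 * threshold of 1024 bytes yields ((1024 >> 6) - 1) = 15 (the threshold in
 * 64-byte units, minus one) shifted into place in TRTR, and an RWMR with
 * a low-water mark of 32 FIFO entries and a high-water mark of 64 entries
 * encodes as (32 << 22) | (64 << 6). The 0x3ff/0x1ff masks reflect the
 * 10-bit vs 9-bit high-water field widths of the two register layouts.
 */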
static int emac_configure(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	struct net_device *ndev = dev->ndev;
	int tx_size, rx_size, link = netif_carrier_ok(dev->ndev);
	u32 r, mr1 = 0;

	DBG(dev, "configure" NL);

	if (!link) {
		out_be32(&p->mr1, in_be32(&p->mr1)
			 | EMAC_MR1_FDE | EMAC_MR1_ILE);
		udelay(100);
	} else if (emac_reset(dev) < 0)
		return -ETIMEDOUT;

	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		tah_reset(dev->tah_dev);

	DBG(dev, " link = %d duplex = %d, pause = %d, asym_pause = %d\n",
	    link, dev->phy.duplex, dev->phy.pause, dev->phy.asym_pause);

	/* Default fifo sizes */
	tx_size = dev->tx_fifo_size;
	rx_size = dev->rx_fifo_size;

	/* No link, force loopback */
	if (!link)
		mr1 = EMAC_MR1_FDE | EMAC_MR1_ILE;

	/* Check for full duplex */
	else if (dev->phy.duplex == DUPLEX_FULL)
		mr1 |= EMAC_MR1_FDE | EMAC_MR1_MWSW_001;

	/* Adjust fifo sizes, mr1 and timeouts based on link speed */
	dev->stop_timeout = STOP_TIMEOUT_10;
	switch (dev->phy.speed) {
	case SPEED_1000:
		if (emac_phy_gpcs(dev->phy.mode)) {
			mr1 |= EMAC_MR1_MF_1000GPCS | EMAC_MR1_MF_IPPA(
				(dev->phy.gpcs_address != 0xffffffff) ?
				 dev->phy.gpcs_address : dev->phy.address);

			/* Put some arbitrary OUI, Manuf & Rev IDs so we can
			 * identify this GPCS PHY later.
			 */
			out_be32(&p->u1.emac4.ipcr, 0xdeadbeef);
		} else
			mr1 |= EMAC_MR1_MF_1000;

		/* Extended fifo sizes */
		tx_size = dev->tx_fifo_size_gige;
		rx_size = dev->rx_fifo_size_gige;

		if (dev->ndev->mtu > ETH_DATA_LEN) {
			if (emac_has_feature(dev, EMAC_FTR_EMAC4))
				mr1 |= EMAC4_MR1_JPSM;
			else
				mr1 |= EMAC_MR1_JPSM;
			dev->stop_timeout = STOP_TIMEOUT_1000_JUMBO;
		} else
			dev->stop_timeout = STOP_TIMEOUT_1000;
		break;
	case SPEED_100:
		mr1 |= EMAC_MR1_MF_100;
		dev->stop_timeout = STOP_TIMEOUT_100;
		break;
	default: /* make gcc happy */
		break;
	}

	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_set_speed(dev->rgmii_dev, dev->rgmii_port,
				dev->phy.speed);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_set_speed(dev->zmii_dev, dev->zmii_port, dev->phy.speed);

	/* A 40x erratum forces us NOT to use integrated flow control;
	 * let's hope it works on 44x ;)
	 */
	if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x) &&
	    dev->phy.duplex == DUPLEX_FULL) {
		if (dev->phy.pause)
			mr1 |= EMAC_MR1_EIFC | EMAC_MR1_APP;
		else if (dev->phy.asym_pause)
			mr1 |= EMAC_MR1_APP;
	}

	/* Add base settings & fifo sizes & program MR1 */
	mr1 |= emac_calc_base_mr1(dev, tx_size, rx_size);
	out_be32(&p->mr1, mr1);

	/* Set individual MAC address */
	out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
	out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
		 (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
		 ndev->dev_addr[5]);

	/* VLAN Tag Protocol ID */
	out_be32(&p->vtpid, 0x8100);

	/* Receive mode register */
	r = emac_iff2rmr(ndev);
	if (r & EMAC_RMR_MAE)
		emac_hash_mc(dev);
	out_be32(&p->rmr, r);
	/* FIFOs thresholds */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
			       tx_size / 2 / dev->fifo_entry_size);
	else
		r = EMAC_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
			      tx_size / 2 / dev->fifo_entry_size);
	out_be32(&p->tmr1, r);
	out_be32(&p->trtr, emac_calc_trtr(dev, tx_size / 2));

	/* PAUSE frame is sent when RX FIFO reaches its high-water mark;
	   there should still be enough space in the FIFO to allow our link
	   partner time to process this frame and also time to send a PAUSE
	   frame itself.

	   Here is the worst case scenario for the RX FIFO "headroom"
	   (from "The Switch Book") (100Mbps, without preamble, inter-frame gap):

	   1) One maximum-length frame on TX                    1522 bytes
	   2) One PAUSE frame time                                64 bytes
	   3) PAUSE frame decode time allowance                   64 bytes
	   4) One maximum-length frame on RX                    1522 bytes
	   5) Round-trip propagation delay of the link (100Mb)   15 bytes
	   ----------
	   3187 bytes

	   I chose to set the high-water mark to RX_FIFO_SIZE / 4 (1024 bytes)
	   and the low-water mark to RX_FIFO_SIZE / 8 (512 bytes).
	 */
	r = emac_calc_rwmr(dev, rx_size / 8 / dev->fifo_entry_size,
			   rx_size / 4 / dev->fifo_entry_size);
	out_be32(&p->rwmr, r);
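
	/* Hedged arithmetic for the marks programmed above: with
	 * rx_size = 4096 and a hypothetical fifo_entry_size of 16 bytes,
	 * the low-water mark is 4096 / 8 / 16 = 32 entries (512 bytes) and
	 * the high-water mark is 4096 / 4 / 16 = 64 entries (1024 bytes),
	 * matching the byte values quoted in the comment.
	 */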
	/* Set PAUSE timer to the maximum */
	out_be32(&p->ptr, 0xffff);

	/* Enable interrupts */
	r = EMAC_ISR_OVR | EMAC_ISR_BP | EMAC_ISR_SE |
		EMAC_ISR_ALE | EMAC_ISR_BFCS | EMAC_ISR_PTLE | EMAC_ISR_ORE |
		EMAC_ISR_IRE | EMAC_ISR_TE;
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r |= EMAC4_ISR_TXPE | EMAC4_ISR_RXPE /* | EMAC4_ISR_TXUE |
							EMAC4_ISR_RXOE | */;
	out_be32(&p->iser, r);

	/* We need to take GPCS PHY out of isolate mode after EMAC reset */
	if (emac_phy_gpcs(dev->phy.mode)) {
		if (dev->phy.gpcs_address != 0xffffffff)
			emac_mii_reset_gpcs(&dev->phy);
		else
			emac_mii_reset_phy(&dev->phy);
	}

	return 0;
}
static void emac_reinitialize(struct emac_instance *dev)
{
	DBG(dev, "reinitialize" NL);

	emac_netif_stop(dev);
	if (!emac_configure(dev)) {
		emac_rx_enable(dev);
		emac_tx_enable(dev);
	}
	emac_netif_start(dev);
}
static void emac_full_tx_reset(struct emac_instance *dev)
{
	DBG(dev, "full_tx_reset" NL);

	emac_tx_disable(dev);
	mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
	emac_clean_tx_ring(dev);
	dev->tx_cnt = dev->tx_slot = dev->ack_slot = 0;

	emac_configure(dev);

	mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
	emac_tx_enable(dev);
	emac_rx_enable(dev);
}
static void emac_reset_work(struct work_struct *work)
{
	struct emac_instance *dev = container_of(work, struct emac_instance, reset_work);

	DBG(dev, "reset_work" NL);

	mutex_lock(&dev->link_lock);
	if (dev->opened) {
		emac_netif_stop(dev);
		emac_full_tx_reset(dev);
		emac_netif_start(dev);
	}
	mutex_unlock(&dev->link_lock);
}

static void emac_tx_timeout(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	DBG(dev, "tx_timeout" NL);

	schedule_work(&dev->reset_work);
}
static inline int emac_phy_done(struct emac_instance *dev, u32 stacr)
{
	int done = !!(stacr & EMAC_STACR_OC);

	if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
		done = !done;

	return done;
}
static int __emac_mdio_read(struct emac_instance *dev, u8 id, u8 reg)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r = 0;
	int n, err = -ETIMEDOUT;

	mutex_lock(&dev->mdio_lock);

	DBG2(dev, "mdio_read(%02x,%02x)" NL, id, reg);

	/* Enable proper MDIO port */
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);

	/* Wait for management interface to become idle */
	n = 20;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait idle\n");
			goto bail;
		}
	}

	/* Issue read command */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_STACR_BASE(dev->opb_bus_freq);
	else
		r = EMAC_STACR_BASE(dev->opb_bus_freq);
	if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
		r |= EMAC_STACR_OC;
	if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
		r |= EMACX_STACR_STAC_READ;
	else
		r |= EMAC_STACR_STAC_READ;
	r |= (reg & EMAC_STACR_PRA_MASK)
		| ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT);
	out_be32(&p->stacr, r);

	/* Wait for read to complete */
	n = 200;
	while (!emac_phy_done(dev, (r = in_be32(&p->stacr)))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait complete\n");
			goto bail;
		}
	}

	if (unlikely(r & EMAC_STACR_PHYE)) {
		DBG(dev, "mdio_read(%02x, %02x) failed" NL, id, reg);
		err = -EREMOTEIO;
		goto bail;
	}

	r = ((r >> EMAC_STACR_PHYD_SHIFT) & EMAC_STACR_PHYD_MASK);

	DBG2(dev, "mdio_read -> %04x" NL, r);
	err = 0;
 bail:
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
	mutex_unlock(&dev->mdio_lock);

	return err == 0 ? r : err;
}
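
/* A hedged usage sketch (phy_addr is a hypothetical PHY address; MII_BMSR
 * and BMSR_LSTATUS come from <linux/mii.h>): a raw link-status poll through
 * the emac_mdio_read() wrapper defined further below would look like
 *
 *	int bmsr = emac_mdio_read(ndev, phy_addr, MII_BMSR);
 *	if (bmsr >= 0 && (bmsr & BMSR_LSTATUS))
 *		;	// link is up
 *
 * where a negative return is one of the -ETIMEDOUT/-EREMOTEIO errors
 * propagated from __emac_mdio_read() above.
 */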
static void __emac_mdio_write(struct emac_instance *dev, u8 id, u8 reg,
			      u16 val)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r = 0;
	int n, err = -ETIMEDOUT;

	mutex_lock(&dev->mdio_lock);

	DBG2(dev, "mdio_write(%02x,%02x,%04x)" NL, id, reg, val);

	/* Enable proper MDIO port */
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);

	/* Wait for management interface to be idle */
	n = 20;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait idle\n");
			goto bail;
		}
	}

	/* Issue write command */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_STACR_BASE(dev->opb_bus_freq);
	else
		r = EMAC_STACR_BASE(dev->opb_bus_freq);
	if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
		r |= EMAC_STACR_OC;
	if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
		r |= EMACX_STACR_STAC_WRITE;
	else
		r |= EMAC_STACR_STAC_WRITE;
	r |= (reg & EMAC_STACR_PRA_MASK) |
		((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT) |
		(val << EMAC_STACR_PHYD_SHIFT);
	out_be32(&p->stacr, r);

	/* Wait for write to complete */
	n = 200;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait complete\n");
			goto bail;
		}
	}
	err = 0;
 bail:
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
	mutex_unlock(&dev->mdio_lock);
}
static int emac_mdio_read(struct net_device *ndev, int id, int reg)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int res;

	res = __emac_mdio_read((dev->mdio_instance &&
				dev->phy.gpcs_address != id) ?
				dev->mdio_instance : dev,
			       (u8) id, (u8) reg);
	return res;
}

static void emac_mdio_write(struct net_device *ndev, int id, int reg, int val)
{
	struct emac_instance *dev = netdev_priv(ndev);

	__emac_mdio_write((dev->mdio_instance &&
			   dev->phy.gpcs_address != id) ?
			   dev->mdio_instance : dev,
			  (u8) id, (u8) reg, (u16) val);
}
/* Tx lock BH */
static void __emac_set_multicast_list(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 rmr = emac_iff2rmr(dev->ndev);

	DBG(dev, "__multicast %08x" NL, rmr);

	/* I decided to relax register access rules here to avoid
	 * full EMAC reset.
	 *
	 * There is a real problem with EMAC4 core if we use MWSW_001 bit
	 * in MR1 register and do a full EMAC reset.
	 * One TX BD status update is delayed and, after EMAC reset, it
	 * never happens, resulting in TX hung (it'll be recovered by TX
	 * timeout handler eventually, but this is just gross).
	 * So we either have to do full TX reset or try to cheat here :)
	 *
	 * The only required change is to RX mode register, so I *think* all
	 * we need is just to stop RX channel. This seems to work on all
	 * tested SoCs.                                            --ebs
	 *
	 * If we need the full reset, we might just trigger the workqueue
	 * and do it async... a bit nasty but should work --BenH
	 */
	dev->mcast_pending = 0;
	emac_rx_disable(dev);
	if (rmr & EMAC_RMR_MAE)
		emac_hash_mc(dev);
	out_be32(&p->rmr, rmr);
	emac_rx_enable(dev);
}
/* Tx lock BH */
static void emac_set_multicast_list(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	DBG(dev, "multicast" NL);

	BUG_ON(!netif_running(dev->ndev));

	if (dev->no_mcast) {
		dev->mcast_pending = 1;
		return;
	}
	__emac_set_multicast_list(dev);
}
static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu)
{
	int rx_sync_size = emac_rx_sync_size(new_mtu);
	int rx_skb_size = emac_rx_skb_size(new_mtu);
	int i, ret = 0;

	mutex_lock(&dev->link_lock);
	emac_netif_stop(dev);
	emac_rx_disable(dev);
	mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);

	if (dev->rx_sg_skb) {
		++dev->estats.rx_dropped_resize;
		dev_kfree_skb(dev->rx_sg_skb);
		dev->rx_sg_skb = NULL;
	}

	/* Make a first pass over RX ring and mark BDs ready, dropping
	 * non-processed packets on the way. We need this as a separate pass
	 * to simplify error recovery in the case of allocation failure later.
	 */
	for (i = 0; i < NUM_RX_BUFF; ++i) {
		if (dev->rx_desc[i].ctrl & MAL_RX_CTRL_FIRST)
			++dev->estats.rx_dropped_resize;

		dev->rx_desc[i].data_len = 0;
		dev->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY |
		    (i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
	}

	/* Reallocate RX ring only if bigger skb buffers are required */
	if (rx_skb_size <= dev->rx_skb_size)
		goto skip;

	/* Second pass, allocate new skbs */
	for (i = 0; i < NUM_RX_BUFF; ++i) {
		struct sk_buff *skb = alloc_skb(rx_skb_size, GFP_ATOMIC);
		if (!skb) {
			ret = -ENOMEM;
			goto oom;
		}

		BUG_ON(!dev->rx_skb[i]);
		dev_kfree_skb(dev->rx_skb[i]);

		skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
		dev->rx_desc[i].data_ptr =
		    dma_map_single(&dev->ofdev->dev, skb->data - 2, rx_sync_size,
				   DMA_FROM_DEVICE) + 2;
		dev->rx_skb[i] = skb;
	}
 skip:
	/* Check if we need to change "Jumbo" bit in MR1 */
	if ((new_mtu > ETH_DATA_LEN) ^ (dev->ndev->mtu > ETH_DATA_LEN)) {
		/* This is to prevent starting RX channel in emac_rx_enable() */
		set_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);

		dev->ndev->mtu = new_mtu;
		emac_full_tx_reset(dev);
	}

	mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(new_mtu));
 oom:
	/* Restart RX */
	clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
	dev->rx_slot = 0;
	mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
	emac_rx_enable(dev);
	emac_netif_start(dev);
	mutex_unlock(&dev->link_lock);

	return ret;
}
/* Process ctx, rtnl_lock semaphore */
static int emac_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int ret = 0;

	if (new_mtu < EMAC_MIN_MTU || new_mtu > dev->max_mtu)
		return -EINVAL;

	DBG(dev, "change_mtu(%d)" NL, new_mtu);

	if (netif_running(ndev)) {
		/* Check if we really need to reinitialize RX ring */
		if (emac_rx_skb_size(ndev->mtu) != emac_rx_skb_size(new_mtu))
			ret = emac_resize_rx_ring(dev, new_mtu);
	}

	if (!ret) {
		ndev->mtu = new_mtu;
		dev->rx_skb_size = emac_rx_skb_size(new_mtu);
		dev->rx_sync_size = emac_rx_sync_size(new_mtu);
	}

	return ret;
}
static void emac_clean_tx_ring(struct emac_instance *dev)
{
	int i;

	for (i = 0; i < NUM_TX_BUFF; ++i) {
		if (dev->tx_skb[i]) {
			dev_kfree_skb(dev->tx_skb[i]);
			dev->tx_skb[i] = NULL;
			if (dev->tx_desc[i].ctrl & MAL_TX_CTRL_READY)
				++dev->estats.tx_dropped;
		}
		dev->tx_desc[i].ctrl = 0;
		dev->tx_desc[i].data_ptr = 0;
	}
}

static void emac_clean_rx_ring(struct emac_instance *dev)
{
	int i;

	for (i = 0; i < NUM_RX_BUFF; ++i)
		if (dev->rx_skb[i]) {
			dev->rx_desc[i].ctrl = 0;
			dev_kfree_skb(dev->rx_skb[i]);
			dev->rx_skb[i] = NULL;
			dev->rx_desc[i].data_ptr = 0;
		}

	if (dev->rx_sg_skb) {
		dev_kfree_skb(dev->rx_sg_skb);
		dev->rx_sg_skb = NULL;
	}
}
static inline int emac_alloc_rx_skb(struct emac_instance *dev, int slot,
				    gfp_t flags)
{
	struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags);
	if (unlikely(!skb))
		return -ENOMEM;

	dev->rx_skb[slot] = skb;
	dev->rx_desc[slot].data_len = 0;

	skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
	dev->rx_desc[slot].data_ptr =
	    dma_map_single(&dev->ofdev->dev, skb->data - 2, dev->rx_sync_size,
			   DMA_FROM_DEVICE) + 2;
	wmb();
	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);

	return 0;
}
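
/* A note on the +2/-2 offsets above (our reading, not documented in the
 * source): skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2) shifts skb->data by
 * two bytes so that the 14-byte Ethernet header leaves the IP header
 * word-aligned (the usual 2-byte "IP align" trick), while mapping from
 * skb->data - 2 keeps the DMA address aligned; adding the 2 back into
 * data_ptr makes the MAC still deposit the frame exactly at skb->data.
 */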
static void emac_print_link_status(struct emac_instance *dev)
{
	if (netif_carrier_ok(dev->ndev))
		printk(KERN_INFO "%s: link is up, %d %s%s\n",
		       dev->ndev->name, dev->phy.speed,
		       dev->phy.duplex == DUPLEX_FULL ? "FDX" : "HDX",
		       dev->phy.pause ? ", pause enabled" :
		       dev->phy.asym_pause ? ", asymmetric pause enabled" : "");
	else
		printk(KERN_INFO "%s: link is down\n", dev->ndev->name);
}
/* Process ctx, rtnl_lock semaphore */
static int emac_open(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int err, i;

	DBG(dev, "open" NL);

	/* Setup error IRQ handler */
	err = request_irq(dev->emac_irq, emac_irq, 0, "EMAC", dev);
	if (err) {
		printk(KERN_ERR "%s: failed to request IRQ %d\n",
		       ndev->name, dev->emac_irq);
		return err;
	}

	/* Allocate RX ring */
	for (i = 0; i < NUM_RX_BUFF; ++i)
		if (emac_alloc_rx_skb(dev, i, GFP_KERNEL)) {
			printk(KERN_ERR "%s: failed to allocate RX ring\n",
			       ndev->name);
			goto oom;
		}

	dev->tx_cnt = dev->tx_slot = dev->ack_slot = dev->rx_slot = 0;
	clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
	dev->rx_sg_skb = NULL;

	mutex_lock(&dev->link_lock);
	dev->opened = 1;

	/* Start PHY polling now.
	 */
	if (dev->phy.address >= 0) {
		int link_poll_interval;
		if (dev->phy.def->ops->poll_link(&dev->phy)) {
			dev->phy.def->ops->read_link(&dev->phy);
			emac_rx_clk_default(dev);
			netif_carrier_on(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_ON;
		} else {
			emac_rx_clk_tx(dev);
			netif_carrier_off(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_OFF;
		}
		dev->link_polling = 1;
		wmb();
		schedule_delayed_work(&dev->link_work, link_poll_interval);
		emac_print_link_status(dev);
	} else
		netif_carrier_on(dev->ndev);

	/* Required for Pause packet support in EMAC */
	dev_mc_add(ndev, default_mcast_addr, sizeof(default_mcast_addr), 1);

	emac_configure(dev);
	mal_poll_add(dev->mal, &dev->commac);
	mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
	mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(ndev->mtu));
	mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
	emac_tx_enable(dev);
	emac_rx_enable(dev);
	emac_netif_start(dev);

	mutex_unlock(&dev->link_lock);

	return 0;
 oom:
	emac_clean_rx_ring(dev);
	free_irq(dev->emac_irq, dev);

	return -ENOMEM;
}
static int emac_link_differs(struct emac_instance *dev)
{
	u32 r = in_be32(&dev->emacp->mr1);

	int duplex = r & EMAC_MR1_FDE ? DUPLEX_FULL : DUPLEX_HALF;
	int speed, pause, asym_pause;

	if (r & EMAC_MR1_MF_1000)
		speed = SPEED_1000;
	else if (r & EMAC_MR1_MF_100)
		speed = SPEED_100;
	else
		speed = SPEED_10;

	switch (r & (EMAC_MR1_EIFC | EMAC_MR1_APP)) {
	case (EMAC_MR1_EIFC | EMAC_MR1_APP):
		pause = 1;
		asym_pause = 0;
		break;
	case EMAC_MR1_APP:
		pause = 0;
		asym_pause = 1;
		break;
	default:
		pause = asym_pause = 0;
	}
	return speed != dev->phy.speed || duplex != dev->phy.duplex ||
	    pause != dev->phy.pause || asym_pause != dev->phy.asym_pause;
}
static void emac_link_timer(struct work_struct *work)
{
	struct emac_instance *dev =
		container_of((struct delayed_work *)work,
			     struct emac_instance, link_work);
	int link_poll_interval;

	mutex_lock(&dev->link_lock);
	DBG2(dev, "link timer" NL);

	if (!dev->opened)
		goto bail;

	if (dev->phy.def->ops->poll_link(&dev->phy)) {
		if (!netif_carrier_ok(dev->ndev)) {
			emac_rx_clk_default(dev);
			/* Get new link parameters */
			dev->phy.def->ops->read_link(&dev->phy);

			netif_carrier_on(dev->ndev);
			emac_netif_stop(dev);
			emac_full_tx_reset(dev);
			emac_netif_start(dev);
			emac_print_link_status(dev);
		}
		link_poll_interval = PHY_POLL_LINK_ON;
	} else {
		if (netif_carrier_ok(dev->ndev)) {
			emac_rx_clk_tx(dev);
			netif_carrier_off(dev->ndev);
			netif_tx_disable(dev->ndev);
			emac_reinitialize(dev);
			emac_print_link_status(dev);
		}
		link_poll_interval = PHY_POLL_LINK_OFF;
	}
	schedule_delayed_work(&dev->link_work, link_poll_interval);
 bail:
	mutex_unlock(&dev->link_lock);
}
static void emac_force_link_update(struct emac_instance *dev)
{
	netif_carrier_off(dev->ndev);
	smp_rmb();
	if (dev->link_polling) {
		cancel_rearming_delayed_work(&dev->link_work);
		if (dev->link_polling)
			schedule_delayed_work(&dev->link_work, PHY_POLL_LINK_OFF);
	}
}
/* Process ctx, rtnl_lock semaphore */
static int emac_close(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	DBG(dev, "close" NL);

	if (dev->phy.address >= 0) {
		dev->link_polling = 0;
		cancel_rearming_delayed_work(&dev->link_work);
	}
	mutex_lock(&dev->link_lock);
	emac_netif_stop(dev);
	dev->opened = 0;
	mutex_unlock(&dev->link_lock);

	emac_rx_disable(dev);
	emac_tx_disable(dev);
	mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);
	mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
	mal_poll_del(dev->mal, &dev->commac);

	emac_clean_tx_ring(dev);
	emac_clean_rx_ring(dev);

	free_irq(dev->emac_irq, dev);

	netif_carrier_off(ndev);

	return 0;
}
static inline u16 emac_tx_csum(struct emac_instance *dev,
			       struct sk_buff *skb)
{
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
		(skb->ip_summed == CHECKSUM_PARTIAL)) {
		++dev->stats.tx_packets_csum;
		return EMAC_TX_CTRL_TAH_CSUM;
	}
	return 0;
}
static inline int emac_xmit_finish(struct emac_instance *dev, int len)
{
	struct emac_regs __iomem *p = dev->emacp;
	struct net_device *ndev = dev->ndev;

	/* Send the packet out. If the if makes a significant perf
	 * difference, then we can store the TMR0 value in "dev"
	 * instead
	 */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		out_be32(&p->tmr0, EMAC4_TMR0_XMIT);
	else
		out_be32(&p->tmr0, EMAC_TMR0_XMIT);

	if (unlikely(++dev->tx_cnt == NUM_TX_BUFF)) {
		netif_stop_queue(ndev);
		DBG2(dev, "stopped TX queue" NL);
	}

	ndev->trans_start = jiffies;
	++dev->stats.tx_packets;
	dev->stats.tx_bytes += len;

	return 0;
}
/* Tx lock BH */
static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	unsigned int len = skb->len;
	int slot;

	u16 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
	    MAL_TX_CTRL_LAST | emac_tx_csum(dev, skb);

	slot = dev->tx_slot++;
	if (dev->tx_slot == NUM_TX_BUFF) {
		dev->tx_slot = 0;
		ctrl |= MAL_TX_CTRL_WRAP;
	}

	DBG2(dev, "xmit(%u) %d" NL, len, slot);

	dev->tx_skb[slot] = skb;
	dev->tx_desc[slot].data_ptr = dma_map_single(&dev->ofdev->dev,
						     skb->data, len,
						     DMA_TO_DEVICE);
	dev->tx_desc[slot].data_len = (u16) len;
	wmb();
	dev->tx_desc[slot].ctrl = ctrl;

	return emac_xmit_finish(dev, len);
}
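
/* Frames larger than what a single BD can carry are spread over several
 * BDs by emac_xmit_split() below. A hedged example (the exact chunk limit
 * MAL_MAX_TX_SIZE comes from the MAL header): with a ~4 KiB limit, a
 * 5000-byte linear area occupies two BDs, the first carrying the full
 * chunk and the second the remainder, and only the descriptor holding the
 * final chunk gets MAL_TX_CTRL_LAST (plus MAL_TX_CTRL_WRAP if it happens
 * to be the last ring slot).
 */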
#ifdef CONFIG_IBM_NEW_EMAC_TAH
static inline int emac_xmit_split(struct emac_instance *dev, int slot,
				  u32 pd, int len, int last, u16 base_ctrl)
{
	while (1) {
		u16 ctrl = base_ctrl;
		int chunk = min(len, MAL_MAX_TX_SIZE);
		len -= chunk;

		slot = (slot + 1) % NUM_TX_BUFF;

		if (last && !len)
			ctrl |= MAL_TX_CTRL_LAST;
		if (slot == NUM_TX_BUFF - 1)
			ctrl |= MAL_TX_CTRL_WRAP;

		dev->tx_skb[slot] = NULL;
		dev->tx_desc[slot].data_ptr = pd;
		dev->tx_desc[slot].data_len = (u16) chunk;
		dev->tx_desc[slot].ctrl = ctrl;
		++dev->tx_cnt;

		if (!len)
			break;

		pd += chunk;
	}
	return slot;
}
/* Tx lock BH disabled (SG version for TAH equipped EMACs) */
static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int len = skb->len, chunk;
	int slot, i;
	u16 ctrl;
	u32 pd;

	/* This is common "fast" path */
	if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE))
		return emac_start_xmit(skb, ndev);

	len -= skb->data_len;

	/* Note, this is only an *estimation*, we can still run out of empty
	 * slots because of the additional fragmentation into
	 * MAL_MAX_TX_SIZE-sized chunks
	 */
	if (unlikely(dev->tx_cnt + nr_frags + mal_tx_chunks(len) > NUM_TX_BUFF))
		goto stop_queue;

	ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
	    emac_tx_csum(dev, skb);
	slot = dev->tx_slot;

	/* skb data */
	dev->tx_skb[slot] = NULL;
	chunk = min(len, MAL_MAX_TX_SIZE);
	dev->tx_desc[slot].data_ptr = pd =
	    dma_map_single(&dev->ofdev->dev, skb->data, len, DMA_TO_DEVICE);
	dev->tx_desc[slot].data_len = (u16) chunk;
	len -= chunk;
	if (unlikely(len))
		slot = emac_xmit_split(dev, slot, pd + chunk, len, !nr_frags,
				       ctrl);
	/* skb fragments */
	for (i = 0; i < nr_frags; ++i) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		len = frag->size;

		if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
			goto undo_frame;

		pd = dma_map_page(&dev->ofdev->dev, frag->page, frag->page_offset, len,
				  DMA_TO_DEVICE);

		slot = emac_xmit_split(dev, slot, pd, len, i == nr_frags - 1,
				       ctrl);
	}

	DBG2(dev, "xmit_sg(%u) %d - %d" NL, skb->len, dev->tx_slot, slot);

	/* Attach skb to the last slot so we don't release it too early */
	dev->tx_skb[slot] = skb;

	/* Send the packet out */
	if (dev->tx_slot == NUM_TX_BUFF - 1)
		ctrl |= MAL_TX_CTRL_WRAP;
	wmb();
	dev->tx_desc[dev->tx_slot].ctrl = ctrl;
	dev->tx_slot = (slot + 1) % NUM_TX_BUFF;

	return emac_xmit_finish(dev, skb->len);

 undo_frame:
	/* Well, too bad. Our previous estimation was overly optimistic.
	 * Undo everything.
	 */
	while (slot != dev->tx_slot) {
		dev->tx_desc[slot].ctrl = 0;
		--dev->tx_cnt;
		if (--slot < 0)
			slot = NUM_TX_BUFF - 1;
	}
	++dev->estats.tx_undo;

 stop_queue:
	netif_stop_queue(ndev);
	DBG2(dev, "stopped TX queue" NL);
	return 1;
}
#else
# define emac_start_xmit_sg	emac_start_xmit
#endif	/* !defined(CONFIG_IBM_NEW_EMAC_TAH) */
/* Tx lock BHs */
static void emac_parse_tx_error(struct emac_instance *dev, u16 ctrl)
{
	struct emac_error_stats *st = &dev->estats;

	DBG(dev, "BD TX error %04x" NL, ctrl);

	++st->tx_bd_errors;
	if (ctrl & EMAC_TX_ST_BFCS)
		++st->tx_bd_bad_fcs;
	if (ctrl & EMAC_TX_ST_LCS)
		++st->tx_bd_carrier_loss;
	if (ctrl & EMAC_TX_ST_ED)
		++st->tx_bd_excessive_deferral;
	if (ctrl & EMAC_TX_ST_EC)
		++st->tx_bd_excessive_collisions;
	if (ctrl & EMAC_TX_ST_LC)
		++st->tx_bd_late_collision;
	if (ctrl & EMAC_TX_ST_MC)
		++st->tx_bd_multple_collisions;
	if (ctrl & EMAC_TX_ST_SC)
		++st->tx_bd_single_collision;
	if (ctrl & EMAC_TX_ST_UR)
		++st->tx_bd_underrun;
	if (ctrl & EMAC_TX_ST_SQE)
		++st->tx_bd_sqe;
}
/* Tx lock BHs */
static void emac_poll_tx(void *param)
{
	struct emac_instance *dev = param;
	u32 bad_mask;

	DBG2(dev, "poll_tx, %d %d" NL, dev->tx_cnt, dev->ack_slot);

	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		bad_mask = EMAC_IS_BAD_TX_TAH;
	else
		bad_mask = EMAC_IS_BAD_TX;

	netif_tx_lock_bh(dev->ndev);
	if (dev->tx_cnt) {
		u16 ctrl;
		int slot = dev->ack_slot, n = 0;
	again:
		ctrl = dev->tx_desc[slot].ctrl;
		if (!(ctrl & MAL_TX_CTRL_READY)) {
			struct sk_buff *skb = dev->tx_skb[slot];
			++n;

			if (skb) {
				dev_kfree_skb(skb);
				dev->tx_skb[slot] = NULL;
			}
			slot = (slot + 1) % NUM_TX_BUFF;

			if (unlikely(ctrl & bad_mask))
				emac_parse_tx_error(dev, ctrl);

			if (--dev->tx_cnt)
				goto again;
		}
		if (n) {
			dev->ack_slot = slot;
			if (netif_queue_stopped(dev->ndev) &&
			    dev->tx_cnt < EMAC_TX_WAKEUP_THRESH)
				netif_wake_queue(dev->ndev);

			DBG2(dev, "tx %d pkts" NL, n);
		}
	}
	netif_tx_unlock_bh(dev->ndev);
}
static inline void emac_recycle_rx_skb(struct emac_instance *dev, int slot,
				       int len)
{
	struct sk_buff *skb = dev->rx_skb[slot];

	DBG2(dev, "recycle %d %d" NL, slot, len);

	if (len)
		dma_map_single(&dev->ofdev->dev, skb->data - 2,
			       EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE);

	dev->rx_desc[slot].data_len = 0;
	wmb();
	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
}
static void emac_parse_rx_error(struct emac_instance *dev, u16 ctrl)
{
	struct emac_error_stats *st = &dev->estats;

	DBG(dev, "BD RX error %04x" NL, ctrl);

	++st->rx_bd_errors;
	if (ctrl & EMAC_RX_ST_OE)
		++st->rx_bd_overrun;
	if (ctrl & EMAC_RX_ST_BP)
		++st->rx_bd_bad_packet;
	if (ctrl & EMAC_RX_ST_RP)
		++st->rx_bd_runt_packet;
	if (ctrl & EMAC_RX_ST_SE)
		++st->rx_bd_short_event;
	if (ctrl & EMAC_RX_ST_AE)
		++st->rx_bd_alignment_error;
	if (ctrl & EMAC_RX_ST_BFCS)
		++st->rx_bd_bad_fcs;
	if (ctrl & EMAC_RX_ST_PTL)
		++st->rx_bd_packet_too_long;
	if (ctrl & EMAC_RX_ST_ORE)
		++st->rx_bd_out_of_range;
	if (ctrl & EMAC_RX_ST_IRE)
		++st->rx_bd_in_range;
}
static inline void emac_rx_csum(struct emac_instance *dev,
				struct sk_buff *skb, u16 ctrl)
{
#ifdef CONFIG_IBM_NEW_EMAC_TAH
	if (!ctrl && dev->tah_dev) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		++dev->stats.rx_packets_csum;
	}
#endif
}
static inline int emac_rx_sg_append(struct emac_instance *dev, int slot)
{
	if (likely(dev->rx_sg_skb != NULL)) {
		int len = dev->rx_desc[slot].data_len;
		int tot_len = dev->rx_sg_skb->len + len;

		if (unlikely(tot_len + 2 > dev->rx_skb_size)) {
			++dev->estats.rx_dropped_mtu;
			dev_kfree_skb(dev->rx_sg_skb);
			dev->rx_sg_skb = NULL;
		} else {
			cacheable_memcpy(skb_tail_pointer(dev->rx_sg_skb),
					 dev->rx_skb[slot]->data, len);
			skb_put(dev->rx_sg_skb, len);
			emac_recycle_rx_skb(dev, slot, len);
			return 0;
		}
	}
	emac_recycle_rx_skb(dev, slot, 0);
	return -1;
}
/* NAPI poll context */
static int emac_poll_rx(void *param, int budget)
{
	struct emac_instance *dev = param;
	int slot = dev->rx_slot, received = 0;

	DBG2(dev, "poll_rx(%d)" NL, budget);

 again:
	while (budget > 0) {
		int len;
		struct sk_buff *skb;
		u16 ctrl = dev->rx_desc[slot].ctrl;

		if (ctrl & MAL_RX_CTRL_EMPTY)
			break;

		skb = dev->rx_skb[slot];
		mb();
		len = dev->rx_desc[slot].data_len;

		if (unlikely(!MAL_IS_SINGLE_RX(ctrl)))
			goto sg;

		ctrl &= EMAC_BAD_RX_MASK;
		if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
			emac_parse_rx_error(dev, ctrl);
			++dev->estats.rx_dropped_error;
			emac_recycle_rx_skb(dev, slot, 0);
			len = 0;
			goto next;
		}

		if (len < ETH_HLEN) {
			++dev->estats.rx_dropped_stack;
			emac_recycle_rx_skb(dev, slot, len);
			goto next;
		}

		if (len && len < EMAC_RX_COPY_THRESH) {
			struct sk_buff *copy_skb =
			    alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2, GFP_ATOMIC);
			if (unlikely(!copy_skb))
				goto oom;

			skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2);
			cacheable_memcpy(copy_skb->data - 2, skb->data - 2,
					 len + 2);
			emac_recycle_rx_skb(dev, slot, len);
			skb = copy_skb;
		} else if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC)))
			goto oom;

		skb_put(skb, len);
	push_packet:
		skb->dev = dev->ndev;
		skb->protocol = eth_type_trans(skb, dev->ndev);
		emac_rx_csum(dev, skb, ctrl);

		if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
			++dev->estats.rx_dropped_stack;
	next:
		++dev->stats.rx_packets;
	skip:
		dev->stats.rx_bytes += len;
		slot = (slot + 1) % NUM_RX_BUFF;
		--budget;
		++received;
		continue;
	sg:
		if (ctrl & MAL_RX_CTRL_FIRST) {
			BUG_ON(dev->rx_sg_skb);
			if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) {
				DBG(dev, "rx OOM %d" NL, slot);
				++dev->estats.rx_dropped_oom;
				emac_recycle_rx_skb(dev, slot, 0);
			} else {
				dev->rx_sg_skb = skb;
				skb_put(skb, len);
			}
		} else if (!emac_rx_sg_append(dev, slot) &&
			   (ctrl & MAL_RX_CTRL_LAST)) {

			skb = dev->rx_sg_skb;
			dev->rx_sg_skb = NULL;

			ctrl &= EMAC_BAD_RX_MASK;
			if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
				emac_parse_rx_error(dev, ctrl);
				++dev->estats.rx_dropped_error;
				dev_kfree_skb(skb);
				len = 0;
			} else
				goto push_packet;
		}
		goto skip;
	oom:
		DBG(dev, "rx OOM %d" NL, slot);
		/* Drop the packet and recycle skb */
		++dev->estats.rx_dropped_oom;
		emac_recycle_rx_skb(dev, slot, 0);
		goto next;
	}

	if (received) {
		DBG2(dev, "rx %d BDs" NL, received);
		dev->rx_slot = slot;
	}

	if (unlikely(budget && test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags))) {
		mb();
		if (!(dev->rx_desc[slot].ctrl & MAL_RX_CTRL_EMPTY)) {
			DBG2(dev, "rx restart" NL);
			received = 0;
			goto again;
		}

		if (dev->rx_sg_skb) {
			DBG2(dev, "dropping partial rx packet" NL);
			++dev->estats.rx_dropped_error;
			dev_kfree_skb(dev->rx_sg_skb);
			dev->rx_sg_skb = NULL;
		}

		clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
		mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
		emac_rx_enable(dev);

		dev->rx_slot = 0;
	}
	return received;
}
/* NAPI poll context */
static int emac_peek_rx(void *param)
{
	struct emac_instance *dev = param;

	return !(dev->rx_desc[dev->rx_slot].ctrl & MAL_RX_CTRL_EMPTY);
}

/* NAPI poll context */
static int emac_peek_rx_sg(void *param)
{
	struct emac_instance *dev = param;

	int slot = dev->rx_slot;
	while (1) {
		u16 ctrl = dev->rx_desc[slot].ctrl;
		if (ctrl & MAL_RX_CTRL_EMPTY)
			return 0;
		else if (ctrl & MAL_RX_CTRL_LAST)
			return 1;

		slot = (slot + 1) % NUM_RX_BUFF;

		/* I'm just being paranoid here :) */
		if (unlikely(slot == dev->rx_slot))
			return 0;
	}
}
/* Hard IRQ */
static void emac_rxde(void *param)
{
	struct emac_instance *dev = param;

	++dev->estats.rx_stopped;
	emac_rx_disable_async(dev);
}
/* Hard IRQ */
static irqreturn_t emac_irq(int irq, void *dev_instance)
{
	struct emac_instance *dev = dev_instance;
	struct emac_regs __iomem *p = dev->emacp;
	struct emac_error_stats *st = &dev->estats;
	u32 isr;

	spin_lock(&dev->lock);

	isr = in_be32(&p->isr);
	out_be32(&p->isr, isr);	/* ack the events we are about to handle */

	DBG(dev, "isr = %08x" NL, isr);

	if (isr & EMAC4_ISR_TXPE)
		++st->tx_parity;
	if (isr & EMAC4_ISR_RXPE)
		++st->rx_parity;
	if (isr & EMAC4_ISR_TXUE)
		++st->tx_underrun;
	if (isr & EMAC4_ISR_RXOE)
		++st->rx_fifo_overrun;
	if (isr & EMAC_ISR_OVR)
		++st->rx_overrun;
	if (isr & EMAC_ISR_BP)
		++st->rx_bad_packet;
	if (isr & EMAC_ISR_RP)
		++st->rx_runt_packet;
	if (isr & EMAC_ISR_SE)
		++st->rx_short_event;
	if (isr & EMAC_ISR_ALE)
		++st->rx_alignment_error;
	if (isr & EMAC_ISR_BFCS)
		++st->rx_bad_fcs;
	if (isr & EMAC_ISR_PTLE)
		++st->rx_packet_too_long;
	if (isr & EMAC_ISR_ORE)
		++st->rx_out_of_range;
	if (isr & EMAC_ISR_IRE)
		++st->rx_in_range;
	if (isr & EMAC_ISR_SQE)
		++st->tx_sqe;
	if (isr & EMAC_ISR_TE)
		++st->tx_errors;

	spin_unlock(&dev->lock);

	return IRQ_HANDLED;
}
static struct net_device_stats *emac_stats(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	struct emac_stats *st = &dev->stats;
	struct emac_error_stats *est = &dev->estats;
	struct net_device_stats *nst = &dev->nstats;
	unsigned long flags;

	DBG2(dev, "stats" NL);

	/* Compute "legacy" statistics */
	spin_lock_irqsave(&dev->lock, flags);
	nst->rx_packets = (unsigned long)st->rx_packets;
	nst->rx_bytes = (unsigned long)st->rx_bytes;
	nst->tx_packets = (unsigned long)st->tx_packets;
	nst->tx_bytes = (unsigned long)st->tx_bytes;
	nst->rx_dropped = (unsigned long)(est->rx_dropped_oom +
					  est->rx_dropped_error +
					  est->rx_dropped_resize +
					  est->rx_dropped_mtu);
	nst->tx_dropped = (unsigned long)est->tx_dropped;

	nst->rx_errors = (unsigned long)est->rx_bd_errors;
	nst->rx_fifo_errors = (unsigned long)(est->rx_bd_overrun +
					      est->rx_fifo_overrun +
					      est->rx_overrun);
	nst->rx_frame_errors = (unsigned long)(est->rx_bd_alignment_error +
					       est->rx_alignment_error);
	nst->rx_crc_errors = (unsigned long)(est->rx_bd_bad_fcs +
					     est->rx_bad_fcs);
	nst->rx_length_errors = (unsigned long)(est->rx_bd_runt_packet +
						est->rx_bd_short_event +
						est->rx_bd_packet_too_long +
						est->rx_bd_out_of_range +
						est->rx_bd_in_range +
						est->rx_runt_packet +
						est->rx_short_event +
						est->rx_packet_too_long +
						est->rx_out_of_range +
						est->rx_in_range);

	nst->tx_errors = (unsigned long)(est->tx_bd_errors + est->tx_errors);
	nst->tx_fifo_errors = (unsigned long)(est->tx_bd_underrun +
					      est->tx_underrun);
	nst->tx_carrier_errors = (unsigned long)est->tx_bd_carrier_loss;
	nst->collisions = (unsigned long)(est->tx_bd_excessive_deferral +
					  est->tx_bd_excessive_collisions +
					  est->tx_bd_late_collision +
					  est->tx_bd_multple_collisions);
	spin_unlock_irqrestore(&dev->lock, flags);
	return nst;
}
static struct mal_commac_ops emac_commac_ops = {
	.poll_tx = &emac_poll_tx,
	.poll_rx = &emac_poll_rx,
	.peek_rx = &emac_peek_rx,
	.rxde = &emac_rxde,
};

static struct mal_commac_ops emac_commac_sg_ops = {
	.poll_tx = &emac_poll_tx,
	.poll_rx = &emac_poll_rx,
	.peek_rx = &emac_peek_rx_sg,
	.rxde = &emac_rxde,
};
/* Ethtool support */
static int emac_ethtool_get_settings(struct net_device *ndev,
				     struct ethtool_cmd *cmd)
{
	struct emac_instance *dev = netdev_priv(ndev);

	cmd->supported = dev->phy.features;
	cmd->port = PORT_MII;
	cmd->phy_address = dev->phy.address;
	cmd->transceiver =
	    dev->phy.address >= 0 ? XCVR_EXTERNAL : XCVR_INTERNAL;

	mutex_lock(&dev->link_lock);
	cmd->advertising = dev->phy.advertising;
	cmd->autoneg = dev->phy.autoneg;
	cmd->speed = dev->phy.speed;
	cmd->duplex = dev->phy.duplex;
	mutex_unlock(&dev->link_lock);

	return 0;
}
static int emac_ethtool_set_settings(struct net_device *ndev,
				     struct ethtool_cmd *cmd)
{
	struct emac_instance *dev = netdev_priv(ndev);
	u32 f = dev->phy.features;

	DBG(dev, "set_settings(%d, %d, %d, 0x%08x)" NL,
	    cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising);

	/* Basic sanity checks */
	if (dev->phy.address < 0)
		return -EOPNOTSUPP;
	if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;
	if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
		return -EINVAL;
	if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_DISABLE) {
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_HALF
			    && !(f & SUPPORTED_10baseT_Half))
				return -EINVAL;
			if (cmd->duplex == DUPLEX_FULL
			    && !(f & SUPPORTED_10baseT_Full))
				return -EINVAL;
			break;
		case SPEED_100:
			if (cmd->duplex == DUPLEX_HALF
			    && !(f & SUPPORTED_100baseT_Half))
				return -EINVAL;
			if (cmd->duplex == DUPLEX_FULL
			    && !(f & SUPPORTED_100baseT_Full))
				return -EINVAL;
			break;
		case SPEED_1000:
			if (cmd->duplex == DUPLEX_HALF
			    && !(f & SUPPORTED_1000baseT_Half))
				return -EINVAL;
			if (cmd->duplex == DUPLEX_FULL
			    && !(f & SUPPORTED_1000baseT_Full))
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}

		mutex_lock(&dev->link_lock);
		dev->phy.def->ops->setup_forced(&dev->phy, cmd->speed,
						cmd->duplex);
		mutex_unlock(&dev->link_lock);

	} else {
		if (!(f & SUPPORTED_Autoneg))
			return -EINVAL;

		mutex_lock(&dev->link_lock);
		dev->phy.def->ops->setup_aneg(&dev->phy,
					      (cmd->advertising & f) |
					      (dev->phy.advertising &
					       (ADVERTISED_Pause |
						ADVERTISED_Asym_Pause)));
		mutex_unlock(&dev->link_lock);
	}
	emac_force_link_update(dev);

	return 0;
}
static void emac_ethtool_get_ringparam(struct net_device *ndev,
				       struct ethtool_ringparam *rp)
{
	rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF;
	rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF;
}

static void emac_ethtool_get_pauseparam(struct net_device *ndev,
					struct ethtool_pauseparam *pp)
{
	struct emac_instance *dev = netdev_priv(ndev);

	mutex_lock(&dev->link_lock);
	if ((dev->phy.features & SUPPORTED_Autoneg) &&
	    (dev->phy.advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause)))
		pp->autoneg = 1;

	if (dev->phy.duplex == DUPLEX_FULL) {
		if (dev->phy.pause)
			pp->rx_pause = pp->tx_pause = 1;
		else if (dev->phy.asym_pause)
			pp->tx_pause = 1;
	}
	mutex_unlock(&dev->link_lock);
}
static u32 emac_ethtool_get_rx_csum(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	return dev->tah_dev != NULL;
}

static int emac_get_regs_len(struct emac_instance *dev)
{
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		return sizeof(struct emac_ethtool_regs_subhdr) +
			EMAC4_ETHTOOL_REGS_SIZE(dev);
	else
		return sizeof(struct emac_ethtool_regs_subhdr) +
			EMAC_ETHTOOL_REGS_SIZE(dev);
}
static int emac_ethtool_get_regs_len(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int size;

	size = sizeof(struct emac_ethtool_regs_hdr) +
		emac_get_regs_len(dev) + mal_get_regs_len(dev->mal);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		size += zmii_get_regs_len(dev->zmii_dev);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		size += rgmii_get_regs_len(dev->rgmii_dev);
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		size += tah_get_regs_len(dev->tah_dev);

	return size;
}
static void *emac_dump_regs(struct emac_instance *dev, void *buf)
{
	struct emac_ethtool_regs_subhdr *hdr = buf;

	hdr->index = dev->cell_index;
	if (emac_has_feature(dev, EMAC_FTR_EMAC4)) {
		hdr->version = EMAC4_ETHTOOL_REGS_VER;
		memcpy_fromio(hdr + 1, dev->emacp, EMAC4_ETHTOOL_REGS_SIZE(dev));
		return (void *)(hdr + 1) + EMAC4_ETHTOOL_REGS_SIZE(dev);
	} else {
		hdr->version = EMAC_ETHTOOL_REGS_VER;
		memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE(dev));
		return (void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE(dev);
	}
}
static void emac_ethtool_get_regs(struct net_device *ndev,
				  struct ethtool_regs *regs, void *buf)
{
	struct emac_instance *dev = netdev_priv(ndev);
	struct emac_ethtool_regs_hdr *hdr = buf;

	hdr->components = 0;
	buf = hdr + 1;

	buf = mal_dump_regs(dev->mal, buf);
	buf = emac_dump_regs(dev, buf);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) {
		hdr->components |= EMAC_ETHTOOL_REGS_ZMII;
		buf = zmii_dump_regs(dev->zmii_dev, buf);
	}
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) {
		hdr->components |= EMAC_ETHTOOL_REGS_RGMII;
		buf = rgmii_dump_regs(dev->rgmii_dev, buf);
	}
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH)) {
		hdr->components |= EMAC_ETHTOOL_REGS_TAH;
		buf = tah_dump_regs(dev->tah_dev, buf);
	}
}
static int emac_ethtool_nway_reset(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int res = 0;

	DBG(dev, "nway_reset" NL);

	if (dev->phy.address < 0)
		return -EOPNOTSUPP;

	mutex_lock(&dev->link_lock);
	if (!dev->phy.autoneg) {
		res = -EINVAL;
		goto out;
	}

	dev->phy.def->ops->setup_aneg(&dev->phy, dev->phy.advertising);
 out:
	mutex_unlock(&dev->link_lock);
	emac_force_link_update(dev);
	return res;
}
static int emac_ethtool_get_stats_count(struct net_device *ndev)
{
	return EMAC_ETHTOOL_STATS_COUNT;
}

static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset,
				     u8 *buf)
{
	if (stringset == ETH_SS_STATS)
		memcpy(buf, &emac_stats_keys, sizeof(emac_stats_keys));
}

static void emac_ethtool_get_ethtool_stats(struct net_device *ndev,
					   struct ethtool_stats *estats,
					   u64 *tmp_stats)
{
	struct emac_instance *dev = netdev_priv(ndev);

	memcpy(tmp_stats, &dev->stats, sizeof(dev->stats));
	tmp_stats += sizeof(dev->stats) / sizeof(u64);
	memcpy(tmp_stats, &dev->estats, sizeof(dev->estats));
}
static void emac_ethtool_get_drvinfo(struct net_device *ndev,
				     struct ethtool_drvinfo *info)
{
	struct emac_instance *dev = netdev_priv(ndev);

	strcpy(info->driver, "ibm_emac");
	strcpy(info->version, DRV_VERSION);
	info->fw_version[0] = '\0';
	sprintf(info->bus_info, "PPC 4xx EMAC-%d %s",
		dev->cell_index, dev->ofdev->node->full_name);
	info->n_stats = emac_ethtool_get_stats_count(ndev);
	info->regdump_len = emac_ethtool_get_regs_len(ndev);
}
static const struct ethtool_ops emac_ethtool_ops = {
	.get_settings = emac_ethtool_get_settings,
	.set_settings = emac_ethtool_set_settings,
	.get_drvinfo = emac_ethtool_get_drvinfo,

	.get_regs_len = emac_ethtool_get_regs_len,
	.get_regs = emac_ethtool_get_regs,

	.nway_reset = emac_ethtool_nway_reset,

	.get_ringparam = emac_ethtool_get_ringparam,
	.get_pauseparam = emac_ethtool_get_pauseparam,

	.get_rx_csum = emac_ethtool_get_rx_csum,

	.get_strings = emac_ethtool_get_strings,
	.get_stats_count = emac_ethtool_get_stats_count,
	.get_ethtool_stats = emac_ethtool_get_ethtool_stats,

	.get_link = ethtool_op_get_link,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.get_sg = ethtool_op_get_sg,
};
static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
	struct emac_instance *dev = netdev_priv(ndev);
	uint16_t *data = (uint16_t *) &rq->ifr_ifru;

	DBG(dev, "ioctl %08x" NL, cmd);

	if (dev->phy.address < 0)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCDEVPRIVATE:
		data[0] = dev->phy.address;
		/* Fall through */
	case SIOCGMIIREG:
	case SIOCDEVPRIVATE + 1:
		data[3] = emac_mdio_read(ndev, dev->phy.address, data[1]);
		return 0;

	case SIOCSMIIREG:
	case SIOCDEVPRIVATE + 2:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		emac_mdio_write(ndev, dev->phy.address, data[1], data[2]);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
struct emac_depentry {
	u32			phandle;
	struct device_node	*node;
	struct of_device	*ofdev;
	void			*drvdata;
};

#define	EMAC_DEP_MAL_IDX	0
#define	EMAC_DEP_ZMII_IDX	1
#define	EMAC_DEP_RGMII_IDX	2
#define	EMAC_DEP_TAH_IDX	3
#define	EMAC_DEP_MDIO_IDX	4
#define	EMAC_DEP_PREV_IDX	5
#define	EMAC_DEP_COUNT		6
static int __devinit emac_check_deps(struct emac_instance *dev,
				     struct emac_depentry *deps)
{
	int i, there = 0;
	struct device_node *np;

	for (i = 0; i < EMAC_DEP_COUNT; i++) {
		/* no dependency on that item, alright */
		if (deps[i].phandle == 0) {
			there++;
			continue;
		}
		/* special case for blist as the dependency might go away */
		if (i == EMAC_DEP_PREV_IDX) {
			np = *(dev->blist - 1);
			if (np == NULL) {
				deps[i].phandle = 0;
				there++;
				continue;
			}
			if (deps[i].node == NULL)
				deps[i].node = of_node_get(np);
		}
		if (deps[i].node == NULL)
			deps[i].node = of_find_node_by_phandle(deps[i].phandle);
		if (deps[i].node == NULL)
			continue;
		if (deps[i].ofdev == NULL)
			deps[i].ofdev = of_find_device_by_node(deps[i].node);
		if (deps[i].ofdev == NULL)
			continue;
		if (deps[i].drvdata == NULL)
			deps[i].drvdata = dev_get_drvdata(&deps[i].ofdev->dev);
		if (deps[i].drvdata != NULL)
			there++;
	}
	return there == EMAC_DEP_COUNT;
}
static void emac_put_deps(struct emac_instance *dev)
{
	if (dev->mal_dev)
		of_dev_put(dev->mal_dev);
	if (dev->zmii_dev)
		of_dev_put(dev->zmii_dev);
	if (dev->rgmii_dev)
		of_dev_put(dev->rgmii_dev);
	if (dev->mdio_dev)
		of_dev_put(dev->mdio_dev);
	if (dev->tah_dev)
		of_dev_put(dev->tah_dev);
}
static int __devinit emac_of_bus_notify(struct notifier_block *nb,
					unsigned long action, void *data)
{
	/* We are only interested in device addition */
	if (action == BUS_NOTIFY_BOUND_DRIVER)
		wake_up_all(&emac_probe_wait);
	return 0;
}

static struct notifier_block emac_of_bus_notifier __devinitdata = {
	.notifier_call = emac_of_bus_notify
};
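/* emac_wait_deps() below parks the probe on emac_probe_wait until
 * emac_check_deps() has found bound drivers (i.e. non-NULL drvdata) for
 * every dependency. The bus notifier above kicks the waitqueue each time
 * any of_platform driver binds, and EMAC_PROBE_DEP_TIMEOUT bounds the
 * wait so a missing dependency fails the probe instead of hanging it.
 */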
static int __devinit emac_wait_deps(struct emac_instance *dev)
{
	struct emac_depentry deps[EMAC_DEP_COUNT];
	int i, err;

	memset(&deps, 0, sizeof(deps));

	deps[EMAC_DEP_MAL_IDX].phandle = dev->mal_ph;
	deps[EMAC_DEP_ZMII_IDX].phandle = dev->zmii_ph;
	deps[EMAC_DEP_RGMII_IDX].phandle = dev->rgmii_ph;
	if (dev->tah_ph)
		deps[EMAC_DEP_TAH_IDX].phandle = dev->tah_ph;
	if (dev->mdio_ph)
		deps[EMAC_DEP_MDIO_IDX].phandle = dev->mdio_ph;
	if (dev->blist && dev->blist > emac_boot_list)
		deps[EMAC_DEP_PREV_IDX].phandle = 0xffffffffu;
	bus_register_notifier(&of_platform_bus_type, &emac_of_bus_notifier);
	wait_event_timeout(emac_probe_wait,
			   emac_check_deps(dev, deps),
			   EMAC_PROBE_DEP_TIMEOUT);
	bus_unregister_notifier(&of_platform_bus_type, &emac_of_bus_notifier);
	err = emac_check_deps(dev, deps) ? 0 : -ENODEV;
	for (i = 0; i < EMAC_DEP_COUNT; i++) {
		if (deps[i].node)
			of_node_put(deps[i].node);
		if (err && deps[i].ofdev)
			of_dev_put(deps[i].ofdev);
	}
	if (err == 0) {
		dev->mal_dev = deps[EMAC_DEP_MAL_IDX].ofdev;
		dev->zmii_dev = deps[EMAC_DEP_ZMII_IDX].ofdev;
		dev->rgmii_dev = deps[EMAC_DEP_RGMII_IDX].ofdev;
		dev->tah_dev = deps[EMAC_DEP_TAH_IDX].ofdev;
		dev->mdio_dev = deps[EMAC_DEP_MDIO_IDX].ofdev;
	}
	if (deps[EMAC_DEP_PREV_IDX].ofdev)
		of_dev_put(deps[EMAC_DEP_PREV_IDX].ofdev);
	return err;
}
static int __devinit emac_read_uint_prop(struct device_node *np, const char *name,
					 u32 *val, int fatal)
{
	int len;
	const u32 *prop = of_get_property(np, name, &len);
	if (prop == NULL || len < sizeof(u32)) {
		if (fatal)
			printk(KERN_ERR "%s: missing %s property\n",
			       np->full_name, name);
		return -ENODEV;
	}
	*val = *prop;
	return 0;
}
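/* Typical use, as seen in emac_init_config() below: a fatal property
 * like "cell-index" aborts the probe with an error message, while a
 * non-fatal one like "max-frame-size" silently falls back to a default
 * chosen by the caller.
 */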
static int __devinit emac_init_phy(struct emac_instance *dev)
{
	struct device_node *np = dev->ofdev->node;
	struct net_device *ndev = dev->ndev;
	u32 phy_map, adv;
	int i;

	dev->phy.dev = ndev;
	dev->phy.mode = dev->phy_mode;

	/* PHY-less configuration.
	 * XXX I probably should move these settings to the dev tree
	 */
	if (dev->phy_address == 0xffffffff && dev->phy_map == 0xffffffff) {
		emac_reset(dev);

		dev->phy.address = -1;
		dev->phy.features = SUPPORTED_MII;
		if (emac_phy_supports_gige(dev->phy_mode))
			dev->phy.features |= SUPPORTED_1000baseT_Full;
		else
			dev->phy.features |= SUPPORTED_100baseT_Full;
		dev->phy.pause = 1;

		return 0;
	}

	mutex_lock(&emac_phy_map_lock);
	phy_map = dev->phy_map | busy_phy_map;

	DBG(dev, "PHY maps %08x %08x" NL, dev->phy_map, busy_phy_map);

	dev->phy.mdio_read = emac_mdio_read;
	dev->phy.mdio_write = emac_mdio_write;

	/* Enable internal clock source on 440GX */
#ifdef CONFIG_PPC_DCR_NATIVE
	if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
		dcri_clrset(SDR0, SDR0_MFR, 0, SDR0_MFR_ECS);
#endif
	/* PHY clock workaround */
	emac_rx_clk_tx(dev);

	/* Configure EMAC with defaults so we can at least use MDIO
	 * This is needed mostly for 440GX
	 */
	if (emac_phy_gpcs(dev->phy.mode)) {
		/* XXX
		 * Make GPCS PHY address equal to EMAC index.
		 * We probably should take into account busy_phy_map
		 * and/or phy_map here.
		 *
		 * Note that the busy_phy_map is currently global
		 * while it should probably be per-ASIC...
		 */
		dev->phy.gpcs_address = dev->gpcs_address;
		if (dev->phy.gpcs_address == 0xffffffff)
			dev->phy.address = dev->cell_index;
	}

	emac_configure(dev);

	if (dev->phy_address != 0xffffffff)
		phy_map = ~(1 << dev->phy_address);

	for (i = 0; i < 0x20; phy_map >>= 1, ++i)
		if (!(phy_map & 1)) {
			int r;
			busy_phy_map |= 1 << i;

			/* Quick check if there is a PHY at the address */
			r = emac_mdio_read(dev->ndev, i, MII_BMCR);
			if (r == 0xffff || r < 0)
				continue;
			if (!emac_mii_phy_probe(&dev->phy, i))
				break;
		}

	/* Enable external clock source */
#ifdef CONFIG_PPC_DCR_NATIVE
	if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
		dcri_clrset(SDR0, SDR0_MFR, SDR0_MFR_ECS, 0);
#endif
	mutex_unlock(&emac_phy_map_lock);
	if (i == 0x20) {
		printk(KERN_WARNING "%s: can't find PHY!\n", np->full_name);
		return -ENXIO;
	}

	/* Init PHY */
	if (dev->phy.def->ops->init)
		dev->phy.def->ops->init(&dev->phy);

	/* Disable any PHY features not supported by the platform */
	dev->phy.def->features &= ~dev->phy_feat_exc;

	/* Setup initial link parameters */
	if (dev->phy.features & SUPPORTED_Autoneg) {
		adv = dev->phy.features;
		if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x))
			adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
		/* Restart autonegotiation */
		dev->phy.def->ops->setup_aneg(&dev->phy, adv);
	} else {
		u32 f = dev->phy.def->features;
		int speed = SPEED_10, fd = DUPLEX_HALF;

		/* Select highest supported speed/duplex */
		if (f & SUPPORTED_1000baseT_Full) {
			speed = SPEED_1000;
			fd = DUPLEX_FULL;
		} else if (f & SUPPORTED_1000baseT_Half)
			speed = SPEED_1000;
		else if (f & SUPPORTED_100baseT_Full) {
			speed = SPEED_100;
			fd = DUPLEX_FULL;
		} else if (f & SUPPORTED_100baseT_Half)
			speed = SPEED_100;
		else if (f & SUPPORTED_10baseT_Full)
			fd = DUPLEX_FULL;

		/* Force link parameters */
		dev->phy.def->ops->setup_forced(&dev->phy, speed, fd);
	}
	return 0;
}
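/* A note on the bitmaps used above: a set bit in phy_map means "do not
 * probe this MDIO address". For example, phy-map = 0xfffffffe leaves only
 * address 0 to be scanned, and an explicit phy-address N turns the map
 * into ~(1 << N) so that only N is tried. busy_phy_map accumulates
 * addresses already claimed by other EMACs sharing the MDIO lines.
 */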
static int __devinit emac_init_config(struct emac_instance *dev)
{
	struct device_node *np = dev->ofdev->node;
	const void *p;
	int plen;
	const char *pm, *phy_modes[] = {
		[PHY_MODE_NA] = "",
		[PHY_MODE_MII] = "mii",
		[PHY_MODE_RMII] = "rmii",
		[PHY_MODE_SMII] = "smii",
		[PHY_MODE_RGMII] = "rgmii",
		[PHY_MODE_TBI] = "tbi",
		[PHY_MODE_GMII] = "gmii",
		[PHY_MODE_RTBI] = "rtbi",
		[PHY_MODE_SGMII] = "sgmii",
	};

	/* Read config from device-tree */
	if (emac_read_uint_prop(np, "mal-device", &dev->mal_ph, 1))
		return -ENXIO;
	if (emac_read_uint_prop(np, "mal-tx-channel", &dev->mal_tx_chan, 1))
		return -ENXIO;
	if (emac_read_uint_prop(np, "mal-rx-channel", &dev->mal_rx_chan, 1))
		return -ENXIO;
	if (emac_read_uint_prop(np, "cell-index", &dev->cell_index, 1))
		return -ENXIO;
	if (emac_read_uint_prop(np, "max-frame-size", &dev->max_mtu, 0))
		dev->max_mtu = 1500;
	if (emac_read_uint_prop(np, "rx-fifo-size", &dev->rx_fifo_size, 0))
		dev->rx_fifo_size = 2048;
	if (emac_read_uint_prop(np, "tx-fifo-size", &dev->tx_fifo_size, 0))
		dev->tx_fifo_size = 2048;
	if (emac_read_uint_prop(np, "rx-fifo-size-gige", &dev->rx_fifo_size_gige, 0))
		dev->rx_fifo_size_gige = dev->rx_fifo_size;
	if (emac_read_uint_prop(np, "tx-fifo-size-gige", &dev->tx_fifo_size_gige, 0))
		dev->tx_fifo_size_gige = dev->tx_fifo_size;
	if (emac_read_uint_prop(np, "phy-address", &dev->phy_address, 0))
		dev->phy_address = 0xffffffff;
	if (emac_read_uint_prop(np, "phy-map", &dev->phy_map, 0))
		dev->phy_map = 0xffffffff;
	if (emac_read_uint_prop(np, "gpcs-address", &dev->gpcs_address, 0))
		dev->gpcs_address = 0xffffffff;
	if (emac_read_uint_prop(np->parent, "clock-frequency", &dev->opb_bus_freq, 1))
		return -ENXIO;
	if (emac_read_uint_prop(np, "tah-device", &dev->tah_ph, 0))
		dev->tah_ph = 0;
	if (emac_read_uint_prop(np, "tah-channel", &dev->tah_port, 0))
		dev->tah_port = 0;
	if (emac_read_uint_prop(np, "mdio-device", &dev->mdio_ph, 0))
		dev->mdio_ph = 0;
	if (emac_read_uint_prop(np, "zmii-device", &dev->zmii_ph, 0))
		dev->zmii_ph = 0;
	if (emac_read_uint_prop(np, "zmii-channel", &dev->zmii_port, 0))
		dev->zmii_port = 0xffffffff;
	if (emac_read_uint_prop(np, "rgmii-device", &dev->rgmii_ph, 0))
		dev->rgmii_ph = 0;
	if (emac_read_uint_prop(np, "rgmii-channel", &dev->rgmii_port, 0))
		dev->rgmii_port = 0xffffffff;
	if (emac_read_uint_prop(np, "fifo-entry-size", &dev->fifo_entry_size, 0))
		dev->fifo_entry_size = 16;
	if (emac_read_uint_prop(np, "mal-burst-size", &dev->mal_burst_size, 0))
		dev->mal_burst_size = 256;

	/* PHY mode needs some decoding */
	dev->phy_mode = PHY_MODE_NA;
	pm = of_get_property(np, "phy-mode", &plen);
	if (pm != NULL) {
		int i;
		for (i = 0; i < ARRAY_SIZE(phy_modes); i++)
			if (!strcasecmp(pm, phy_modes[i])) {
				dev->phy_mode = i;
				break;
			}
	}

	/* Backward compat with non-final DT */
	if (dev->phy_mode == PHY_MODE_NA && pm != NULL && plen == 4) {
		u32 nmode = *(const u32 *)pm;
		if (nmode > PHY_MODE_NA && nmode <= PHY_MODE_SGMII)
			dev->phy_mode = nmode;
	}

	/* Check EMAC version */
	if (of_device_is_compatible(np, "ibm,emac4sync")) {
		dev->features |= (EMAC_FTR_EMAC4 | EMAC_FTR_EMAC4SYNC);
		if (of_device_is_compatible(np, "ibm,emac-460ex") ||
		    of_device_is_compatible(np, "ibm,emac-460gt"))
			dev->features |= EMAC_FTR_460EX_PHY_CLK_FIX;
	} else if (of_device_is_compatible(np, "ibm,emac4")) {
		dev->features |= EMAC_FTR_EMAC4;
		if (of_device_is_compatible(np, "ibm,emac-440gx"))
			dev->features |= EMAC_FTR_440GX_PHY_CLK_FIX;
	} else {
		if (of_device_is_compatible(np, "ibm,emac-440ep") ||
		    of_device_is_compatible(np, "ibm,emac-440gr"))
			dev->features |= EMAC_FTR_440EP_PHY_CLK_FIX;
		if (of_device_is_compatible(np, "ibm,emac-405ez")) {
#ifdef CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL
			dev->features |= EMAC_FTR_NO_FLOW_CONTROL_40x;
#else
			printk(KERN_ERR "%s: Flow control not disabled!\n",
			       np->full_name);
			return -ENXIO;
#endif
		}
	}

	/* Fixup some feature bits based on the device tree */
	if (of_get_property(np, "has-inverted-stacr-oc", NULL))
		dev->features |= EMAC_FTR_STACR_OC_INVERT;
	if (of_get_property(np, "has-new-stacr-staopc", NULL))
		dev->features |= EMAC_FTR_HAS_NEW_STACR;

	/* CAB lacks the appropriate properties */
	if (of_device_is_compatible(np, "ibm,emac-axon"))
		dev->features |= EMAC_FTR_HAS_NEW_STACR |
			EMAC_FTR_STACR_OC_INVERT;

	/* Enable TAH/ZMII/RGMII features as found */
	if (dev->tah_ph != 0) {
#ifdef CONFIG_IBM_NEW_EMAC_TAH
		dev->features |= EMAC_FTR_HAS_TAH;
#else
		printk(KERN_ERR "%s: TAH support not enabled!\n",
		       np->full_name);
		return -ENXIO;
#endif
	}
	if (dev->zmii_ph != 0) {
#ifdef CONFIG_IBM_NEW_EMAC_ZMII
		dev->features |= EMAC_FTR_HAS_ZMII;
#else
		printk(KERN_ERR "%s: ZMII support not enabled!\n",
		       np->full_name);
		return -ENXIO;
#endif
	}
	if (dev->rgmii_ph != 0) {
#ifdef CONFIG_IBM_NEW_EMAC_RGMII
		dev->features |= EMAC_FTR_HAS_RGMII;
#else
		printk(KERN_ERR "%s: RGMII support not enabled!\n",
		       np->full_name);
		return -ENXIO;
#endif
	}

	/* Read MAC-address */
	p = of_get_property(np, "local-mac-address", NULL);
	if (p == NULL) {
		printk(KERN_ERR "%s: Can't find local-mac-address property\n",
		       np->full_name);
		return -ENXIO;
	}
	memcpy(dev->ndev->dev_addr, p, 6);

	/* IAHT and GAHT filter parameterization */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) {
		dev->xaht_slots_shift = EMAC4SYNC_XAHT_SLOTS_SHIFT;
		dev->xaht_width_shift = EMAC4SYNC_XAHT_WIDTH_SHIFT;
	} else {
		dev->xaht_slots_shift = EMAC4_XAHT_SLOTS_SHIFT;
		dev->xaht_width_shift = EMAC4_XAHT_WIDTH_SHIFT;
	}

	DBG(dev, "features     : 0x%08x / 0x%08x\n", dev->features, EMAC_FTRS_POSSIBLE);
	DBG(dev, "tx_fifo_size : %d (%d gige)\n", dev->tx_fifo_size, dev->tx_fifo_size_gige);
	DBG(dev, "rx_fifo_size : %d (%d gige)\n", dev->rx_fifo_size, dev->rx_fifo_size_gige);
	DBG(dev, "max_mtu      : %d\n", dev->max_mtu);
	DBG(dev, "OPB freq     : %d\n", dev->opb_bus_freq);

	return 0;
}
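/* For reference, a minimal device-tree node consumed by the code above
 * might look like this (values are made up for illustration, not taken
 * from any real board):
 *
 *	EMAC0: ethernet@ef600800 {
 *		device_type = "network";
 *		compatible = "ibm,emac4";
 *		cell-index = <0>;
 *		mal-device = <&MAL0>;
 *		mal-tx-channel = <0>;
 *		mal-rx-channel = <0>;
 *		max-frame-size = <1500>;
 *		rx-fifo-size = <2048>;
 *		tx-fifo-size = <2048>;
 *		phy-mode = "rgmii";
 *		phy-map = <0x00000000>;
 *		rgmii-device = <&RGMII0>;
 *		rgmii-channel = <0>;
 *		local-mac-address = [00 00 00 00 00 00];
 *	};
 *
 * Note that clock-frequency is read from the parent (OPB) node.
 */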
static int __devinit emac_probe(struct of_device *ofdev,
				const struct of_device_id *match)
{
	struct net_device *ndev;
	struct emac_instance *dev;
	struct device_node *np = ofdev->node;
	struct device_node **blist = NULL;
	int err, i;

	/* Skip unused/unwired EMACs. We leave the check for an unused
	 * property here for now, but new flat device trees should set a
	 * status property to "disabled" instead.
	 */
	if (of_get_property(np, "unused", NULL) || !of_device_is_available(np))
		return -ENODEV;

	/* Find ourselves in the bootlist if we are there */
	for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
		if (emac_boot_list[i] == np)
			blist = &emac_boot_list[i];

	/* Allocate our net_device structure */
	err = -ENOMEM;
	ndev = alloc_etherdev(sizeof(struct emac_instance));
	if (!ndev) {
		printk(KERN_ERR "%s: could not allocate ethernet device!\n",
		       np->full_name);
		goto err_gone;
	}
	dev = netdev_priv(ndev);
	dev->ndev = ndev;
	dev->ofdev = ofdev;
	dev->blist = blist;
	SET_NETDEV_DEV(ndev, &ofdev->dev);

	/* Initialize some embedded data structures */
	mutex_init(&dev->mdio_lock);
	mutex_init(&dev->link_lock);
	spin_lock_init(&dev->lock);
	INIT_WORK(&dev->reset_work, emac_reset_work);

	/* Init various config data based on device-tree */
	err = emac_init_config(dev);
	if (err != 0)
		goto err_free;

	/* Get interrupts. EMAC irq is mandatory, WOL irq is optional */
	dev->emac_irq = irq_of_parse_and_map(np, 0);
	dev->wol_irq = irq_of_parse_and_map(np, 1);
	if (dev->emac_irq == NO_IRQ) {
		printk(KERN_ERR "%s: Can't map main interrupt\n", np->full_name);
		goto err_free;
	}
	ndev->irq = dev->emac_irq;

	/* Map EMAC regs */
	if (of_address_to_resource(np, 0, &dev->rsrc_regs)) {
		printk(KERN_ERR "%s: Can't get registers address\n",
		       np->full_name);
		goto err_irq_unmap;
	}
	// TODO : request_mem_region
	dev->emacp = ioremap(dev->rsrc_regs.start,
			     dev->rsrc_regs.end - dev->rsrc_regs.start + 1);
	if (dev->emacp == NULL) {
		printk(KERN_ERR "%s: Can't map device registers!\n",
		       np->full_name);
		err = -ENOMEM;
		goto err_irq_unmap;
	}

	/* Wait for dependent devices */
	err = emac_wait_deps(dev);
	if (err) {
		printk(KERN_ERR
		       "%s: Timeout waiting for dependent devices\n",
		       np->full_name);
		/* display more info about what's missing? */
		goto err_reg_unmap;
	}
	dev->mal = dev_get_drvdata(&dev->mal_dev->dev);
	if (dev->mdio_dev != NULL)
		dev->mdio_instance = dev_get_drvdata(&dev->mdio_dev->dev);

	/* Register with MAL */
	dev->commac.ops = &emac_commac_ops;
	dev->commac.dev = dev;
	dev->commac.tx_chan_mask = MAL_CHAN_MASK(dev->mal_tx_chan);
	dev->commac.rx_chan_mask = MAL_CHAN_MASK(dev->mal_rx_chan);
	err = mal_register_commac(dev->mal, &dev->commac);
	if (err) {
		printk(KERN_ERR "%s: failed to register with mal %s!\n",
		       np->full_name, dev->mal_dev->node->full_name);
		goto err_rel_deps;
	}
	dev->rx_skb_size = emac_rx_skb_size(ndev->mtu);
	dev->rx_sync_size = emac_rx_sync_size(ndev->mtu);

	/* Get pointers to BD rings */
	dev->tx_desc =
	    dev->mal->bd_virt + mal_tx_bd_offset(dev->mal, dev->mal_tx_chan);
	dev->rx_desc =
	    dev->mal->bd_virt + mal_rx_bd_offset(dev->mal, dev->mal_rx_chan);

	DBG(dev, "tx_desc %p" NL, dev->tx_desc);
	DBG(dev, "rx_desc %p" NL, dev->rx_desc);

	/* Clean rings */
	memset(dev->tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor));
	memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor));
	memset(dev->tx_skb, 0, NUM_TX_BUFF * sizeof(struct sk_buff *));
	memset(dev->rx_skb, 0, NUM_RX_BUFF * sizeof(struct sk_buff *));

	/* Attach to ZMII, if needed */
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII) &&
	    (err = zmii_attach(dev->zmii_dev, dev->zmii_port, &dev->phy_mode)) != 0)
		goto err_unreg_commac;

	/* Attach to RGMII, if needed */
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII) &&
	    (err = rgmii_attach(dev->rgmii_dev, dev->rgmii_port, dev->phy_mode)) != 0)
		goto err_detach_zmii;

	/* Attach to TAH, if needed */
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
	    (err = tah_attach(dev->tah_dev, dev->tah_port)) != 0)
		goto err_detach_rgmii;

	/* Set some link defaults before we can find out real parameters */
	dev->phy.speed = SPEED_100;
	dev->phy.duplex = DUPLEX_FULL;
	dev->phy.autoneg = AUTONEG_DISABLE;
	dev->phy.pause = dev->phy.asym_pause = 0;
	dev->stop_timeout = STOP_TIMEOUT_100;
	INIT_DELAYED_WORK(&dev->link_work, emac_link_timer);

	/* Find PHY if any */
	err = emac_init_phy(dev);
	if (err != 0)
		goto err_detach_tah;

	/* Fill in the driver function table */
	ndev->open = &emac_open;
	if (dev->tah_dev)
		ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
	ndev->tx_timeout = &emac_tx_timeout;
	ndev->watchdog_timeo = 5 * HZ;
	ndev->stop = &emac_close;
	ndev->get_stats = &emac_stats;
	ndev->set_multicast_list = &emac_set_multicast_list;
	ndev->do_ioctl = &emac_ioctl;
	if (emac_phy_supports_gige(dev->phy_mode)) {
		ndev->hard_start_xmit = &emac_start_xmit_sg;
		ndev->change_mtu = &emac_change_mtu;
		dev->commac.ops = &emac_commac_sg_ops;
	} else
		ndev->hard_start_xmit = &emac_start_xmit;
	SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);

	netif_carrier_off(ndev);
	netif_stop_queue(ndev);

	err = register_netdev(ndev);
	if (err) {
		printk(KERN_ERR "%s: failed to register net device (%d)!\n",
		       np->full_name, err);
		goto err_detach_tah;
	}

	/* Set our drvdata last as we don't want them visible until we are
	 * fully initialized
	 */
	wmb();
	dev_set_drvdata(&ofdev->dev, dev);

	/* There's a new kid in town! Let's tell everybody */
	wake_up_all(&emac_probe_wait);

	printk(KERN_INFO
	       "%s: EMAC-%d %s, MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
	       ndev->name, dev->cell_index, np->full_name,
	       ndev->dev_addr[0], ndev->dev_addr[1], ndev->dev_addr[2],
	       ndev->dev_addr[3], ndev->dev_addr[4], ndev->dev_addr[5]);

	if (dev->phy_mode == PHY_MODE_SGMII)
		printk(KERN_NOTICE "%s: in SGMII mode\n", ndev->name);

	if (dev->phy.address >= 0)
		printk("%s: found %s PHY (0x%02x)\n", ndev->name,
		       dev->phy.def->name, dev->phy.address);

	emac_dbg_register(dev);

	/* Life is good */
	return 0;

	/* I have a bad feeling about this ... */

 err_detach_tah:
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		tah_detach(dev->tah_dev, dev->tah_port);
 err_detach_rgmii:
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
 err_detach_zmii:
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_detach(dev->zmii_dev, dev->zmii_port);
 err_unreg_commac:
	mal_unregister_commac(dev->mal, &dev->commac);
 err_rel_deps:
	emac_put_deps(dev);
 err_reg_unmap:
	iounmap(dev->emacp);
 err_irq_unmap:
	if (dev->wol_irq != NO_IRQ)
		irq_dispose_mapping(dev->wol_irq);
	if (dev->emac_irq != NO_IRQ)
		irq_dispose_mapping(dev->emac_irq);
 err_free:
	free_netdev(ndev);	/* alloc_etherdev() pairs with free_netdev() */
 err_gone:
	/* if we were on the bootlist, remove us as we won't show up and
	 * wake up all waiters to notify them in case they were waiting
	 * on us
	 */
	if (blist) {
		*blist = NULL;
		wake_up_all(&emac_probe_wait);
	}
	return err;
}
static int __devexit emac_remove(struct of_device *ofdev)
{
	struct emac_instance *dev = dev_get_drvdata(&ofdev->dev);

	DBG(dev, "remove" NL);

	dev_set_drvdata(&ofdev->dev, NULL);

	unregister_netdev(dev->ndev);

	flush_scheduled_work();

	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		tah_detach(dev->tah_dev, dev->tah_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_detach(dev->zmii_dev, dev->zmii_port);

	mal_unregister_commac(dev->mal, &dev->commac);
	emac_put_deps(dev);

	emac_dbg_unregister(dev);
	iounmap(dev->emacp);

	if (dev->wol_irq != NO_IRQ)
		irq_dispose_mapping(dev->wol_irq);
	if (dev->emac_irq != NO_IRQ)
		irq_dispose_mapping(dev->emac_irq);

	free_netdev(dev->ndev);	/* alloc_etherdev() pairs with free_netdev() */

	return 0;
}
/* XXX Features in here should be replaced by properties... */
static struct of_device_id emac_match[] =
{
	{
		.type		= "network",
		.compatible	= "ibm,emac",
	},
	{
		.type		= "network",
		.compatible	= "ibm,emac4",
	},
	{
		.type		= "network",
		.compatible	= "ibm,emac4sync",
	},
	{},
};

static struct of_platform_driver emac_driver = {
	.name = "emac",
	.match_table = emac_match,

	.probe = emac_probe,
	.remove = emac_remove,
};
static void __init emac_make_bootlist(void)
{
	struct device_node *np = NULL;
	int j, max, i = 0, k;
	int cell_indices[EMAC_BOOT_LIST_SIZE];

	/* Collect EMACs */
	while ((np = of_find_all_nodes(np)) != NULL) {
		const u32 *idx;

		if (of_match_node(emac_match, np) == NULL)
			continue;
		if (of_get_property(np, "unused", NULL))
			continue;
		idx = of_get_property(np, "cell-index", NULL);
		if (idx == NULL)
			continue;
		cell_indices[i] = *idx;
		emac_boot_list[i++] = of_node_get(np);
		if (i >= EMAC_BOOT_LIST_SIZE) {
			of_node_put(np);
			break;
		}
	}
	max = i;

	/* Bubble sort them (doh, what a creative algorithm :-) */
	for (i = 0; max > 1 && (i < (max - 1)); i++)
		for (j = i; j < max; j++) {
			if (cell_indices[i] > cell_indices[j]) {
				np = emac_boot_list[i];
				emac_boot_list[i] = emac_boot_list[j];
				emac_boot_list[j] = np;
				k = cell_indices[i];
				cell_indices[i] = cell_indices[j];
				cell_indices[j] = k;
			}
		}
}
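/* The sort matters: emac_wait_deps() makes each EMAC wait for the entry
 * preceding it in this list (EMAC_DEP_PREV_IDX), so ordering the list by
 * cell-index gives deterministic, in-order interface bring-up.
 */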
static int __init emac_init(void)
{
	int rc;

	printk(KERN_INFO DRV_DESC ", version " DRV_VERSION "\n");

	/* Init debug stuff */
	emac_init_debug();

	/* Build EMAC boot list */
	emac_make_bootlist();

	/* Init submodules */
	rc = mal_init();
	if (rc)
		goto err;
	rc = zmii_init();
	if (rc)
		goto err_mal;
	rc = rgmii_init();
	if (rc)
		goto err_zmii;
	rc = tah_init();
	if (rc)
		goto err_rgmii;
	rc = of_register_platform_driver(&emac_driver);
	if (rc)
		goto err_tah;

	return 0;

 err_tah:
	tah_exit();
 err_rgmii:
	rgmii_exit();
 err_zmii:
	zmii_exit();
 err_mal:
	mal_exit();
 err:
	return rc;
}
static void __exit emac_exit(void)
{
	int i;

	of_unregister_platform_driver(&emac_driver);

	tah_exit();
	rgmii_exit();
	zmii_exit();
	mal_exit();
	emac_fini_debug();

	/* Destroy EMAC boot list */
	for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
		if (emac_boot_list[i])
			of_node_put(emac_boot_list[i]);
}

module_init(emac_init);
module_exit(emac_exit);