/*
 * drivers/net/ibm_newemac/core.c
 *
 * Driver for PowerPC 4xx on-chip ethernet controller.
 *
 * Copyright (c) 2004, 2005 Zultys Technologies.
 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
 *
 * Based on original work by
 * 	Matt Porter <mporter@kernel.crashing.org>
 *	(c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
 *      Armin Kuster <akuster@mvista.com>
 * 	Johnnie Peters <jpeters@mvista.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/bitops.h>
#include <linux/workqueue.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/uaccess.h>

#include "core.h"
/*
 * Lack of dma_unmap_???? calls is intentional.
 *
 * API-correct usage requires additional support state information to be
 * maintained for every RX and TX buffer descriptor (BD). Unfortunately, due to
 * EMAC design (e.g. TX buffer passed from network stack can be split into
 * several BDs, dma_map_single/dma_map_page can be used to map particular BD),
 * maintaining such information will add additional overhead.
 * Current DMA API implementation for 4xx processors only ensures cache coherency
 * and dma_unmap_???? routines are empty and are likely to stay this way.
 * I decided to omit dma_unmap_??? calls because I don't want to add additional
 * complexity just for the sake of following some abstract API, when it doesn't
 * add any real benefit to the driver. I understand that this decision may be
 * controversial, but I really tried to make code API-correct and efficient
 * at the same time and didn't come up with code I liked :(. --ebs
 */
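/*
 * Editor's illustration (not part of this driver): the kind of per-BD
 * bookkeeping an API-correct implementation would need so that the matching
 * dma_unmap_single/dma_unmap_page could be issued later. All names here are
 * hypothetical.
 *
 *	struct emac_bd_unmap_state {
 *		dma_addr_t	addr;		// handle from dma_map_*()
 *		size_t		len;		// length mapped for this BD
 *		int		mapped_page;	// page vs. single mapping
 *	};
 */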
#define DRV_NAME        "emac"
#define DRV_VERSION     "3.54"
#define DRV_DESC        "PPC 4xx OCP EMAC driver"

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR
    ("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>");
MODULE_LICENSE("GPL");

/*
 * PPC64 doesn't (yet) have a cacheable_memcpy
 */
#ifdef CONFIG_PPC64
#define cacheable_memcpy(d,s,n) memcpy((d),(s),(n))
#endif
/* minimum number of free TX descriptors required to wake up TX process */
#define EMAC_TX_WAKEUP_THRESH		(NUM_TX_BUFF / 4)

/* If packet size is less than this number, we allocate small skb and copy packet
 * contents into it instead of just sending original big skb up
 */
#define EMAC_RX_COPY_THRESH		CONFIG_IBM_NEW_EMAC_RX_COPY_THRESHOLD

/* Since multiple EMACs share MDIO lines in various ways, we need
 * to avoid re-using the same PHY ID in cases where the arch didn't
 * setup precise phy_map entries
 *
 * XXX This is something that needs to be reworked as we can have multiple
 * EMAC "sets" (multiple ASICs containing several EMACs) though we can
 * probably require in that case to have explicit PHY IDs in the device-tree
 */
static u32 busy_phy_map;
static DEFINE_MUTEX(emac_phy_map_lock);

/* This is the wait queue used to wait on any event related to probe, that
 * is discovery of MALs, other EMACs, ZMII/RGMIIs, etc...
 */
static DECLARE_WAIT_QUEUE_HEAD(emac_probe_wait);
/* Having stable interface names is a doomed idea. However, it would be nice
 * if we didn't have completely random interface names at boot too :-) It's
 * just a matter of making everybody's life easier. Since we are doing
 * threaded probing, it's a bit harder though. The base idea here is that
 * we make up a list of all emacs in the device-tree before we register the
 * driver. Every emac will then wait for the previous one in the list to
 * initialize before itself. We should also keep that list ordered by
 * cell_index.
 * That list is only 4 entries long, meaning that additional EMACs don't
 * get ordering guarantees unless EMAC_BOOT_LIST_SIZE is increased.
 */

#define EMAC_BOOT_LIST_SIZE	4
static struct device_node *emac_boot_list[EMAC_BOOT_LIST_SIZE];

/* How long should I wait for dependent devices ? */
#define EMAC_PROBE_DEP_TIMEOUT	(HZ * 5)

/* I don't want to litter system log with timeout errors
 * when we have brain-damaged PHY.
 */
static inline void emac_report_timeout_error(struct emac_instance *dev,
					     const char *error)
{
	if (net_ratelimit())
		printk(KERN_ERR "%s: %s\n", dev->ndev->name, error);
}
/* PHY polling intervals */
#define PHY_POLL_LINK_ON	HZ
#define PHY_POLL_LINK_OFF	(HZ / 5)

/* Graceful stop timeouts in us.
 * We should allow up to 1 frame time (full-duplex, ignoring collisions)
 */
#define STOP_TIMEOUT_10		1230
#define STOP_TIMEOUT_100	124
#define STOP_TIMEOUT_1000	13
#define STOP_TIMEOUT_1000_JUMBO	73
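/* Editor's note, a plausible derivation of the values above: one maximum
 * frame plus preamble and inter-frame gap is 1518 + 8 + 12 = 1538 byte times
 * = 12304 bit times, i.e. ~1230 us at 10 Mb/s, ~124 us at 100 Mb/s and
 * ~13 us at 1000 Mb/s; a ~9018-byte jumbo frame at 1000 Mb/s gives ~73 us.
 */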
/* Please, keep in sync with struct ibm_emac_stats/ibm_emac_error_stats */
static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = {
	"rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum",
	"tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom",
	"rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu",
	"rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet",
	"rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error",
	"rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range",
	"rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun",
	"rx_bad_packet", "rx_runt_packet", "rx_short_event",
	"rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long",
	"rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors",
	"tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral",
	"tx_bd_excessive_collisions", "tx_bd_late_collision",
	"tx_bd_multple_collisions", "tx_bd_single_collision",
	"tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe",
	"tx_errors"
};

static irqreturn_t emac_irq(int irq, void *dev_instance);
static void emac_clean_tx_ring(struct emac_instance *dev);
static void __emac_set_multicast_list(struct emac_instance *dev);

static inline int emac_phy_supports_gige(int phy_mode)
{
	return  phy_mode == PHY_MODE_GMII ||
		phy_mode == PHY_MODE_RGMII ||
		phy_mode == PHY_MODE_TBI ||
		phy_mode == PHY_MODE_RTBI;
}

static inline int emac_phy_gpcs(int phy_mode)
{
	return  phy_mode == PHY_MODE_TBI ||
		phy_mode == PHY_MODE_RTBI;
}
static inline void emac_tx_enable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "tx_enable" NL);

	r = in_be32(&p->mr0);
	if (!(r & EMAC_MR0_TXE))
		out_be32(&p->mr0, r | EMAC_MR0_TXE);
}

static void emac_tx_disable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "tx_disable" NL);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_TXE) {
		int n = dev->stop_timeout;
		out_be32(&p->mr0, r & ~EMAC_MR0_TXE);
		while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n) {
			udelay(1);
			--n;
		}
		if (unlikely(!n))
			emac_report_timeout_error(dev, "TX disable timeout");
	}
}

static void emac_rx_enable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	if (unlikely(test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags)))
		goto out;

	DBG(dev, "rx_enable" NL);

	r = in_be32(&p->mr0);
	if (!(r & EMAC_MR0_RXE)) {
		if (unlikely(!(r & EMAC_MR0_RXI))) {
			/* Wait if previous async disable is still in progress */
			int n = dev->stop_timeout;
			while (!(r = in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
				udelay(1);
				--n;
			}
			if (unlikely(!n))
				emac_report_timeout_error(dev,
							  "RX disable timeout");
		}
		out_be32(&p->mr0, r | EMAC_MR0_RXE);
	}
 out:
	;
}

static void emac_rx_disable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "rx_disable" NL);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_RXE) {
		int n = dev->stop_timeout;
		out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
		while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
			udelay(1);
			--n;
		}
		if (unlikely(!n))
			emac_report_timeout_error(dev, "RX disable timeout");
	}
}
static inline void emac_netif_stop(struct emac_instance *dev)
{
	netif_tx_lock_bh(dev->ndev);
	dev->no_mcast = 1;
	netif_tx_unlock_bh(dev->ndev);
	dev->ndev->trans_start = jiffies;	/* prevent tx timeout */
	mal_poll_disable(dev->mal, &dev->commac);
	netif_tx_disable(dev->ndev);
}

static inline void emac_netif_start(struct emac_instance *dev)
{
	netif_tx_lock_bh(dev->ndev);
	dev->no_mcast = 0;
	if (dev->mcast_pending && netif_running(dev->ndev))
		__emac_set_multicast_list(dev);
	netif_tx_unlock_bh(dev->ndev);

	netif_wake_queue(dev->ndev);

	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (taken from tg3... though the case where that is wrong is
	 *  not terribly harmful)
	 */
	mal_poll_enable(dev->mal, &dev->commac);
}

static inline void emac_rx_disable_async(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "rx_disable_async" NL);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_RXE)
		out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
}

static int emac_reset(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	int n = 20;

	DBG(dev, "reset" NL);

	if (!dev->reset_failed) {
		/* 40x erratum suggests stopping RX channel before reset,
		 * we stop TX as well
		 */
		emac_rx_disable(dev);
		emac_tx_disable(dev);
	}

	out_be32(&p->mr0, EMAC_MR0_SRST);
	while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n)
		--n;

	if (n) {
		dev->reset_failed = 0;
		return 0;
	} else {
		emac_report_timeout_error(dev, "reset timeout");
		dev->reset_failed = 1;
		return -ETIMEDOUT;
	}
}
static void emac_hash_mc(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u16 gaht[4] = { 0 };
	struct dev_mc_list *dmi;

	DBG(dev, "hash_mc %d" NL, dev->ndev->mc_count);

	for (dmi = dev->ndev->mc_list; dmi; dmi = dmi->next) {
		int bit;
		DBG2(dev, "mc %02x:%02x:%02x:%02x:%02x:%02x" NL,
		     dmi->dmi_addr[0], dmi->dmi_addr[1], dmi->dmi_addr[2],
		     dmi->dmi_addr[3], dmi->dmi_addr[4], dmi->dmi_addr[5]);

		bit = 63 - (ether_crc(ETH_ALEN, dmi->dmi_addr) >> 26);
		gaht[bit >> 4] |= 0x8000 >> (bit & 0x0f);
	}
	out_be32(&p->gaht1, gaht[0]);
	out_be32(&p->gaht2, gaht[1]);
	out_be32(&p->gaht3, gaht[2]);
	out_be32(&p->gaht4, gaht[3]);
}
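/* Editor's worked example for the hash above: the top 6 bits of the CRC
 * select one of 64 filter bits; bit 0 maps to the MSB of GAHT1 and bit 63
 * to the LSB of GAHT4. If the CRC's top 6 bits are all zero, bit = 63 and
 * gaht[3] gets 0x8000 >> 15 == 0x0001; if they are all ones, bit = 0 and
 * gaht[0] gets 0x8000.
 */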
static inline u32 emac_iff2rmr(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	u32 r;

	r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE;

	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
	    r |= EMAC4_RMR_BASE;
	else
	    r |= EMAC_RMR_BASE;

	if (ndev->flags & IFF_PROMISC)
		r |= EMAC_RMR_PME;
	else if (ndev->flags & IFF_ALLMULTI || ndev->mc_count > 32)
		r |= EMAC_RMR_PMME;
	else if (ndev->mc_count > 0)
		r |= EMAC_RMR_MAE;

	return r;
}

static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
{
	u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC_MR1_TR0_MULT;

	DBG2(dev, "__emac_calc_base_mr1" NL);

	switch(tx_size) {
	case 2048:
		ret |= EMAC_MR1_TFS_2K;
		break;
	default:
		printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
		       dev->ndev->name, tx_size);
	}

	switch(rx_size) {
	case 16384:
		ret |= EMAC_MR1_RFS_16K;
		break;
	case 4096:
		ret |= EMAC_MR1_RFS_4K;
		break;
	default:
		printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
		       dev->ndev->name, rx_size);
	}

	return ret;
}

static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
{
	u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC4_MR1_TR |
		EMAC4_MR1_OBCI(dev->opb_bus_freq);

	DBG2(dev, "__emac4_calc_base_mr1" NL);

	switch(tx_size) {
	case 4096:
		ret |= EMAC4_MR1_TFS_4K;
		break;
	case 2048:
		ret |= EMAC4_MR1_TFS_2K;
		break;
	default:
		printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
		       dev->ndev->name, tx_size);
	}

	switch(rx_size) {
	case 16384:
		ret |= EMAC4_MR1_RFS_16K;
		break;
	case 4096:
		ret |= EMAC4_MR1_RFS_4K;
		break;
	case 2048:
		ret |= EMAC4_MR1_RFS_2K;
		break;
	default:
		printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
		       dev->ndev->name, rx_size);
	}

	return ret;
}

static u32 emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
{
	return emac_has_feature(dev, EMAC_FTR_EMAC4) ?
		__emac4_calc_base_mr1(dev, tx_size, rx_size) :
		__emac_calc_base_mr1(dev, tx_size, rx_size);
}
static inline u32 emac_calc_trtr(struct emac_instance *dev, unsigned int size)
{
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		return ((size >> 6) - 1) << EMAC_TRTR_SHIFT_EMAC4;
	else
		return ((size >> 6) - 1) << EMAC_TRTR_SHIFT;
}

static inline u32 emac_calc_rwmr(struct emac_instance *dev,
				 unsigned int low, unsigned int high)
{
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		return (low << 22) | ( (high & 0x3ff) << 6);
	else
		return (low << 23) | ( (high & 0x1ff) << 7);
}
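/* Editor's worked example (a fifo_entry_size of 16 is an assumption): with
 * rx_size = 4096, emac_configure() below passes low = 4096/8/16 = 32 and
 * high = 4096/4/16 = 64, so an EMAC4 gets rwmr = (32 << 22) | (64 << 6);
 * the classic EMAC packs the same values one bit higher, with a narrower
 * high-watermark field.
 */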
static int emac_configure(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	struct net_device *ndev = dev->ndev;
	int tx_size, rx_size;
	u32 r, mr1 = 0;

	DBG(dev, "configure" NL);

	if (emac_reset(dev) < 0)
		return -ETIMEDOUT;

	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		tah_reset(dev->tah_dev);

	DBG(dev, " duplex = %d, pause = %d, asym_pause = %d\n",
	    dev->phy.duplex, dev->phy.pause, dev->phy.asym_pause);

	/* Default fifo sizes */
	tx_size = dev->tx_fifo_size;
	rx_size = dev->rx_fifo_size;

	/* Check for full duplex */
	if (dev->phy.duplex == DUPLEX_FULL)
		mr1 |= EMAC_MR1_FDE | EMAC_MR1_MWSW_001;

	/* Adjust fifo sizes, mr1 and timeouts based on link speed */
	dev->stop_timeout = STOP_TIMEOUT_10;
	switch (dev->phy.speed) {
	case SPEED_1000:
		if (emac_phy_gpcs(dev->phy.mode)) {
			mr1 |= EMAC_MR1_MF_1000GPCS |
				EMAC_MR1_MF_IPPA(dev->phy.address);

			/* Put some arbitrary OUI, Manuf & Rev IDs so we can
			 * identify this GPCS PHY later.
			 */
			out_be32(&p->ipcr, 0xdeadbeef);
		} else
			mr1 |= EMAC_MR1_MF_1000;

		/* Extended fifo sizes */
		tx_size = dev->tx_fifo_size_gige;
		rx_size = dev->rx_fifo_size_gige;

		if (dev->ndev->mtu > ETH_DATA_LEN) {
			mr1 |= EMAC_MR1_JPSM;
			dev->stop_timeout = STOP_TIMEOUT_1000_JUMBO;
		} else
			dev->stop_timeout = STOP_TIMEOUT_1000;
		break;
	case SPEED_100:
		mr1 |= EMAC_MR1_MF_100;
		dev->stop_timeout = STOP_TIMEOUT_100;
		break;
	default: /* make gcc happy */
		break;
	}

	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_set_speed(dev->rgmii_dev, dev->rgmii_port,
				dev->phy.speed);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_set_speed(dev->zmii_dev, dev->zmii_port, dev->phy.speed);

	/* An erratum on 40x forces us to NOT use integrated flow control,
	 * let's hope it works on 44x ;)
	 */
	if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x) &&
	    dev->phy.duplex == DUPLEX_FULL) {
		if (dev->phy.pause)
			mr1 |= EMAC_MR1_EIFC | EMAC_MR1_APP;
		else if (dev->phy.asym_pause)
			mr1 |= EMAC_MR1_APP;
	}

	/* Add base settings & fifo sizes & program MR1 */
	mr1 |= emac_calc_base_mr1(dev, tx_size, rx_size);
	out_be32(&p->mr1, mr1);

	/* Set individual MAC address */
	out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
	out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
		 (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
		 ndev->dev_addr[5]);

	/* VLAN Tag Protocol ID */
	out_be32(&p->vtpid, 0x8100);

	/* Receive mode register */
	r = emac_iff2rmr(ndev);
	if (r & EMAC_RMR_MAE)
		emac_hash_mc(dev);
	out_be32(&p->rmr, r);

	/* FIFOs thresholds */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
			       tx_size / 2 / dev->fifo_entry_size);
	else
		r = EMAC_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
			      tx_size / 2 / dev->fifo_entry_size);
	out_be32(&p->tmr1, r);
	out_be32(&p->trtr, emac_calc_trtr(dev, tx_size / 2));

	/* PAUSE frame is sent when RX FIFO reaches its high-water mark,
	   there should be still enough space in FIFO to allow our link
	   partner time to process this frame and also time to send PAUSE
	   frame itself.

	   Here is the worst case scenario for the RX FIFO "headroom"
	   (from "The Switch Book") (100Mbps, without preamble, inter-frame gap):

	   1) One maximum-length frame on TX                    1522 bytes
	   2) One PAUSE frame time                                64 bytes
	   3) PAUSE frame decode time allowance                   64 bytes
	   4) One maximum-length frame on RX                    1522 bytes
	   5) Round-trip propagation delay of the link (100Mb)    15 bytes
	   ----------
	   3187 bytes

	   I chose to set high-water mark to RX_FIFO_SIZE / 4 (1024 bytes)
	   low-water mark  to RX_FIFO_SIZE / 8 (512 bytes)
	 */
	r = emac_calc_rwmr(dev, rx_size / 8 / dev->fifo_entry_size,
			   rx_size / 4 / dev->fifo_entry_size);
	out_be32(&p->rwmr, r);

	/* Set PAUSE timer to the maximum */
	out_be32(&p->ptr, 0xffff);

	/* Enable interrupts */
	r = EMAC_ISR_OVR | EMAC_ISR_BP | EMAC_ISR_SE |
		EMAC_ISR_ALE | EMAC_ISR_BFCS | EMAC_ISR_PTLE | EMAC_ISR_ORE |
		EMAC_ISR_IRE | EMAC_ISR_TE;
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r |= EMAC4_ISR_TXPE | EMAC4_ISR_RXPE /* | EMAC4_ISR_TXUE |
						 EMAC4_ISR_RXOE | */;
	out_be32(&p->iser, r);

	/* We need to take GPCS PHY out of isolate mode after EMAC reset */
	if (emac_phy_gpcs(dev->phy.mode))
		emac_mii_reset_phy(&dev->phy);

	return 0;
}
static void emac_reinitialize(struct emac_instance *dev)
{
	DBG(dev, "reinitialize" NL);

	emac_netif_stop(dev);
	if (!emac_configure(dev)) {
		emac_tx_enable(dev);
		emac_rx_enable(dev);
	}
	emac_netif_start(dev);
}

static void emac_full_tx_reset(struct emac_instance *dev)
{
	DBG(dev, "full_tx_reset" NL);

	emac_tx_disable(dev);
	mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
	emac_clean_tx_ring(dev);
	dev->tx_cnt = dev->tx_slot = dev->ack_slot = 0;

	emac_configure(dev);

	mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
	emac_tx_enable(dev);
	emac_rx_enable(dev);
}
static void emac_reset_work(struct work_struct *work)
{
	struct emac_instance *dev = container_of(work, struct emac_instance, reset_work);

	DBG(dev, "reset_work" NL);

	mutex_lock(&dev->link_lock);
	emac_netif_stop(dev);
	emac_full_tx_reset(dev);
	emac_netif_start(dev);
	mutex_unlock(&dev->link_lock);
}

static void emac_tx_timeout(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	DBG(dev, "tx_timeout" NL);

	schedule_work(&dev->reset_work);
}

static inline int emac_phy_done(struct emac_instance *dev, u32 stacr)
{
	int done = !!(stacr & EMAC_STACR_OC);

	if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
		done = !done;

	return done;
}
static int __emac_mdio_read(struct emac_instance *dev, u8 id, u8 reg)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r = 0;
	int n, err = -ETIMEDOUT;

	mutex_lock(&dev->mdio_lock);

	DBG2(dev, "mdio_read(%02x,%02x)" NL, id, reg);

	/* Enable proper MDIO port */
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);

	/* Wait for management interface to become idle */
	n = 20;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait idle\n");
			goto bail;
		}
	}

	/* Issue read command */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_STACR_BASE(dev->opb_bus_freq);
	else
		r = EMAC_STACR_BASE(dev->opb_bus_freq);
	if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
		r |= EMAC_STACR_OC;
	if (emac_has_feature(dev, EMAC_FTR_HAS_AXON_STACR))
		r |= EMACX_STACR_STAC_READ;
	else
		r |= EMAC_STACR_STAC_READ;
	r |= (reg & EMAC_STACR_PRA_MASK)
		| ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT);
	out_be32(&p->stacr, r);

	/* Wait for read to complete */
	n = 200;
	while (!emac_phy_done(dev, (r = in_be32(&p->stacr)))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait complete\n");
			goto bail;
		}
	}

	if (unlikely(r & EMAC_STACR_PHYE)) {
		DBG(dev, "mdio_read(%02x, %02x) failed" NL, id, reg);
		err = -EREMOTEIO;
		goto bail;
	}

	r = ((r >> EMAC_STACR_PHYD_SHIFT) & EMAC_STACR_PHYD_MASK);

	DBG2(dev, "mdio_read -> %04x" NL, r);
	err = 0;
 bail:
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
	mutex_unlock(&dev->mdio_lock);

	return err == 0 ? r : err;
}
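/* Editor's usage sketch: PHY code reaches this routine through the
 * emac_mdio_read() wrapper further below, e.g.
 * emac_mdio_read(ndev, dev->phy.address, MII_BMCR) (MII_BMCR is from
 * <linux/mii.h>), as done by the probe loop in emac_init_phy().
 */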
static void __emac_mdio_write(struct emac_instance *dev, u8 id, u8 reg,
			      u16 val)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r = 0;
	int n, err = -ETIMEDOUT;

	mutex_lock(&dev->mdio_lock);

	DBG2(dev, "mdio_write(%02x,%02x,%04x)" NL, id, reg, val);

	/* Enable proper MDIO port */
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);

	/* Wait for management interface to be idle */
	n = 20;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait idle\n");
			goto bail;
		}
	}

	/* Issue write command */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_STACR_BASE(dev->opb_bus_freq);
	else
		r = EMAC_STACR_BASE(dev->opb_bus_freq);
	if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
		r |= EMAC_STACR_OC;
	if (emac_has_feature(dev, EMAC_FTR_HAS_AXON_STACR))
		r |= EMACX_STACR_STAC_WRITE;
	else
		r |= EMAC_STACR_STAC_WRITE;
	r |= (reg & EMAC_STACR_PRA_MASK) |
		((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT) |
		(val << EMAC_STACR_PHYD_SHIFT);
	out_be32(&p->stacr, r);

	/* Wait for write to complete */
	n = 200;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait complete\n");
			goto bail;
		}
	}
	err = 0;
 bail:
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
	mutex_unlock(&dev->mdio_lock);
}

static int emac_mdio_read(struct net_device *ndev, int id, int reg)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int res;

	res = __emac_mdio_read(dev->mdio_instance ? dev->mdio_instance : dev,
			       (u8) id, (u8) reg);
	return res;
}

static void emac_mdio_write(struct net_device *ndev, int id, int reg, int val)
{
	struct emac_instance *dev = netdev_priv(ndev);

	__emac_mdio_write(dev->mdio_instance ? dev->mdio_instance : dev,
			  (u8) id, (u8) reg, (u16) val);
}
/* Tx lock BH */
static void __emac_set_multicast_list(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 rmr = emac_iff2rmr(dev->ndev);

	DBG(dev, "__multicast %08x" NL, rmr);

	/* I decided to relax register access rules here to avoid
	 * full EMAC reset.
	 *
	 * There is a real problem with EMAC4 core if we use MWSW_001 bit
	 * in MR1 register and do a full EMAC reset.
	 * One TX BD status update is delayed and, after EMAC reset, it
	 * never happens, resulting in TX hung (it'll be recovered by TX
	 * timeout handler eventually, but this is just gross).
	 * So we either have to do full TX reset or try to cheat here :)
	 *
	 * The only required change is to RX mode register, so I *think* all
	 * we need is just to stop RX channel. This seems to work on all
	 * tested SoCs.  --ebs
	 *
	 * If we need the full reset, we might just trigger the workqueue
	 * and do it async... a bit nasty but should work --BenH
	 */
	dev->mcast_pending = 0;
	emac_rx_disable(dev);
	if (rmr & EMAC_RMR_MAE)
		emac_hash_mc(dev);
	out_be32(&p->rmr, rmr);
	emac_rx_enable(dev);
}

/* Tx lock BH */
static void emac_set_multicast_list(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	DBG(dev, "multicast" NL);

	BUG_ON(!netif_running(dev->ndev));

	if (dev->no_mcast) {
		dev->mcast_pending = 1;
		return;
	}
	__emac_set_multicast_list(dev);
}
static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu)
{
	int rx_sync_size = emac_rx_sync_size(new_mtu);
	int rx_skb_size = emac_rx_skb_size(new_mtu);
	int i, ret = 0;

	mutex_lock(&dev->link_lock);
	emac_netif_stop(dev);
	emac_rx_disable(dev);
	mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);

	if (dev->rx_sg_skb) {
		++dev->estats.rx_dropped_resize;
		dev_kfree_skb(dev->rx_sg_skb);
		dev->rx_sg_skb = NULL;
	}

	/* Make a first pass over RX ring and mark BDs ready, dropping
	 * non-processed packets on the way. We need this as a separate pass
	 * to simplify error recovery in the case of allocation failure later.
	 */
	for (i = 0; i < NUM_RX_BUFF; ++i) {
		if (dev->rx_desc[i].ctrl & MAL_RX_CTRL_FIRST)
			++dev->estats.rx_dropped_resize;

		dev->rx_desc[i].data_len = 0;
		dev->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY |
		    (i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
	}

	/* Reallocate RX ring only if bigger skb buffers are required */
	if (rx_skb_size <= dev->rx_skb_size)
		goto skip;

	/* Second pass, allocate new skbs */
	for (i = 0; i < NUM_RX_BUFF; ++i) {
		struct sk_buff *skb = alloc_skb(rx_skb_size, GFP_ATOMIC);
		if (!skb) {
			ret = -ENOMEM;
			goto oom;
		}

		BUG_ON(!dev->rx_skb[i]);
		dev_kfree_skb(dev->rx_skb[i]);

		skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
		dev->rx_desc[i].data_ptr =
		    dma_map_single(&dev->ofdev->dev, skb->data - 2, rx_sync_size,
				   DMA_FROM_DEVICE) + 2;
		dev->rx_skb[i] = skb;
	}
 skip:
	/* Check if we need to change "Jumbo" bit in MR1 */
	if ((new_mtu > ETH_DATA_LEN) ^ (dev->ndev->mtu > ETH_DATA_LEN)) {
		/* This is to prevent starting RX channel in emac_rx_enable() */
		set_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);

		dev->ndev->mtu = new_mtu;
		emac_full_tx_reset(dev);
	}

	mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(new_mtu));
 oom:
	/* Restart RX */
	clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
	dev->rx_slot = 0;
	mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
	emac_rx_enable(dev);
	emac_netif_start(dev);
	mutex_unlock(&dev->link_lock);

	return ret;
}

/* Process ctx, rtnl_lock semaphore */
static int emac_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int ret = 0;

	if (new_mtu < EMAC_MIN_MTU || new_mtu > dev->max_mtu)
		return -EINVAL;

	DBG(dev, "change_mtu(%d)" NL, new_mtu);

	if (netif_running(ndev)) {
		/* Check if we really need to reinitialize RX ring */
		if (emac_rx_skb_size(ndev->mtu) != emac_rx_skb_size(new_mtu))
			ret = emac_resize_rx_ring(dev, new_mtu);
	}

	if (!ret) {
		ndev->mtu = new_mtu;
		dev->rx_skb_size = emac_rx_skb_size(new_mtu);
		dev->rx_sync_size = emac_rx_sync_size(new_mtu);
	}

	return ret;
}
static void emac_clean_tx_ring(struct emac_instance *dev)
{
	int i;

	for (i = 0; i < NUM_TX_BUFF; ++i) {
		if (dev->tx_skb[i]) {
			dev_kfree_skb(dev->tx_skb[i]);
			dev->tx_skb[i] = NULL;
			if (dev->tx_desc[i].ctrl & MAL_TX_CTRL_READY)
				++dev->estats.tx_dropped;
		}
		dev->tx_desc[i].ctrl = 0;
		dev->tx_desc[i].data_ptr = 0;
	}
}

static void emac_clean_rx_ring(struct emac_instance *dev)
{
	int i;

	for (i = 0; i < NUM_RX_BUFF; ++i)
		if (dev->rx_skb[i]) {
			dev->rx_desc[i].ctrl = 0;
			dev_kfree_skb(dev->rx_skb[i]);
			dev->rx_skb[i] = NULL;
			dev->rx_desc[i].data_ptr = 0;
		}

	if (dev->rx_sg_skb) {
		dev_kfree_skb(dev->rx_sg_skb);
		dev->rx_sg_skb = NULL;
	}
}

static inline int emac_alloc_rx_skb(struct emac_instance *dev, int slot,
				    gfp_t flags)
{
	struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags);
	if (unlikely(!skb))
		return -ENOMEM;

	dev->rx_skb[slot] = skb;
	dev->rx_desc[slot].data_len = 0;

	skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
	dev->rx_desc[slot].data_ptr =
	    dma_map_single(&dev->ofdev->dev, skb->data - 2, dev->rx_sync_size,
			   DMA_FROM_DEVICE) + 2;
	wmb();
	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);

	return 0;
}
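/* Editor's note on the "+ 2" above: skb_reserve(..., EMAC_RX_SKB_HEADROOM + 2)
 * offsets skb->data by 2, so the IP header that follows the 14-byte Ethernet
 * header ends up 4-byte aligned; the buffer is then mapped from skb->data - 2
 * (an aligned address) and the descriptor pointer advanced by 2 to match.
 */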
static void emac_print_link_status(struct emac_instance *dev)
{
	if (netif_carrier_ok(dev->ndev))
		printk(KERN_INFO "%s: link is up, %d %s%s\n",
		       dev->ndev->name, dev->phy.speed,
		       dev->phy.duplex == DUPLEX_FULL ? "FDX" : "HDX",
		       dev->phy.pause ? ", pause enabled" :
		       dev->phy.asym_pause ? ", asymmetric pause enabled" : "");
	else
		printk(KERN_INFO "%s: link is down\n", dev->ndev->name);
}

/* Process ctx, rtnl_lock semaphore */
static int emac_open(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int err, i;

	DBG(dev, "open" NL);

	/* Setup error IRQ handler */
	err = request_irq(dev->emac_irq, emac_irq, 0, "EMAC", dev);
	if (err) {
		printk(KERN_ERR "%s: failed to request IRQ %d\n",
		       ndev->name, dev->emac_irq);
		return err;
	}

	/* Allocate RX ring */
	for (i = 0; i < NUM_RX_BUFF; ++i)
		if (emac_alloc_rx_skb(dev, i, GFP_KERNEL)) {
			printk(KERN_ERR "%s: failed to allocate RX ring\n",
			       ndev->name);
			goto oom;
		}

	dev->tx_cnt = dev->tx_slot = dev->ack_slot = dev->rx_slot = 0;
	clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
	dev->rx_sg_skb = NULL;

	mutex_lock(&dev->link_lock);

	/* XXX Start PHY polling now. Shouldn't we do like sungem instead and
	 * always poll the PHY even when the iface is down ? That would allow
	 * things like laptop-net to work. --BenH
	 */
	if (dev->phy.address >= 0) {
		int link_poll_interval;
		if (dev->phy.def->ops->poll_link(&dev->phy)) {
			dev->phy.def->ops->read_link(&dev->phy);
			netif_carrier_on(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_ON;
		} else {
			netif_carrier_off(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_OFF;
		}
		dev->link_polling = 1;
		wmb();
		schedule_delayed_work(&dev->link_work, link_poll_interval);
		emac_print_link_status(dev);
	} else
		netif_carrier_on(dev->ndev);

	emac_configure(dev);
	mal_poll_add(dev->mal, &dev->commac);
	mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
	mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(ndev->mtu));
	mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
	emac_tx_enable(dev);
	emac_rx_enable(dev);
	emac_netif_start(dev);

	mutex_unlock(&dev->link_lock);

	return 0;
 oom:
	emac_clean_rx_ring(dev);
	free_irq(dev->emac_irq, dev);

	return -ENOMEM;
}
static int emac_link_differs(struct emac_instance *dev)
{
	u32 r = in_be32(&dev->emacp->mr1);

	int duplex = r & EMAC_MR1_FDE ? DUPLEX_FULL : DUPLEX_HALF;
	int speed, pause, asym_pause;

	if (r & EMAC_MR1_MF_1000)
		speed = SPEED_1000;
	else if (r & EMAC_MR1_MF_100)
		speed = SPEED_100;
	else
		speed = SPEED_10;

	switch (r & (EMAC_MR1_EIFC | EMAC_MR1_APP)) {
	case (EMAC_MR1_EIFC | EMAC_MR1_APP):
		pause = 1;
		asym_pause = 0;
		break;
	case EMAC_MR1_APP:
		pause = 0;
		asym_pause = 1;
		break;
	default:
		pause = asym_pause = 0;
	}
	return speed != dev->phy.speed || duplex != dev->phy.duplex ||
	    pause != dev->phy.pause || asym_pause != dev->phy.asym_pause;
}

static void emac_link_timer(struct work_struct *work)
{
	struct emac_instance *dev =
		container_of((struct delayed_work *)work,
			     struct emac_instance, link_work);
	int link_poll_interval;

	mutex_lock(&dev->link_lock);

	DBG2(dev, "link timer" NL);

	if (dev->phy.def->ops->poll_link(&dev->phy)) {
		if (!netif_carrier_ok(dev->ndev)) {
			/* Get new link parameters */
			dev->phy.def->ops->read_link(&dev->phy);

			netif_carrier_on(dev->ndev);
			emac_netif_stop(dev);
			emac_full_tx_reset(dev);
			emac_netif_start(dev);
			emac_print_link_status(dev);
		}
		link_poll_interval = PHY_POLL_LINK_ON;
	} else {
		if (netif_carrier_ok(dev->ndev)) {
			emac_reinitialize(dev);
			netif_carrier_off(dev->ndev);
			netif_tx_disable(dev->ndev);
			emac_print_link_status(dev);
		}
		link_poll_interval = PHY_POLL_LINK_OFF;
	}
	schedule_delayed_work(&dev->link_work, link_poll_interval);

	mutex_unlock(&dev->link_lock);
}

static void emac_force_link_update(struct emac_instance *dev)
{
	netif_carrier_off(dev->ndev);
	if (dev->link_polling) {
		cancel_rearming_delayed_work(&dev->link_work);
		if (dev->link_polling)
			schedule_delayed_work(&dev->link_work, PHY_POLL_LINK_OFF);
	}
}

/* Process ctx, rtnl_lock semaphore */
static int emac_close(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	DBG(dev, "close" NL);

	if (dev->phy.address >= 0)
		cancel_rearming_delayed_work(&dev->link_work);

	emac_netif_stop(dev);
	flush_scheduled_work();

	emac_rx_disable(dev);
	emac_tx_disable(dev);
	mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);
	mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
	mal_poll_del(dev->mal, &dev->commac);

	emac_clean_tx_ring(dev);
	emac_clean_rx_ring(dev);

	free_irq(dev->emac_irq, dev);

	return 0;
}
static inline u16 emac_tx_csum(struct emac_instance *dev,
			       struct sk_buff *skb)
{
	/* Note: the original misplaced parenthesis passed the whole
	 * condition as the feature mask; the intent is clearly two
	 * separate tests.
	 */
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
	    skb->ip_summed == CHECKSUM_PARTIAL) {
		++dev->stats.tx_packets_csum;
		return EMAC_TX_CTRL_TAH_CSUM;
	}
	return 0;
}
static inline int emac_xmit_finish(struct emac_instance *dev, int len)
{
	struct emac_regs __iomem *p = dev->emacp;
	struct net_device *ndev = dev->ndev;

	/* Send the packet out. If the if makes a significant perf
	 * difference, then we can store the TMR0 value in "dev"
	 * instead
	 */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		out_be32(&p->tmr0, EMAC4_TMR0_XMIT);
	else
		out_be32(&p->tmr0, EMAC_TMR0_XMIT);

	if (unlikely(++dev->tx_cnt == NUM_TX_BUFF)) {
		netif_stop_queue(ndev);
		DBG2(dev, "stopped TX queue" NL);
	}

	ndev->trans_start = jiffies;
	++dev->stats.tx_packets;
	dev->stats.tx_bytes += len;

	return 0;
}

/* Tx lock BH */
static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	unsigned int len = skb->len;
	int slot;

	u16 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
	    MAL_TX_CTRL_LAST | emac_tx_csum(dev, skb);

	slot = dev->tx_slot++;
	if (dev->tx_slot == NUM_TX_BUFF) {
		dev->tx_slot = 0;
		ctrl |= MAL_TX_CTRL_WRAP;
	}

	DBG2(dev, "xmit(%u) %d" NL, len, slot);

	dev->tx_skb[slot] = skb;
	dev->tx_desc[slot].data_ptr = dma_map_single(&dev->ofdev->dev,
						     skb->data, len,
						     DMA_TO_DEVICE);
	dev->tx_desc[slot].data_len = (u16) len;
	wmb();
	dev->tx_desc[slot].ctrl = ctrl;

	return emac_xmit_finish(dev, len);
}
#ifdef CONFIG_IBM_NEW_EMAC_TAH
static inline int emac_xmit_split(struct emac_instance *dev, int slot,
				  u32 pd, int len, int last, u16 base_ctrl)
{
	do {
		u16 ctrl = base_ctrl;
		int chunk = min(len, MAL_MAX_TX_SIZE);
		len -= chunk;

		slot = (slot + 1) % NUM_TX_BUFF;

		if (last && !len)
			ctrl |= MAL_TX_CTRL_LAST;
		if (slot == NUM_TX_BUFF - 1)
			ctrl |= MAL_TX_CTRL_WRAP;

		dev->tx_skb[slot] = NULL;
		dev->tx_desc[slot].data_ptr = pd;
		dev->tx_desc[slot].data_len = (u16) chunk;
		dev->tx_desc[slot].ctrl = ctrl;
		++dev->tx_cnt;

		pd += chunk;
	} while (len);

	return slot;
}
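/* Editor's worked example (the 4096-byte chunk size is an assumption about
 * MAL_MAX_TX_SIZE): a 10000-byte linear region fed to emac_xmit_split()
 * becomes three BDs of 4096, 4096 and 1808 bytes; only the final chunk can
 * get MAL_TX_CTRL_LAST, and whichever chunk lands on the last ring slot
 * also gets MAL_TX_CTRL_WRAP.
 */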
/* Tx lock BH disabled (SG version for TAH equipped EMACs) */
static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int len = skb->len, chunk;
	int slot, i;
	u16 ctrl;
	u32 pd;

	/* This is common "fast" path */
	if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE))
		return emac_start_xmit(skb, ndev);

	len -= skb->data_len;

	/* Note, this is only an *estimation*, we can still run out of empty
	 * slots because of the additional fragmentation into
	 * MAL_MAX_TX_SIZE-sized chunks
	 */
	if (unlikely(dev->tx_cnt + nr_frags + mal_tx_chunks(len) > NUM_TX_BUFF))
		goto stop_queue;

	ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
	    emac_tx_csum(dev, skb);
	slot = dev->tx_slot;

	/* skb data */
	dev->tx_skb[slot] = NULL;
	chunk = min(len, MAL_MAX_TX_SIZE);
	dev->tx_desc[slot].data_ptr = pd =
	    dma_map_single(&dev->ofdev->dev, skb->data, len, DMA_TO_DEVICE);
	dev->tx_desc[slot].data_len = (u16) chunk;
	len -= chunk;
	if (unlikely(len))
		slot = emac_xmit_split(dev, slot, pd + chunk, len, !nr_frags,
				       ctrl);
	/* skb fragments */
	for (i = 0; i < nr_frags; ++i) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		len = frag->size;

		if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
			goto undo_frame;

		pd = dma_map_page(&dev->ofdev->dev, frag->page, frag->page_offset, len,
				  DMA_TO_DEVICE);

		slot = emac_xmit_split(dev, slot, pd, len, i == nr_frags - 1,
				       ctrl);
	}

	DBG2(dev, "xmit_sg(%u) %d - %d" NL, skb->len, dev->tx_slot, slot);

	/* Attach skb to the last slot so we don't release it too early */
	dev->tx_skb[slot] = skb;

	/* Send the packet out */
	if (dev->tx_slot == NUM_TX_BUFF - 1)
		ctrl |= MAL_TX_CTRL_WRAP;
	wmb();
	dev->tx_desc[dev->tx_slot].ctrl = ctrl;
	dev->tx_slot = (slot + 1) % NUM_TX_BUFF;

	return emac_xmit_finish(dev, skb->len);

 undo_frame:
	/* Well, too bad. Our previous estimation was overly optimistic.
	 * Undo everything.
	 */
	while (slot != dev->tx_slot) {
		dev->tx_desc[slot].ctrl = 0;
		if (--slot < 0)
			slot = NUM_TX_BUFF - 1;
	}
	++dev->estats.tx_undo;

 stop_queue:
	netif_stop_queue(ndev);
	DBG2(dev, "stopped TX queue" NL);
	return 1;
}
#else
# define emac_start_xmit_sg	emac_start_xmit
#endif	/* !defined(CONFIG_IBM_NEW_EMAC_TAH) */
static void emac_parse_tx_error(struct emac_instance *dev, u16 ctrl)
{
	struct emac_error_stats *st = &dev->estats;

	DBG(dev, "BD TX error %04x" NL, ctrl);

	++st->tx_bd_errors;
	if (ctrl & EMAC_TX_ST_BFCS)
		++st->tx_bd_bad_fcs;
	if (ctrl & EMAC_TX_ST_LCS)
		++st->tx_bd_carrier_loss;
	if (ctrl & EMAC_TX_ST_ED)
		++st->tx_bd_excessive_deferral;
	if (ctrl & EMAC_TX_ST_EC)
		++st->tx_bd_excessive_collisions;
	if (ctrl & EMAC_TX_ST_LC)
		++st->tx_bd_late_collision;
	if (ctrl & EMAC_TX_ST_MC)
		++st->tx_bd_multple_collisions;
	if (ctrl & EMAC_TX_ST_SC)
		++st->tx_bd_single_collision;
	if (ctrl & EMAC_TX_ST_UR)
		++st->tx_bd_underrun;
	if (ctrl & EMAC_TX_ST_SQE)
		++st->tx_bd_sqe;
}

static void emac_poll_tx(void *param)
{
	struct emac_instance *dev = param;
	u32 bad_mask;

	DBG2(dev, "poll_tx, %d %d" NL, dev->tx_cnt, dev->ack_slot);

	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		bad_mask = EMAC_IS_BAD_TX_TAH;
	else
		bad_mask = EMAC_IS_BAD_TX;

	netif_tx_lock_bh(dev->ndev);
	if (dev->tx_cnt) {
		u16 ctrl;
		int slot = dev->ack_slot, n = 0;
	again:
		ctrl = dev->tx_desc[slot].ctrl;
		if (!(ctrl & MAL_TX_CTRL_READY)) {
			struct sk_buff *skb = dev->tx_skb[slot];
			++n;

			if (skb) {
				dev_kfree_skb(skb);
				dev->tx_skb[slot] = NULL;
			}
			slot = (slot + 1) % NUM_TX_BUFF;

			if (unlikely(ctrl & bad_mask))
				emac_parse_tx_error(dev, ctrl);

			if (--dev->tx_cnt)
				goto again;
		}
		if (n) {
			dev->ack_slot = slot;
			if (netif_queue_stopped(dev->ndev) &&
			    dev->tx_cnt < EMAC_TX_WAKEUP_THRESH)
				netif_wake_queue(dev->ndev);

			DBG2(dev, "tx %d pkts" NL, n);
		}
	}
	netif_tx_unlock_bh(dev->ndev);
}

static inline void emac_recycle_rx_skb(struct emac_instance *dev, int slot,
				       int len)
{
	struct sk_buff *skb = dev->rx_skb[slot];

	DBG2(dev, "recycle %d %d" NL, slot, len);

	if (len)
		dma_map_single(&dev->ofdev->dev, skb->data - 2,
			       EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE);

	dev->rx_desc[slot].data_len = 0;
	wmb();
	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
}

static void emac_parse_rx_error(struct emac_instance *dev, u16 ctrl)
{
	struct emac_error_stats *st = &dev->estats;

	DBG(dev, "BD RX error %04x" NL, ctrl);

	++st->rx_bd_errors;
	if (ctrl & EMAC_RX_ST_OE)
		++st->rx_bd_overrun;
	if (ctrl & EMAC_RX_ST_BP)
		++st->rx_bd_bad_packet;
	if (ctrl & EMAC_RX_ST_RP)
		++st->rx_bd_runt_packet;
	if (ctrl & EMAC_RX_ST_SE)
		++st->rx_bd_short_event;
	if (ctrl & EMAC_RX_ST_AE)
		++st->rx_bd_alignment_error;
	if (ctrl & EMAC_RX_ST_BFCS)
		++st->rx_bd_bad_fcs;
	if (ctrl & EMAC_RX_ST_PTL)
		++st->rx_bd_packet_too_long;
	if (ctrl & EMAC_RX_ST_ORE)
		++st->rx_bd_out_of_range;
	if (ctrl & EMAC_RX_ST_IRE)
		++st->rx_bd_in_range;
}
static inline void emac_rx_csum(struct emac_instance *dev,
				struct sk_buff *skb, u16 ctrl)
{
#ifdef CONFIG_IBM_NEW_EMAC_TAH
	if (!ctrl && dev->tah_dev) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		++dev->stats.rx_packets_csum;
	}
#endif
}
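/* Editor's note: emac_poll_rx() masks ctrl with EMAC_BAD_RX_MASK before
 * calling emac_rx_csum(), so ctrl == 0 here means the TAH reported no
 * checksum error and the stack may skip verification.
 */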
static inline int emac_rx_sg_append(struct emac_instance *dev, int slot)
{
	if (likely(dev->rx_sg_skb != NULL)) {
		int len = dev->rx_desc[slot].data_len;
		int tot_len = dev->rx_sg_skb->len + len;

		if (unlikely(tot_len + 2 > dev->rx_skb_size)) {
			++dev->estats.rx_dropped_mtu;
			dev_kfree_skb(dev->rx_sg_skb);
			dev->rx_sg_skb = NULL;
		} else {
			cacheable_memcpy(dev->rx_sg_skb->tail,
					 dev->rx_skb[slot]->data, len);
			skb_put(dev->rx_sg_skb, len);
			emac_recycle_rx_skb(dev, slot, len);
			return 0;
		}
	}
	emac_recycle_rx_skb(dev, slot, 0);
	return -1;
}

/* NAPI poll context */
static int emac_poll_rx(void *param, int budget)
{
	struct emac_instance *dev = param;
	int slot = dev->rx_slot, received = 0;

	DBG2(dev, "poll_rx(%d)" NL, budget);

 again:
	while (budget > 0) {
		int len;
		struct sk_buff *skb;
		u16 ctrl = dev->rx_desc[slot].ctrl;

		if (ctrl & MAL_RX_CTRL_EMPTY)
			break;

		skb = dev->rx_skb[slot];
		mb();
		len = dev->rx_desc[slot].data_len;

		if (unlikely(!MAL_IS_SINGLE_RX(ctrl)))
			goto sg;

		ctrl &= EMAC_BAD_RX_MASK;
		if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
			emac_parse_rx_error(dev, ctrl);
			++dev->estats.rx_dropped_error;
			emac_recycle_rx_skb(dev, slot, 0);
			len = 0;
			goto next;
		}

		if (len && len < EMAC_RX_COPY_THRESH) {
			struct sk_buff *copy_skb =
			    alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2, GFP_ATOMIC);
			if (unlikely(!copy_skb))
				goto oom;

			skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2);
			cacheable_memcpy(copy_skb->data - 2, skb->data - 2,
					 len + 2);
			emac_recycle_rx_skb(dev, slot, len);
			skb = copy_skb;
		} else if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC)))
			goto oom;

		skb_put(skb, len);
 push_packet:
		skb->dev = dev->ndev;
		skb->protocol = eth_type_trans(skb, dev->ndev);
		emac_rx_csum(dev, skb, ctrl);

		if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
			++dev->estats.rx_dropped_stack;
 next:
		++dev->stats.rx_packets;
 skip:
		dev->stats.rx_bytes += len;
		slot = (slot + 1) % NUM_RX_BUFF;
		--budget;
		++received;
		continue;
 sg:
		if (ctrl & MAL_RX_CTRL_FIRST) {
			BUG_ON(dev->rx_sg_skb);
			if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) {
				DBG(dev, "rx OOM %d" NL, slot);
				++dev->estats.rx_dropped_oom;
				emac_recycle_rx_skb(dev, slot, 0);
			} else {
				dev->rx_sg_skb = skb;
				skb_put(skb, len);
			}
		} else if (!emac_rx_sg_append(dev, slot) &&
			   (ctrl & MAL_RX_CTRL_LAST)) {

			skb = dev->rx_sg_skb;
			dev->rx_sg_skb = NULL;

			ctrl &= EMAC_BAD_RX_MASK;
			if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
				emac_parse_rx_error(dev, ctrl);
				++dev->estats.rx_dropped_error;
				dev_kfree_skb(skb);
				len = 0;
			} else
				goto push_packet;
		}
		goto skip;
 oom:
		DBG(dev, "rx OOM %d" NL, slot);
		/* Drop the packet and recycle skb */
		++dev->estats.rx_dropped_oom;
		emac_recycle_rx_skb(dev, slot, 0);
		goto next;
	}

	if (received) {
		DBG2(dev, "rx %d BDs" NL, received);
		dev->rx_slot = slot;
	}

	if (unlikely(budget && test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags))) {
		mb();
		if (!(dev->rx_desc[slot].ctrl & MAL_RX_CTRL_EMPTY)) {
			DBG2(dev, "rx restart" NL);
			received = 0;
			goto again;
		}

		if (dev->rx_sg_skb) {
			DBG2(dev, "dropping partial rx packet" NL);
			++dev->estats.rx_dropped_error;
			dev_kfree_skb(dev->rx_sg_skb);
			dev->rx_sg_skb = NULL;
		}

		clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
		mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
		emac_rx_enable(dev);
		dev->rx_slot = 0;
	}
	return received;
}

/* NAPI poll context */
static int emac_peek_rx(void *param)
{
	struct emac_instance *dev = param;

	return !(dev->rx_desc[dev->rx_slot].ctrl & MAL_RX_CTRL_EMPTY);
}

/* NAPI poll context */
static int emac_peek_rx_sg(void *param)
{
	struct emac_instance *dev = param;

	int slot = dev->rx_slot;
	while (1) {
		u16 ctrl = dev->rx_desc[slot].ctrl;
		if (ctrl & MAL_RX_CTRL_EMPTY)
			return 0;
		else if (ctrl & MAL_RX_CTRL_LAST)
			return 1;

		slot = (slot + 1) % NUM_RX_BUFF;

		/* I'm just being paranoid here :) */
		if (unlikely(slot == dev->rx_slot))
			return 0;
	}
}

/* Hard IRQ */
static void emac_rxde(void *param)
{
	struct emac_instance *dev = param;

	++dev->estats.rx_stopped;
	emac_rx_disable_async(dev);
}
/* Hard IRQ */
static irqreturn_t emac_irq(int irq, void *dev_instance)
{
	struct emac_instance *dev = dev_instance;
	struct emac_regs __iomem *p = dev->emacp;
	struct emac_error_stats *st = &dev->estats;
	u32 isr;

	spin_lock(&dev->lock);

	isr = in_be32(&p->isr);
	out_be32(&p->isr, isr);

	DBG(dev, "isr = %08x" NL, isr);

	if (isr & EMAC4_ISR_TXPE)
		++st->tx_parity;
	if (isr & EMAC4_ISR_RXPE)
		++st->rx_parity;
	if (isr & EMAC4_ISR_TXUE)
		++st->tx_underrun;
	if (isr & EMAC4_ISR_RXOE)
		++st->rx_fifo_overrun;
	if (isr & EMAC_ISR_OVR)
		++st->rx_overrun;
	if (isr & EMAC_ISR_BP)
		++st->rx_bad_packet;
	if (isr & EMAC_ISR_RP)
		++st->rx_runt_packet;
	if (isr & EMAC_ISR_SE)
		++st->rx_short_event;
	if (isr & EMAC_ISR_ALE)
		++st->rx_alignment_error;
	if (isr & EMAC_ISR_BFCS)
		++st->rx_bad_fcs;
	if (isr & EMAC_ISR_PTLE)
		++st->rx_packet_too_long;
	if (isr & EMAC_ISR_ORE)
		++st->rx_out_of_range;
	if (isr & EMAC_ISR_IRE)
		++st->rx_in_range;
	if (isr & EMAC_ISR_SQE)
		++st->tx_sqe;
	if (isr & EMAC_ISR_TE)
		++st->tx_errors;

	spin_unlock(&dev->lock);

	return IRQ_HANDLED;
}

static struct net_device_stats *emac_stats(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	struct emac_stats *st = &dev->stats;
	struct emac_error_stats *est = &dev->estats;
	struct net_device_stats *nst = &dev->nstats;
	unsigned long flags;

	DBG2(dev, "stats" NL);

	/* Compute "legacy" statistics */
	spin_lock_irqsave(&dev->lock, flags);
	nst->rx_packets = (unsigned long)st->rx_packets;
	nst->rx_bytes = (unsigned long)st->rx_bytes;
	nst->tx_packets = (unsigned long)st->tx_packets;
	nst->tx_bytes = (unsigned long)st->tx_bytes;
	nst->rx_dropped = (unsigned long)(est->rx_dropped_oom +
					  est->rx_dropped_error +
					  est->rx_dropped_resize +
					  est->rx_dropped_mtu);
	nst->tx_dropped = (unsigned long)est->tx_dropped;

	nst->rx_errors = (unsigned long)est->rx_bd_errors;
	nst->rx_fifo_errors = (unsigned long)(est->rx_bd_overrun +
					      est->rx_fifo_overrun +
					      est->rx_overrun);
	nst->rx_frame_errors = (unsigned long)(est->rx_bd_alignment_error +
					       est->rx_alignment_error);
	nst->rx_crc_errors = (unsigned long)(est->rx_bd_bad_fcs +
					     est->rx_bad_fcs);
	nst->rx_length_errors = (unsigned long)(est->rx_bd_runt_packet +
						est->rx_bd_short_event +
						est->rx_bd_packet_too_long +
						est->rx_bd_out_of_range +
						est->rx_bd_in_range +
						est->rx_runt_packet +
						est->rx_short_event +
						est->rx_packet_too_long +
						est->rx_out_of_range +
						est->rx_in_range);

	nst->tx_errors = (unsigned long)(est->tx_bd_errors + est->tx_errors);
	nst->tx_fifo_errors = (unsigned long)(est->tx_bd_underrun +
					      est->tx_underrun);
	nst->tx_carrier_errors = (unsigned long)est->tx_bd_carrier_loss;
	nst->collisions = (unsigned long)(est->tx_bd_excessive_deferral +
					  est->tx_bd_excessive_collisions +
					  est->tx_bd_late_collision +
					  est->tx_bd_multple_collisions);
	spin_unlock_irqrestore(&dev->lock, flags);
	return nst;
}

static struct mal_commac_ops emac_commac_ops = {
	.poll_tx = &emac_poll_tx,
	.poll_rx = &emac_poll_rx,
	.peek_rx = &emac_peek_rx,
	.rxde = &emac_rxde,
};

static struct mal_commac_ops emac_commac_sg_ops = {
	.poll_tx = &emac_poll_tx,
	.poll_rx = &emac_poll_rx,
	.peek_rx = &emac_peek_rx_sg,
	.rxde = &emac_rxde,
};
/* Ethtool support */
static int emac_ethtool_get_settings(struct net_device *ndev,
				     struct ethtool_cmd *cmd)
{
	struct emac_instance *dev = netdev_priv(ndev);

	cmd->supported = dev->phy.features;
	cmd->port = PORT_MII;
	cmd->phy_address = dev->phy.address;
	cmd->transceiver =
	    dev->phy.address >= 0 ? XCVR_EXTERNAL : XCVR_INTERNAL;

	mutex_lock(&dev->link_lock);
	cmd->advertising = dev->phy.advertising;
	cmd->autoneg = dev->phy.autoneg;
	cmd->speed = dev->phy.speed;
	cmd->duplex = dev->phy.duplex;
	mutex_unlock(&dev->link_lock);

	return 0;
}

static int emac_ethtool_set_settings(struct net_device *ndev,
				     struct ethtool_cmd *cmd)
{
	struct emac_instance *dev = netdev_priv(ndev);
	u32 f = dev->phy.features;

	DBG(dev, "set_settings(%d, %d, %d, 0x%08x)" NL,
	    cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising);

	/* Basic sanity checks */
	if (dev->phy.address < 0)
		return -EOPNOTSUPP;
	if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;
	if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
		return -EINVAL;
	if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_DISABLE) {
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_HALF
			    && !(f & SUPPORTED_10baseT_Half))
				return -EINVAL;
			if (cmd->duplex == DUPLEX_FULL
			    && !(f & SUPPORTED_10baseT_Full))
				return -EINVAL;
			break;
		case SPEED_100:
			if (cmd->duplex == DUPLEX_HALF
			    && !(f & SUPPORTED_100baseT_Half))
				return -EINVAL;
			if (cmd->duplex == DUPLEX_FULL
			    && !(f & SUPPORTED_100baseT_Full))
				return -EINVAL;
			break;
		case SPEED_1000:
			if (cmd->duplex == DUPLEX_HALF
			    && !(f & SUPPORTED_1000baseT_Half))
				return -EINVAL;
			if (cmd->duplex == DUPLEX_FULL
			    && !(f & SUPPORTED_1000baseT_Full))
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}

		mutex_lock(&dev->link_lock);
		dev->phy.def->ops->setup_forced(&dev->phy, cmd->speed,
						cmd->duplex);
		mutex_unlock(&dev->link_lock);

	} else {
		if (!(f & SUPPORTED_Autoneg))
			return -EINVAL;

		mutex_lock(&dev->link_lock);
		dev->phy.def->ops->setup_aneg(&dev->phy,
					      (cmd->advertising & f) |
					      (dev->phy.advertising &
					       (ADVERTISED_Pause |
						ADVERTISED_Asym_Pause)));
		mutex_unlock(&dev->link_lock);
	}
	emac_force_link_update(dev);

	return 0;
}
static void emac_ethtool_get_ringparam(struct net_device *ndev,
				       struct ethtool_ringparam *rp)
{
	rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF;
	rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF;
}

static void emac_ethtool_get_pauseparam(struct net_device *ndev,
					struct ethtool_pauseparam *pp)
{
	struct emac_instance *dev = netdev_priv(ndev);

	mutex_lock(&dev->link_lock);
	if ((dev->phy.features & SUPPORTED_Autoneg) &&
	    (dev->phy.advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause)))
		pp->autoneg = 1;

	if (dev->phy.duplex == DUPLEX_FULL) {
		if (dev->phy.pause)
			pp->rx_pause = pp->tx_pause = 1;
		else if (dev->phy.asym_pause)
			pp->tx_pause = 1;
	}
	mutex_unlock(&dev->link_lock);
}

static u32 emac_ethtool_get_rx_csum(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	return dev->tah_dev != 0;
}

static int emac_get_regs_len(struct emac_instance *dev)
{
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		return sizeof(struct emac_ethtool_regs_subhdr) +
			EMAC4_ETHTOOL_REGS_SIZE;
	else
		return sizeof(struct emac_ethtool_regs_subhdr) +
			EMAC_ETHTOOL_REGS_SIZE;
}

static int emac_ethtool_get_regs_len(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int size;

	size = sizeof(struct emac_ethtool_regs_hdr) +
		emac_get_regs_len(dev) + mal_get_regs_len(dev->mal);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		size += zmii_get_regs_len(dev->zmii_dev);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		size += rgmii_get_regs_len(dev->rgmii_dev);
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		size += tah_get_regs_len(dev->tah_dev);

	return size;
}

static void *emac_dump_regs(struct emac_instance *dev, void *buf)
{
	struct emac_ethtool_regs_subhdr *hdr = buf;

	hdr->index = dev->cell_index;
	if (emac_has_feature(dev, EMAC_FTR_EMAC4)) {
		hdr->version = EMAC4_ETHTOOL_REGS_VER;
		memcpy_fromio(hdr + 1, dev->emacp, EMAC4_ETHTOOL_REGS_SIZE);
		return ((void *)(hdr + 1) + EMAC4_ETHTOOL_REGS_SIZE);
	} else {
		hdr->version = EMAC_ETHTOOL_REGS_VER;
		memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE);
		return ((void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE);
	}
}

static void emac_ethtool_get_regs(struct net_device *ndev,
				  struct ethtool_regs *regs, void *buf)
{
	struct emac_instance *dev = netdev_priv(ndev);
	struct emac_ethtool_regs_hdr *hdr = buf;

	hdr->components = 0;
	buf = hdr + 1;

	buf = mal_dump_regs(dev->mal, buf);
	buf = emac_dump_regs(dev, buf);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) {
		hdr->components |= EMAC_ETHTOOL_REGS_ZMII;
		buf = zmii_dump_regs(dev->zmii_dev, buf);
	}
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) {
		hdr->components |= EMAC_ETHTOOL_REGS_RGMII;
		buf = rgmii_dump_regs(dev->rgmii_dev, buf);
	}
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH)) {
		hdr->components |= EMAC_ETHTOOL_REGS_TAH;
		buf = tah_dump_regs(dev->tah_dev, buf);
	}
}
static int emac_ethtool_nway_reset(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int res = 0;

	DBG(dev, "nway_reset" NL);

	if (dev->phy.address < 0)
		return -EOPNOTSUPP;

	mutex_lock(&dev->link_lock);
	if (!dev->phy.autoneg) {
		res = -EINVAL;
		goto out;
	}

	dev->phy.def->ops->setup_aneg(&dev->phy, dev->phy.advertising);
 out:
	mutex_unlock(&dev->link_lock);
	emac_force_link_update(dev);
	return res;
}

static int emac_ethtool_get_stats_count(struct net_device *ndev)
{
	return EMAC_ETHTOOL_STATS_COUNT;
}

static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset,
				     u8 * buf)
{
	if (stringset == ETH_SS_STATS)
		memcpy(buf, &emac_stats_keys, sizeof(emac_stats_keys));
}

static void emac_ethtool_get_ethtool_stats(struct net_device *ndev,
					   struct ethtool_stats *estats,
					   u64 * tmp_stats)
{
	struct emac_instance *dev = netdev_priv(ndev);

	memcpy(tmp_stats, &dev->stats, sizeof(dev->stats));
	tmp_stats += sizeof(dev->stats) / sizeof(u64);
	memcpy(tmp_stats, &dev->estats, sizeof(dev->estats));
}

static void emac_ethtool_get_drvinfo(struct net_device *ndev,
				     struct ethtool_drvinfo *info)
{
	struct emac_instance *dev = netdev_priv(ndev);

	strcpy(info->driver, "ibm_emac");
	strcpy(info->version, DRV_VERSION);
	info->fw_version[0] = '\0';
	sprintf(info->bus_info, "PPC 4xx EMAC-%d %s",
		dev->cell_index, dev->ofdev->node->full_name);
	info->n_stats = emac_ethtool_get_stats_count(ndev);
	info->regdump_len = emac_ethtool_get_regs_len(ndev);
}

static const struct ethtool_ops emac_ethtool_ops = {
	.get_settings = emac_ethtool_get_settings,
	.set_settings = emac_ethtool_set_settings,
	.get_drvinfo = emac_ethtool_get_drvinfo,

	.get_regs_len = emac_ethtool_get_regs_len,
	.get_regs = emac_ethtool_get_regs,

	.nway_reset = emac_ethtool_nway_reset,

	.get_ringparam = emac_ethtool_get_ringparam,
	.get_pauseparam = emac_ethtool_get_pauseparam,

	.get_rx_csum = emac_ethtool_get_rx_csum,

	.get_strings = emac_ethtool_get_strings,
	.get_stats_count = emac_ethtool_get_stats_count,
	.get_ethtool_stats = emac_ethtool_get_ethtool_stats,

	.get_link = ethtool_op_get_link,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.get_sg = ethtool_op_get_sg,
};
static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
	struct emac_instance *dev = netdev_priv(ndev);
	uint16_t *data = (uint16_t *) & rq->ifr_ifru;

	DBG(dev, "ioctl %08x" NL, cmd);

	if (dev->phy.address < 0)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCDEVPRIVATE:
		data[0] = dev->phy.address;
		/* Fall through */
	case SIOCGMIIREG:
	case SIOCDEVPRIVATE + 1:
		data[3] = emac_mdio_read(ndev, dev->phy.address, data[1]);
		return 0;

	case SIOCSMIIREG:
	case SIOCDEVPRIVATE + 2:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		emac_mdio_write(ndev, dev->phy.address, data[1], data[2]);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
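/* Editor's note: the SIOCDEVPRIVATE layout above mirrors the MII ioctls:
 * data[0] returns the PHY address, data[1] selects the register, data[2]
 * carries a value to write and data[3] receives the value read back.
 */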
struct emac_depentry {
	u32			phandle;
	struct device_node	*node;
	struct of_device	*ofdev;
	void			*drvdata;
};

#define	EMAC_DEP_MAL_IDX	0
#define	EMAC_DEP_ZMII_IDX	1
#define	EMAC_DEP_RGMII_IDX	2
#define	EMAC_DEP_TAH_IDX	3
#define	EMAC_DEP_MDIO_IDX	4
#define	EMAC_DEP_PREV_IDX	5
#define	EMAC_DEP_COUNT		6

static int __devinit emac_check_deps(struct emac_instance *dev,
				     struct emac_depentry *deps)
{
	int i, there = 0;
	struct device_node *np;

	for (i = 0; i < EMAC_DEP_COUNT; i++) {
		/* no dependency on that item, all right */
		if (deps[i].phandle == 0) {
			there++;
			continue;
		}
		/* special case for blist as the dependency might go away */
		if (i == EMAC_DEP_PREV_IDX) {
			np = *(dev->blist - 1);
			if (np == NULL) {
				deps[i].phandle = 0;
				there++;
				continue;
			}
			if (deps[i].node == NULL)
				deps[i].node = of_node_get(np);
		}
		if (deps[i].node == NULL)
			deps[i].node = of_find_node_by_phandle(deps[i].phandle);
		if (deps[i].node == NULL)
			continue;
		if (deps[i].ofdev == NULL)
			deps[i].ofdev = of_find_device_by_node(deps[i].node);
		if (deps[i].ofdev == NULL)
			continue;
		if (deps[i].drvdata == NULL)
			deps[i].drvdata = dev_get_drvdata(&deps[i].ofdev->dev);
		if (deps[i].drvdata != NULL)
			there++;
	}
	return (there == EMAC_DEP_COUNT);
}

static void emac_put_deps(struct emac_instance *dev)
{
	if (dev->mal_dev)
		of_dev_put(dev->mal_dev);
	if (dev->zmii_dev)
		of_dev_put(dev->zmii_dev);
	if (dev->rgmii_dev)
		of_dev_put(dev->rgmii_dev);
	if (dev->mdio_dev)
		of_dev_put(dev->mdio_dev);
	if (dev->tah_dev)
		of_dev_put(dev->tah_dev);
}

static int __devinit emac_of_bus_notify(struct notifier_block *nb,
					unsigned long action, void *data)
{
	/* We are only interested in device addition */
	if (action == BUS_NOTIFY_BOUND_DRIVER)
		wake_up_all(&emac_probe_wait);
	return 0;
}

static struct notifier_block emac_of_bus_notifier = {
	.notifier_call = emac_of_bus_notify
};
static int __devinit emac_wait_deps(struct emac_instance *dev)
{
	struct emac_depentry deps[EMAC_DEP_COUNT];
	int i, err;

	memset(&deps, 0, sizeof(deps));

	deps[EMAC_DEP_MAL_IDX].phandle = dev->mal_ph;
	deps[EMAC_DEP_ZMII_IDX].phandle = dev->zmii_ph;
	deps[EMAC_DEP_RGMII_IDX].phandle = dev->rgmii_ph;
	if (dev->tah_ph)
		deps[EMAC_DEP_TAH_IDX].phandle = dev->tah_ph;
	if (dev->mdio_ph)
		deps[EMAC_DEP_MDIO_IDX].phandle = dev->mdio_ph;
	if (dev->blist && dev->blist > emac_boot_list)
		deps[EMAC_DEP_PREV_IDX].phandle = 0xffffffffu;
	bus_register_notifier(&of_platform_bus_type, &emac_of_bus_notifier);
	wait_event_timeout(emac_probe_wait,
			   emac_check_deps(dev, deps),
			   EMAC_PROBE_DEP_TIMEOUT);
	bus_unregister_notifier(&of_platform_bus_type, &emac_of_bus_notifier);
	err = emac_check_deps(dev, deps) ? 0 : -ENODEV;
	for (i = 0; i < EMAC_DEP_COUNT; i++) {
		if (deps[i].node)
			of_node_put(deps[i].node);
		if (err && deps[i].ofdev)
			of_dev_put(deps[i].ofdev);
	}
	if (err == 0) {
		dev->mal_dev = deps[EMAC_DEP_MAL_IDX].ofdev;
		dev->zmii_dev = deps[EMAC_DEP_ZMII_IDX].ofdev;
		dev->rgmii_dev = deps[EMAC_DEP_RGMII_IDX].ofdev;
		dev->tah_dev = deps[EMAC_DEP_TAH_IDX].ofdev;
		dev->mdio_dev = deps[EMAC_DEP_MDIO_IDX].ofdev;
	}
	if (deps[EMAC_DEP_PREV_IDX].ofdev)
		of_dev_put(deps[EMAC_DEP_PREV_IDX].ofdev);
	return err;
}

static int __devinit emac_read_uint_prop(struct device_node *np, const char *name,
					 u32 *val, int fatal)
{
	int len;
	const u32 *prop = of_get_property(np, name, &len);
	if (prop == NULL || len < sizeof(u32)) {
		if (fatal)
			printk(KERN_ERR "%s: missing %s property\n",
			       np->full_name, name);
		return -ENODEV;
	}
	*val = *prop;
	return 0;
}
static int __devinit emac_init_phy(struct emac_instance *dev)
{
	struct device_node *np = dev->ofdev->node;
	struct net_device *ndev = dev->ndev;
	u32 phy_map, adv;
	int i;

	dev->phy.dev = ndev;
	dev->phy.mode = dev->phy_mode;

	/* PHY-less configuration.
	 * XXX I probably should move these settings to the dev tree
	 */
	if (dev->phy_address == 0xffffffff && dev->phy_map == 0xffffffff) {
		emac_reset(dev);

		dev->phy.address = -1;
		dev->phy.features = SUPPORTED_100baseT_Full | SUPPORTED_MII;
		dev->phy.pause = 1;

		return 0;
	}

	mutex_lock(&emac_phy_map_lock);
	phy_map = dev->phy_map | busy_phy_map;

	DBG(dev, "PHY maps %08x %08x" NL, dev->phy_map, busy_phy_map);

	dev->phy.mdio_read = emac_mdio_read;
	dev->phy.mdio_write = emac_mdio_write;

	/* Configure EMAC with defaults so we can at least use MDIO
	 * This is needed mostly for 440GX
	 */
	if (emac_phy_gpcs(dev->phy.mode)) {
		/* XXX
		 * Make GPCS PHY address equal to EMAC index.
		 * We probably should take into account busy_phy_map
		 * and/or phy_map here.
		 *
		 * Note that the busy_phy_map is currently global
		 * while it should probably be per-ASIC...
		 */
		dev->phy.address = dev->cell_index;
	}

	emac_configure(dev);

	if (dev->phy_address != 0xffffffff)
		phy_map = ~(1 << dev->phy_address);

	for (i = 0; i < 0x20; phy_map >>= 1, ++i)
		if (!(phy_map & 1)) {
			int r;
			busy_phy_map |= 1 << i;

			/* Quick check if there is a PHY at the address */
			r = emac_mdio_read(dev->ndev, i, MII_BMCR);
			if (r == 0xffff || r < 0)
				continue;
			if (!emac_mii_phy_probe(&dev->phy, i))
				break;
		}
	mutex_unlock(&emac_phy_map_lock);
	if (i == 0x20) {
		printk(KERN_WARNING "%s: can't find PHY!\n", np->full_name);
		return -ENXIO;
	}

	/* Init PHY */
	if (dev->phy.def->ops->init)
		dev->phy.def->ops->init(&dev->phy);

	/* Disable any PHY features not supported by the platform */
	dev->phy.def->features &= ~dev->phy_feat_exc;

	/* Setup initial link parameters */
	if (dev->phy.features & SUPPORTED_Autoneg) {
		adv = dev->phy.features;
		if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x))
			adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
		/* Restart autonegotiation */
		dev->phy.def->ops->setup_aneg(&dev->phy, adv);
	} else {
		u32 f = dev->phy.def->features;
		int speed = SPEED_10, fd = DUPLEX_HALF;

		/* Select highest supported speed/duplex */
		if (f & SUPPORTED_1000baseT_Full) {
			speed = SPEED_1000;
			fd = DUPLEX_FULL;
		} else if (f & SUPPORTED_1000baseT_Half)
			speed = SPEED_1000;
		else if (f & SUPPORTED_100baseT_Full) {
			speed = SPEED_100;
			fd = DUPLEX_FULL;
		} else if (f & SUPPORTED_100baseT_Half)
			speed = SPEED_100;
		else if (f & SUPPORTED_10baseT_Full)
			fd = DUPLEX_FULL;

		/* Force link parameters */
		dev->phy.def->ops->setup_forced(&dev->phy, speed, fd);
	}
	return 0;
}
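/*
 * Worked example for the scan above (hypothetical values): phy_map is a
 * mask of MDIO addresses to *skip*, so a clear bit means "probe this
 * address". With dev->phy_map == 0xfffffffd and no other EMAC active,
 * only bit 1 is clear and only MDIO address 1 is probed. An explicit
 * "phy-address" property collapses the search to a single candidate:
 *
 *	phy_map = ~(1 << dev->phy_address);	// all bits set but one
 */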
static int __devinit emac_init_config(struct emac_instance *dev)
{
	struct device_node *np = dev->ofdev->node;
	const void *p;
	int plen;
	const char *pm, *phy_modes[] = {
		[PHY_MODE_NA] = "",
		[PHY_MODE_MII] = "mii",
		[PHY_MODE_RMII] = "rmii",
		[PHY_MODE_SMII] = "smii",
		[PHY_MODE_RGMII] = "rgmii",
		[PHY_MODE_TBI] = "tbi",
		[PHY_MODE_GMII] = "gmii",
		[PHY_MODE_RTBI] = "rtbi",
		[PHY_MODE_SGMII] = "sgmii",
	};
	int i;

	/* Read config from device-tree */
	if (emac_read_uint_prop(np, "mal-device", &dev->mal_ph, 1))
		return -ENXIO;
	if (emac_read_uint_prop(np, "mal-tx-channel", &dev->mal_tx_chan, 1))
		return -ENXIO;
	if (emac_read_uint_prop(np, "mal-rx-channel", &dev->mal_rx_chan, 1))
		return -ENXIO;
	if (emac_read_uint_prop(np, "cell-index", &dev->cell_index, 1))
		return -ENXIO;
	if (emac_read_uint_prop(np, "max-frame-size", &dev->max_mtu, 0))
		dev->max_mtu = 1500;
	if (emac_read_uint_prop(np, "rx-fifo-size", &dev->rx_fifo_size, 0))
		dev->rx_fifo_size = 2048;
	if (emac_read_uint_prop(np, "tx-fifo-size", &dev->tx_fifo_size, 0))
		dev->tx_fifo_size = 2048;
	if (emac_read_uint_prop(np, "rx-fifo-size-gige", &dev->rx_fifo_size_gige, 0))
		dev->rx_fifo_size_gige = dev->rx_fifo_size;
	if (emac_read_uint_prop(np, "tx-fifo-size-gige", &dev->tx_fifo_size_gige, 0))
		dev->tx_fifo_size_gige = dev->tx_fifo_size;
	if (emac_read_uint_prop(np, "phy-address", &dev->phy_address, 0))
		dev->phy_address = 0xffffffff;
	if (emac_read_uint_prop(np, "phy-map", &dev->phy_map, 0))
		dev->phy_map = 0xffffffff;
	if (emac_read_uint_prop(np->parent, "clock-frequency", &dev->opb_bus_freq, 1))
		return -ENXIO;
	if (emac_read_uint_prop(np, "tah-device", &dev->tah_ph, 0))
		dev->tah_ph = 0;
	if (emac_read_uint_prop(np, "tah-channel", &dev->tah_port, 0))
		dev->tah_port = 0;
	if (emac_read_uint_prop(np, "mdio-device", &dev->mdio_ph, 0))
		dev->mdio_ph = 0;
	if (emac_read_uint_prop(np, "zmii-device", &dev->zmii_ph, 0))
		dev->zmii_ph = 0;
	if (emac_read_uint_prop(np, "zmii-channel", &dev->zmii_port, 0))
		dev->zmii_port = 0xffffffff;
	if (emac_read_uint_prop(np, "rgmii-device", &dev->rgmii_ph, 0))
		dev->rgmii_ph = 0;
	if (emac_read_uint_prop(np, "rgmii-channel", &dev->rgmii_port, 0))
		dev->rgmii_port = 0xffffffff;
	if (emac_read_uint_prop(np, "fifo-entry-size", &dev->fifo_entry_size, 0))
		dev->fifo_entry_size = 16;
	if (emac_read_uint_prop(np, "mal-burst-size", &dev->mal_burst_size, 0))
		dev->mal_burst_size = 256;

	/* PHY mode needs some decoding */
	dev->phy_mode = PHY_MODE_NA;
	pm = of_get_property(np, "phy-mode", &plen);
	if (pm != NULL) {
		for (i = 0; i < ARRAY_SIZE(phy_modes); i++)
			if (!strcasecmp(pm, phy_modes[i])) {
				dev->phy_mode = i;
				break;
			}
	}

	/* Backward compat with non-final DT */
	if (dev->phy_mode == PHY_MODE_NA && pm != NULL && plen == 4) {
		u32 nmode = *(const u32 *)pm;
		if (nmode > PHY_MODE_NA && nmode <= PHY_MODE_SGMII)
			dev->phy_mode = nmode;
	}

	/* Check EMAC version */
	if (of_device_is_compatible(np, "ibm,emac4"))
		dev->features |= EMAC_FTR_EMAC4;
	if (of_device_is_compatible(np, "ibm,emac-axon")
	    || of_device_is_compatible(np, "ibm,emac-440epx"))
		dev->features |= EMAC_FTR_HAS_AXON_STACR
			| EMAC_FTR_STACR_OC_INVERT;
	if (of_device_is_compatible(np, "ibm,emac-440spe"))
		dev->features |= EMAC_FTR_STACR_OC_INVERT;

	/* Fixup some feature bits based on the device tree and verify
	 * we have support for them compiled in
	 */
	if (dev->tah_ph != 0) {
#ifdef CONFIG_IBM_NEW_EMAC_TAH
		dev->features |= EMAC_FTR_HAS_TAH;
#else
		printk(KERN_ERR "%s: TAH support not enabled !\n",
		       np->full_name);
		return -ENXIO;
#endif
	}

	if (dev->zmii_ph != 0) {
#ifdef CONFIG_IBM_NEW_EMAC_ZMII
		dev->features |= EMAC_FTR_HAS_ZMII;
#else
		printk(KERN_ERR "%s: ZMII support not enabled !\n",
		       np->full_name);
		return -ENXIO;
#endif
	}

	if (dev->rgmii_ph != 0) {
#ifdef CONFIG_IBM_NEW_EMAC_RGMII
		dev->features |= EMAC_FTR_HAS_RGMII;
#else
		printk(KERN_ERR "%s: RGMII support not enabled !\n",
		       np->full_name);
		return -ENXIO;
#endif
	}

	/* Read MAC-address */
	p = of_get_property(np, "local-mac-address", NULL);
	if (p == NULL) {
		printk(KERN_ERR "%s: Can't find local-mac-address property\n",
		       np->full_name);
		return -ENXIO;
	}
	memcpy(dev->ndev->dev_addr, p, 6);

	DBG(dev, "features     : 0x%08x / 0x%08x\n", dev->features, EMAC_FTRS_POSSIBLE);
	DBG(dev, "tx_fifo_size : %d (%d gige)\n", dev->tx_fifo_size, dev->tx_fifo_size_gige);
	DBG(dev, "rx_fifo_size : %d (%d gige)\n", dev->rx_fifo_size, dev->rx_fifo_size_gige);
	DBG(dev, "max_mtu      : %d\n", dev->max_mtu);
	DBG(dev, "OPB freq     : %d\n", dev->opb_bus_freq);

	return 0;
}
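/*
 * Illustrative device-tree fragment for the properties decoded above.
 * Node names, addresses and values are hypothetical, not taken from a
 * real board file:
 *
 *	EMAC0: ethernet@ef600800 {
 *		compatible = "ibm,emac4", "ibm,emac";
 *		cell-index = <0>;
 *		mal-device = <&MAL0>;
 *		mal-tx-channel = <0>;
 *		mal-rx-channel = <0>;
 *		max-frame-size = <1500>;
 *		rx-fifo-size = <2048>;
 *		tx-fifo-size = <2048>;
 *		phy-mode = "rgmii";
 *		local-mac-address = [ 00 00 00 00 00 00 ];
 *		rgmii-device = <&RGMII0>;
 *		rgmii-channel = <0>;
 *	};
 */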
static int __devinit emac_probe(struct of_device *ofdev,
				const struct of_device_id *match)
{
	struct net_device *ndev;
	struct emac_instance *dev;
	struct device_node *np = ofdev->node;
	struct device_node **blist = NULL;
	int err, i;

	/* Find ourselves in the bootlist if we are there */
	for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
		if (emac_boot_list[i] == np)
			blist = &emac_boot_list[i];

	/* Allocate our net_device structure */
	err = -ENOMEM;
	ndev = alloc_etherdev(sizeof(struct emac_instance));
	if (!ndev) {
		printk(KERN_ERR "%s: could not allocate ethernet device!\n",
		       np->full_name);
		goto err_gone;
	}
	dev = netdev_priv(ndev);
	dev->ndev = ndev;
	dev->ofdev = ofdev;
	dev->blist = blist;
	SET_MODULE_OWNER(ndev);
	SET_NETDEV_DEV(ndev, &ofdev->dev);

	/* Initialize some embedded data structures */
	mutex_init(&dev->mdio_lock);
	mutex_init(&dev->link_lock);
	spin_lock_init(&dev->lock);
	INIT_WORK(&dev->reset_work, emac_reset_work);

	/* Init various config data based on device-tree */
	err = emac_init_config(dev);
	if (err != 0)
		goto err_free;

	/* Get interrupts. EMAC irq is mandatory, WOL irq is optional */
	dev->emac_irq = irq_of_parse_and_map(np, 0);
	dev->wol_irq = irq_of_parse_and_map(np, 1);
	if (dev->emac_irq == NO_IRQ) {
		printk(KERN_ERR "%s: Can't map main interrupt\n", np->full_name);
		goto err_free;
	}
	ndev->irq = dev->emac_irq;

	/* Map EMAC regs */
	if (of_address_to_resource(np, 0, &dev->rsrc_regs)) {
		printk(KERN_ERR "%s: Can't get registers address\n",
		       np->full_name);
		goto err_irq_unmap;
	}
	/* TODO: request_mem_region */
	dev->emacp = ioremap(dev->rsrc_regs.start, sizeof(struct emac_regs));
	if (dev->emacp == NULL) {
		printk(KERN_ERR "%s: Can't map device registers!\n",
		       np->full_name);
		err = -ENOMEM;
		goto err_irq_unmap;
	}

	/* Wait for dependent devices */
	err = emac_wait_deps(dev);
	if (err) {
		printk(KERN_ERR
		       "%s: Timeout waiting for dependent devices\n",
		       np->full_name);
		/* display more info about what's missing ? */
		goto err_reg_unmap;
	}
	dev->mal = dev_get_drvdata(&dev->mal_dev->dev);
	if (dev->mdio_dev != NULL)
		dev->mdio_instance = dev_get_drvdata(&dev->mdio_dev->dev);

	/* Register with MAL */
	dev->commac.ops = &emac_commac_ops;
	dev->commac.dev = dev;
	dev->commac.tx_chan_mask = MAL_CHAN_MASK(dev->mal_tx_chan);
	dev->commac.rx_chan_mask = MAL_CHAN_MASK(dev->mal_rx_chan);
	err = mal_register_commac(dev->mal, &dev->commac);
	if (err) {
		printk(KERN_ERR "%s: failed to register with mal %s!\n",
		       np->full_name, dev->mal_dev->node->full_name);
		goto err_rel_deps;
	}
	dev->rx_skb_size = emac_rx_skb_size(ndev->mtu);
	dev->rx_sync_size = emac_rx_sync_size(ndev->mtu);

	/* Get pointers to BD rings */
	dev->tx_desc =
	    dev->mal->bd_virt + mal_tx_bd_offset(dev->mal, dev->mal_tx_chan);
	dev->rx_desc =
	    dev->mal->bd_virt + mal_rx_bd_offset(dev->mal, dev->mal_rx_chan);

	DBG(dev, "tx_desc %p" NL, dev->tx_desc);
	DBG(dev, "rx_desc %p" NL, dev->rx_desc);

	/* Clean rings */
	memset(dev->tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor));
	memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor));
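	/* Note on the descriptor setup above: the MAL owns one contiguous
	 * block of buffer descriptors at dev->mal->bd_virt; each channel's
	 * ring is just an offset into that block, so the memsets clear only
	 * this instance's NUM_TX_BUFF/NUM_RX_BUFF slice, not the whole area.
	 */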
	/* Attach to ZMII, if needed */
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII) &&
	    (err = zmii_attach(dev->zmii_dev, dev->zmii_port, &dev->phy_mode)) != 0)
		goto err_unreg_commac;

	/* Attach to RGMII, if needed */
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII) &&
	    (err = rgmii_attach(dev->rgmii_dev, dev->rgmii_port, dev->phy_mode)) != 0)
		goto err_detach_zmii;

	/* Attach to TAH, if needed */
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
	    (err = tah_attach(dev->tah_dev, dev->tah_port)) != 0)
		goto err_detach_rgmii;

	/* Set some link defaults before we can find out real parameters */
	dev->phy.speed = SPEED_100;
	dev->phy.duplex = DUPLEX_FULL;
	dev->phy.autoneg = AUTONEG_DISABLE;
	dev->phy.pause = dev->phy.asym_pause = 0;
	dev->stop_timeout = STOP_TIMEOUT_100;
	INIT_DELAYED_WORK(&dev->link_work, emac_link_timer);

	/* Find PHY if any */
	err = emac_init_phy(dev);
	if (err != 0)
		goto err_detach_tah;

	/* Fill in the driver function table */
	ndev->open = &emac_open;
#ifdef CONFIG_IBM_NEW_EMAC_TAH
	if (dev->tah_dev) {
		ndev->hard_start_xmit = &emac_start_xmit_sg;
		ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
	} else
#endif
		ndev->hard_start_xmit = &emac_start_xmit;
	ndev->tx_timeout = &emac_tx_timeout;
	ndev->watchdog_timeo = 5 * HZ;
	ndev->stop = &emac_close;
	ndev->get_stats = &emac_stats;
	ndev->set_multicast_list = &emac_set_multicast_list;
	ndev->do_ioctl = &emac_ioctl;
	if (emac_phy_supports_gige(dev->phy_mode)) {
		ndev->change_mtu = &emac_change_mtu;
		dev->commac.ops = &emac_commac_sg_ops;
	}
	SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);

	netif_carrier_off(ndev);
	netif_stop_queue(ndev);

	err = register_netdev(ndev);
	if (err) {
		printk(KERN_ERR "%s: failed to register net device (%d)!\n",
		       np->full_name, err);
		goto err_detach_tah;
	}

	/* Set our drvdata last as we don't want them visible until we are
	 * fully initialized
	 */
	wmb();
	dev_set_drvdata(&ofdev->dev, dev);

	/* There's a new kid in town ! Let's tell everybody */
	wake_up_all(&emac_probe_wait);

	printk(KERN_INFO
	       "%s: EMAC-%d %s, MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
	       ndev->name, dev->cell_index, np->full_name,
	       ndev->dev_addr[0], ndev->dev_addr[1], ndev->dev_addr[2],
	       ndev->dev_addr[3], ndev->dev_addr[4], ndev->dev_addr[5]);

	if (dev->phy.address >= 0)
		printk("%s: found %s PHY (0x%02x)\n", ndev->name,
		       dev->phy.def->name, dev->phy.address);

	emac_dbg_register(dev);

	/* Life is good */
	return 0;

	/* I have a bad feeling about this ... */

 err_detach_tah:
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		tah_detach(dev->tah_dev, dev->tah_port);
 err_detach_rgmii:
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
 err_detach_zmii:
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_detach(dev->zmii_dev, dev->zmii_port);
 err_unreg_commac:
	mal_unregister_commac(dev->mal, &dev->commac);
 err_rel_deps:
	emac_put_deps(dev);
 err_reg_unmap:
	iounmap(dev->emacp);
 err_irq_unmap:
	if (dev->wol_irq != NO_IRQ)
		irq_dispose_mapping(dev->wol_irq);
	if (dev->emac_irq != NO_IRQ)
		irq_dispose_mapping(dev->emac_irq);
 err_free:
	free_netdev(ndev);
 err_gone:
	/* if we were on the bootlist, remove us as we won't show up and
	 * wake up all waiters to notify them in case they were waiting
	 * on us
	 */
	if (blist) {
		*blist = NULL;
		wake_up_all(&emac_probe_wait);
	}
	return err;
}
static int __devexit emac_remove(struct of_device *ofdev)
{
	struct emac_instance *dev = dev_get_drvdata(&ofdev->dev);

	DBG(dev, "remove" NL);

	dev_set_drvdata(&ofdev->dev, NULL);

	unregister_netdev(dev->ndev);

	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		tah_detach(dev->tah_dev, dev->tah_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_detach(dev->zmii_dev, dev->zmii_port);

	mal_unregister_commac(dev->mal, &dev->commac);
	emac_put_deps(dev);

	emac_dbg_unregister(dev);
	iounmap(dev->emacp);

	if (dev->wol_irq != NO_IRQ)
		irq_dispose_mapping(dev->wol_irq);
	if (dev->emac_irq != NO_IRQ)
		irq_dispose_mapping(dev->emac_irq);

	free_netdev(dev->ndev);

	return 0;
}
/* XXX Features in here should be replaced by properties... */
static struct of_device_id emac_match[] =
{
	{
		.type		= "network",
		.compatible	= "ibm,emac",
	},
	{
		.type		= "network",
		.compatible	= "ibm,emac4",
	},
	{},
};
static struct of_platform_driver emac_driver = {
	.name = "emac",
	.match_table = emac_match,

	.probe = emac_probe,
	.remove = emac_remove,
};
static void __init emac_make_bootlist(void)
{
	struct device_node *np = NULL;
	int j, max, i = 0, k;
	int cell_indices[EMAC_BOOT_LIST_SIZE];

	/* Collect EMACs */
	while ((np = of_find_all_nodes(np)) != NULL) {
		const u32 *idx;

		if (of_match_node(emac_match, np) == NULL)
			continue;
		if (of_get_property(np, "unused", NULL))
			continue;
		idx = of_get_property(np, "cell-index", NULL);
		if (idx == NULL)
			continue;
		cell_indices[i] = *idx;
		emac_boot_list[i++] = of_node_get(np);
		if (i >= EMAC_BOOT_LIST_SIZE) {
			of_node_put(np);
			break;
		}
	}
	max = i;

	/* Bubble sort them (doh, what a creative algorithm :-) */
	for (i = 0; max > 1 && (i < (max - 1)); i++)
		for (j = i; j < max; j++) {
			if (cell_indices[i] > cell_indices[j]) {
				np = emac_boot_list[i];
				emac_boot_list[i] = emac_boot_list[j];
				emac_boot_list[j] = np;
				k = cell_indices[i];
				cell_indices[i] = cell_indices[j];
				cell_indices[j] = k;
			}
		}
}
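/*
 * Worked example (hypothetical cell indices): if the tree walk found
 * EMACs with cell-index 2, 0, 1 in that order, the exchange sort above
 * reorders emac_boot_list to cell-index order 0, 1, 2, so the first
 * registered netdev corresponds to cell 0 whatever the traversal order.
 */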
static int __init emac_init(void)
{
	int rc;

	printk(KERN_INFO DRV_DESC ", version " DRV_VERSION "\n");

	/* Init debug stuff */
	emac_init_debug();

	/* Build EMAC boot list */
	emac_make_bootlist();

	/* Init submodules */
	rc = mal_init();
	if (rc)
		return rc;

	return of_register_platform_driver(&emac_driver);
}
static void __exit emac_exit(void)
{
	int i;

	of_unregister_platform_driver(&emac_driver);
	mal_exit();

	/* Destroy EMAC boot list */
	for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
		if (emac_boot_list[i])
			of_node_put(emac_boot_list[i]);
}
module_init(emac_init);
module_exit(emac_exit);