2 * drivers/net/ibm_newemac/core.c
4 * Driver for PowerPC 4xx on-chip ethernet controller.
6 * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
7 * <benh@kernel.crashing.org>
9 * Based on the arch/ppc version of the driver:
11 * Copyright (c) 2004, 2005 Zultys Technologies.
12 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
14 * Based on original work by
15 * Matt Porter <mporter@kernel.crashing.org>
16 * (c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
17 * Armin Kuster <akuster@mvista.com>
18 * Johnnie Peters <jpeters@mvista.com>
20 * This program is free software; you can redistribute it and/or modify it
21 * under the terms of the GNU General Public License as published by the
22 * Free Software Foundation; either version 2 of the License, or (at your
23 * option) any later version.
27 #include <linux/sched.h>
28 #include <linux/string.h>
29 #include <linux/errno.h>
30 #include <linux/delay.h>
31 #include <linux/types.h>
32 #include <linux/pci.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/crc32.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #include <linux/bitops.h>
39 #include <linux/workqueue.h>
41 #include <asm/processor.h>
44 #include <asm/uaccess.h>
49 * Lack of dma_unmap_???? calls is intentional.
51 * API-correct usage requires additional support state information to be
52 * maintained for every RX and TX buffer descriptor (BD). Unfortunately, due to
53 * EMAC design (e.g. TX buffer passed from network stack can be split into
54 * several BDs, dma_map_single/dma_map_page can be used to map particular BD),
55 * maintaining such information will add additional overhead.
56 * Current DMA API implementation for 4xx processors only ensures cache coherency
57 * and dma_unmap_???? routines are empty and are likely to stay this way.
58 * I decided to omit dma_unmap_??? calls because I don't want to add additional
59 * complexity just for the sake of following some abstract API, when it doesn't
60 * add any real benefit to the driver. I understand that this decision maybe
61 * controversial, but I really tried to make code API-correct and efficient
62 * at the same time and didn't come up with code I liked :(. --ebs
65 #define DRV_NAME "emac"
66 #define DRV_VERSION "3.54"
67 #define DRV_DESC "PPC 4xx OCP EMAC driver"
69 MODULE_DESCRIPTION(DRV_DESC);
71 ("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>");
72 MODULE_LICENSE("GPL");
75 * PPC64 doesn't (yet) have a cacheable_memcpy
78 #define cacheable_memcpy(d,s,n) memcpy((d),(s),(n))
81 /* minimum number of free TX descriptors required to wake up TX process */
82 #define EMAC_TX_WAKEUP_THRESH (NUM_TX_BUFF / 4)
84 /* If packet size is less than this number, we allocate small skb and copy packet
85 * contents into it instead of just sending original big skb up
87 #define EMAC_RX_COPY_THRESH CONFIG_IBM_NEW_EMAC_RX_COPY_THRESHOLD
89 /* Since multiple EMACs share MDIO lines in various ways, we need
90 * to avoid re-using the same PHY ID in cases where the arch didn't
91 * setup precise phy_map entries
93 * XXX This is something that needs to be reworked as we can have multiple
94 * EMAC "sets" (multiple ASICs containing several EMACs) though we can
95 * probably require in that case to have explicit PHY IDs in the device-tree
97 static u32 busy_phy_map;
98 static DEFINE_MUTEX(emac_phy_map_lock);
100 /* This is the wait queue used to wait on any event related to probe, that
101 * is discovery of MALs, other EMACs, ZMII/RGMIIs, etc...
103 static DECLARE_WAIT_QUEUE_HEAD(emac_probe_wait);
105 /* Having stable interface names is a doomed idea. However, it would be nice
106 * if we didn't have completely random interface names at boot too :-) It's
107 * just a matter of making everybody's life easier. Since we are doing
108 * threaded probing, it's a bit harder though. The base idea here is that
109 * we make up a list of all emacs in the device-tree before we register the
110 * driver. Every emac will then wait for the previous one in the list to
111 * initialize before itself. We should also keep that list ordered by
113 * That list is only 4 entries long, meaning that additional EMACs don't
114 * get ordering guarantees unless EMAC_BOOT_LIST_SIZE is increased.
117 #define EMAC_BOOT_LIST_SIZE 4
118 static struct device_node *emac_boot_list[EMAC_BOOT_LIST_SIZE];
120 /* How long should I wait for dependent devices ? */
121 #define EMAC_PROBE_DEP_TIMEOUT (HZ * 5)
123 /* I don't want to litter system log with timeout errors
124 * when we have brain-damaged PHY.
126 static inline void emac_report_timeout_error(struct emac_instance *dev,
130 printk(KERN_ERR "%s: %s\n", dev->ndev->name, error);
133 /* PHY polling intervals */
134 #define PHY_POLL_LINK_ON HZ
135 #define PHY_POLL_LINK_OFF (HZ / 5)
137 /* Graceful stop timeouts in us.
138 * We should allow up to 1 frame time (full-duplex, ignoring collisions)
140 #define STOP_TIMEOUT_10 1230
141 #define STOP_TIMEOUT_100 124
142 #define STOP_TIMEOUT_1000 13
143 #define STOP_TIMEOUT_1000_JUMBO 73
145 /* Please, keep in sync with struct ibm_emac_stats/ibm_emac_error_stats */
146 static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = {
147 "rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum",
148 "tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom",
149 "rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu",
150 "rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet",
151 "rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error",
152 "rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range",
153 "rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun",
154 "rx_bad_packet", "rx_runt_packet", "rx_short_event",
155 "rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long",
156 "rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors",
157 "tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral",
158 "tx_bd_excessive_collisions", "tx_bd_late_collision",
159 "tx_bd_multple_collisions", "tx_bd_single_collision",
160 "tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe",
164 static irqreturn_t emac_irq(int irq, void *dev_instance);
165 static void emac_clean_tx_ring(struct emac_instance *dev);
166 static void __emac_set_multicast_list(struct emac_instance *dev);
168 static inline int emac_phy_supports_gige(int phy_mode)
170 return phy_mode == PHY_MODE_GMII ||
171 phy_mode == PHY_MODE_RGMII ||
172 phy_mode == PHY_MODE_TBI ||
173 phy_mode == PHY_MODE_RTBI;
176 static inline int emac_phy_gpcs(int phy_mode)
178 return phy_mode == PHY_MODE_TBI ||
179 phy_mode == PHY_MODE_RTBI;
182 static inline void emac_tx_enable(struct emac_instance *dev)
184 struct emac_regs __iomem *p = dev->emacp;
187 DBG(dev, "tx_enable" NL);
189 r = in_be32(&p->mr0);
190 if (!(r & EMAC_MR0_TXE))
191 out_be32(&p->mr0, r | EMAC_MR0_TXE);
194 static void emac_tx_disable(struct emac_instance *dev)
196 struct emac_regs __iomem *p = dev->emacp;
199 DBG(dev, "tx_disable" NL);
201 r = in_be32(&p->mr0);
202 if (r & EMAC_MR0_TXE) {
203 int n = dev->stop_timeout;
204 out_be32(&p->mr0, r & ~EMAC_MR0_TXE);
205 while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n) {
210 emac_report_timeout_error(dev, "TX disable timeout");
214 static void emac_rx_enable(struct emac_instance *dev)
216 struct emac_regs __iomem *p = dev->emacp;
219 if (unlikely(test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags)))
222 DBG(dev, "rx_enable" NL);
224 r = in_be32(&p->mr0);
225 if (!(r & EMAC_MR0_RXE)) {
226 if (unlikely(!(r & EMAC_MR0_RXI))) {
227 /* Wait if previous async disable is still in progress */
228 int n = dev->stop_timeout;
229 while (!(r = in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
234 emac_report_timeout_error(dev,
235 "RX disable timeout");
237 out_be32(&p->mr0, r | EMAC_MR0_RXE);
243 static void emac_rx_disable(struct emac_instance *dev)
245 struct emac_regs __iomem *p = dev->emacp;
248 DBG(dev, "rx_disable" NL);
250 r = in_be32(&p->mr0);
251 if (r & EMAC_MR0_RXE) {
252 int n = dev->stop_timeout;
253 out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
254 while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
259 emac_report_timeout_error(dev, "RX disable timeout");
263 static inline void emac_netif_stop(struct emac_instance *dev)
265 netif_tx_lock_bh(dev->ndev);
267 netif_tx_unlock_bh(dev->ndev);
268 dev->ndev->trans_start = jiffies; /* prevent tx timeout */
269 mal_poll_disable(dev->mal, &dev->commac);
270 netif_tx_disable(dev->ndev);
273 static inline void emac_netif_start(struct emac_instance *dev)
275 netif_tx_lock_bh(dev->ndev);
277 if (dev->mcast_pending && netif_running(dev->ndev))
278 __emac_set_multicast_list(dev);
279 netif_tx_unlock_bh(dev->ndev);
281 netif_wake_queue(dev->ndev);
283 /* NOTE: unconditional netif_wake_queue is only appropriate
284 * so long as all callers are assured to have free tx slots
285 * (taken from tg3... though the case where that is wrong is
286 * not terribly harmful)
288 mal_poll_enable(dev->mal, &dev->commac);
291 static inline void emac_rx_disable_async(struct emac_instance *dev)
293 struct emac_regs __iomem *p = dev->emacp;
296 DBG(dev, "rx_disable_async" NL);
298 r = in_be32(&p->mr0);
299 if (r & EMAC_MR0_RXE)
300 out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
303 static int emac_reset(struct emac_instance *dev)
305 struct emac_regs __iomem *p = dev->emacp;
308 DBG(dev, "reset" NL);
310 if (!dev->reset_failed) {
311 /* 40x erratum suggests stopping RX channel before reset,
314 emac_rx_disable(dev);
315 emac_tx_disable(dev);
318 out_be32(&p->mr0, EMAC_MR0_SRST);
319 while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n)
323 dev->reset_failed = 0;
326 emac_report_timeout_error(dev, "reset timeout");
327 dev->reset_failed = 1;
332 static void emac_hash_mc(struct emac_instance *dev)
334 struct emac_regs __iomem *p = dev->emacp;
336 struct dev_mc_list *dmi;
338 DBG(dev, "hash_mc %d" NL, dev->ndev->mc_count);
340 for (dmi = dev->ndev->mc_list; dmi; dmi = dmi->next) {
342 DBG2(dev, "mc %02x:%02x:%02x:%02x:%02x:%02x" NL,
343 dmi->dmi_addr[0], dmi->dmi_addr[1], dmi->dmi_addr[2],
344 dmi->dmi_addr[3], dmi->dmi_addr[4], dmi->dmi_addr[5]);
346 bit = 63 - (ether_crc(ETH_ALEN, dmi->dmi_addr) >> 26);
347 gaht[bit >> 4] |= 0x8000 >> (bit & 0x0f);
349 out_be32(&p->gaht1, gaht[0]);
350 out_be32(&p->gaht2, gaht[1]);
351 out_be32(&p->gaht3, gaht[2]);
352 out_be32(&p->gaht4, gaht[3]);
355 static inline u32 emac_iff2rmr(struct net_device *ndev)
357 struct emac_instance *dev = netdev_priv(ndev);
360 r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE;
362 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
367 if (ndev->flags & IFF_PROMISC)
369 else if (ndev->flags & IFF_ALLMULTI || ndev->mc_count > 32)
371 else if (ndev->mc_count > 0)
377 static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
379 u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC_MR1_TR0_MULT;
381 DBG2(dev, "__emac_calc_base_mr1" NL);
385 ret |= EMAC_MR1_TFS_2K;
388 printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
389 dev->ndev->name, tx_size);
394 ret |= EMAC_MR1_RFS_16K;
397 ret |= EMAC_MR1_RFS_4K;
400 printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
401 dev->ndev->name, rx_size);
407 static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
409 u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC4_MR1_TR |
410 EMAC4_MR1_OBCI(dev->opb_bus_freq / 1000000);
412 DBG2(dev, "__emac4_calc_base_mr1" NL);
416 ret |= EMAC4_MR1_TFS_4K;
419 ret |= EMAC4_MR1_TFS_2K;
422 printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
423 dev->ndev->name, tx_size);
428 ret |= EMAC4_MR1_RFS_16K;
431 ret |= EMAC4_MR1_RFS_4K;
434 ret |= EMAC4_MR1_RFS_2K;
437 printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
438 dev->ndev->name, rx_size);
444 static u32 emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
446 return emac_has_feature(dev, EMAC_FTR_EMAC4) ?
447 __emac4_calc_base_mr1(dev, tx_size, rx_size) :
448 __emac_calc_base_mr1(dev, tx_size, rx_size);
451 static inline u32 emac_calc_trtr(struct emac_instance *dev, unsigned int size)
453 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
454 return ((size >> 6) - 1) << EMAC_TRTR_SHIFT_EMAC4;
456 return ((size >> 6) - 1) << EMAC_TRTR_SHIFT;
459 static inline u32 emac_calc_rwmr(struct emac_instance *dev,
460 unsigned int low, unsigned int high)
462 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
463 return (low << 22) | ( (high & 0x3ff) << 6);
465 return (low << 23) | ( (high & 0x1ff) << 7);
468 static int emac_configure(struct emac_instance *dev)
470 struct emac_regs __iomem *p = dev->emacp;
471 struct net_device *ndev = dev->ndev;
472 int tx_size, rx_size, link = netif_carrier_ok(dev->ndev);
475 DBG(dev, "configure" NL);
478 out_be32(&p->mr1, in_be32(&p->mr1)
479 | EMAC_MR1_FDE | EMAC_MR1_ILE);
481 } else if (emac_reset(dev) < 0)
484 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
485 tah_reset(dev->tah_dev);
487 DBG(dev, " link = %d duplex = %d, pause = %d, asym_pause = %d\n",
488 link, dev->phy.duplex, dev->phy.pause, dev->phy.asym_pause);
490 /* Default fifo sizes */
491 tx_size = dev->tx_fifo_size;
492 rx_size = dev->rx_fifo_size;
494 /* No link, force loopback */
496 mr1 = EMAC_MR1_FDE | EMAC_MR1_ILE;
498 /* Check for full duplex */
499 else if (dev->phy.duplex == DUPLEX_FULL)
500 mr1 |= EMAC_MR1_FDE | EMAC_MR1_MWSW_001;
502 /* Adjust fifo sizes, mr1 and timeouts based on link speed */
503 dev->stop_timeout = STOP_TIMEOUT_10;
504 switch (dev->phy.speed) {
506 if (emac_phy_gpcs(dev->phy.mode)) {
507 mr1 |= EMAC_MR1_MF_1000GPCS |
508 EMAC_MR1_MF_IPPA(dev->phy.address);
510 /* Put some arbitrary OUI, Manuf & Rev IDs so we can
511 * identify this GPCS PHY later.
513 out_be32(&p->ipcr, 0xdeadbeef);
515 mr1 |= EMAC_MR1_MF_1000;
517 /* Extended fifo sizes */
518 tx_size = dev->tx_fifo_size_gige;
519 rx_size = dev->rx_fifo_size_gige;
521 if (dev->ndev->mtu > ETH_DATA_LEN) {
522 mr1 |= EMAC_MR1_JPSM;
523 dev->stop_timeout = STOP_TIMEOUT_1000_JUMBO;
525 dev->stop_timeout = STOP_TIMEOUT_1000;
528 mr1 |= EMAC_MR1_MF_100;
529 dev->stop_timeout = STOP_TIMEOUT_100;
531 default: /* make gcc happy */
535 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
536 rgmii_set_speed(dev->rgmii_dev, dev->rgmii_port,
538 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
539 zmii_set_speed(dev->zmii_dev, dev->zmii_port, dev->phy.speed);
541 /* on 40x erratum forces us to NOT use integrated flow control,
542 * let's hope it works on 44x ;)
544 if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x) &&
545 dev->phy.duplex == DUPLEX_FULL) {
547 mr1 |= EMAC_MR1_EIFC | EMAC_MR1_APP;
548 else if (dev->phy.asym_pause)
552 /* Add base settings & fifo sizes & program MR1 */
553 mr1 |= emac_calc_base_mr1(dev, tx_size, rx_size);
554 out_be32(&p->mr1, mr1);
556 /* Set individual MAC address */
557 out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
558 out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
559 (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
562 /* VLAN Tag Protocol ID */
563 out_be32(&p->vtpid, 0x8100);
565 /* Receive mode register */
566 r = emac_iff2rmr(ndev);
567 if (r & EMAC_RMR_MAE)
569 out_be32(&p->rmr, r);
571 /* FIFOs thresholds */
572 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
573 r = EMAC4_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
574 tx_size / 2 / dev->fifo_entry_size);
576 r = EMAC_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
577 tx_size / 2 / dev->fifo_entry_size);
578 out_be32(&p->tmr1, r);
579 out_be32(&p->trtr, emac_calc_trtr(dev, tx_size / 2));
581 /* PAUSE frame is sent when RX FIFO reaches its high-water mark,
582 there should be still enough space in FIFO to allow the our link
583 partner time to process this frame and also time to send PAUSE
586 Here is the worst case scenario for the RX FIFO "headroom"
587 (from "The Switch Book") (100Mbps, without preamble, inter-frame gap):
589 1) One maximum-length frame on TX 1522 bytes
590 2) One PAUSE frame time 64 bytes
591 3) PAUSE frame decode time allowance 64 bytes
592 4) One maximum-length frame on RX 1522 bytes
593 5) Round-trip propagation delay of the link (100Mb) 15 bytes
597 I chose to set high-water mark to RX_FIFO_SIZE / 4 (1024 bytes)
598 low-water mark to RX_FIFO_SIZE / 8 (512 bytes)
600 r = emac_calc_rwmr(dev, rx_size / 8 / dev->fifo_entry_size,
601 rx_size / 4 / dev->fifo_entry_size);
602 out_be32(&p->rwmr, r);
604 /* Set PAUSE timer to the maximum */
605 out_be32(&p->ptr, 0xffff);
608 r = EMAC_ISR_OVR | EMAC_ISR_BP | EMAC_ISR_SE |
609 EMAC_ISR_ALE | EMAC_ISR_BFCS | EMAC_ISR_PTLE | EMAC_ISR_ORE |
610 EMAC_ISR_IRE | EMAC_ISR_TE;
611 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
612 r |= EMAC4_ISR_TXPE | EMAC4_ISR_RXPE /* | EMAC4_ISR_TXUE |
614 out_be32(&p->iser, r);
616 /* We need to take GPCS PHY out of isolate mode after EMAC reset */
617 if (emac_phy_gpcs(dev->phy.mode))
618 emac_mii_reset_phy(&dev->phy);
623 static void emac_reinitialize(struct emac_instance *dev)
625 DBG(dev, "reinitialize" NL);
627 emac_netif_stop(dev);
628 if (!emac_configure(dev)) {
632 emac_netif_start(dev);
635 static void emac_full_tx_reset(struct emac_instance *dev)
637 DBG(dev, "full_tx_reset" NL);
639 emac_tx_disable(dev);
640 mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
641 emac_clean_tx_ring(dev);
642 dev->tx_cnt = dev->tx_slot = dev->ack_slot = 0;
646 mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
651 static void emac_reset_work(struct work_struct *work)
653 struct emac_instance *dev = container_of(work, struct emac_instance, reset_work);
655 DBG(dev, "reset_work" NL);
657 mutex_lock(&dev->link_lock);
659 emac_netif_stop(dev);
660 emac_full_tx_reset(dev);
661 emac_netif_start(dev);
663 mutex_unlock(&dev->link_lock);
666 static void emac_tx_timeout(struct net_device *ndev)
668 struct emac_instance *dev = netdev_priv(ndev);
670 DBG(dev, "tx_timeout" NL);
672 schedule_work(&dev->reset_work);
676 static inline int emac_phy_done(struct emac_instance *dev, u32 stacr)
678 int done = !!(stacr & EMAC_STACR_OC);
680 if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
686 static int __emac_mdio_read(struct emac_instance *dev, u8 id, u8 reg)
688 struct emac_regs __iomem *p = dev->emacp;
690 int n, err = -ETIMEDOUT;
692 mutex_lock(&dev->mdio_lock);
694 DBG2(dev, "mdio_read(%02x,%02x)" NL, id, reg);
696 /* Enable proper MDIO port */
697 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
698 zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
699 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
700 rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);
702 /* Wait for management interface to become idle */
704 while (!emac_phy_done(dev, in_be32(&p->stacr))) {
707 DBG2(dev, " -> timeout wait idle\n");
712 /* Issue read command */
713 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
714 r = EMAC4_STACR_BASE(dev->opb_bus_freq);
716 r = EMAC_STACR_BASE(dev->opb_bus_freq);
717 if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
719 if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
720 r |= EMACX_STACR_STAC_READ;
722 r |= EMAC_STACR_STAC_READ;
723 r |= (reg & EMAC_STACR_PRA_MASK)
724 | ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT);
725 out_be32(&p->stacr, r);
727 /* Wait for read to complete */
729 while (!emac_phy_done(dev, (r = in_be32(&p->stacr)))) {
732 DBG2(dev, " -> timeout wait complete\n");
737 if (unlikely(r & EMAC_STACR_PHYE)) {
738 DBG(dev, "mdio_read(%02x, %02x) failed" NL, id, reg);
743 r = ((r >> EMAC_STACR_PHYD_SHIFT) & EMAC_STACR_PHYD_MASK);
745 DBG2(dev, "mdio_read -> %04x" NL, r);
748 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
749 rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
750 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
751 zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
752 mutex_unlock(&dev->mdio_lock);
754 return err == 0 ? r : err;
757 static void __emac_mdio_write(struct emac_instance *dev, u8 id, u8 reg,
760 struct emac_regs __iomem *p = dev->emacp;
762 int n, err = -ETIMEDOUT;
764 mutex_lock(&dev->mdio_lock);
766 DBG2(dev, "mdio_write(%02x,%02x,%04x)" NL, id, reg, val);
768 /* Enable proper MDIO port */
769 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
770 zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
771 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
772 rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);
774 /* Wait for management interface to be idle */
776 while (!emac_phy_done(dev, in_be32(&p->stacr))) {
779 DBG2(dev, " -> timeout wait idle\n");
784 /* Issue write command */
785 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
786 r = EMAC4_STACR_BASE(dev->opb_bus_freq);
788 r = EMAC_STACR_BASE(dev->opb_bus_freq);
789 if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
791 if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
792 r |= EMACX_STACR_STAC_WRITE;
794 r |= EMAC_STACR_STAC_WRITE;
795 r |= (reg & EMAC_STACR_PRA_MASK) |
796 ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT) |
797 (val << EMAC_STACR_PHYD_SHIFT);
798 out_be32(&p->stacr, r);
800 /* Wait for write to complete */
802 while (!emac_phy_done(dev, in_be32(&p->stacr))) {
805 DBG2(dev, " -> timeout wait complete\n");
811 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
812 rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
813 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
814 zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
815 mutex_unlock(&dev->mdio_lock);
818 static int emac_mdio_read(struct net_device *ndev, int id, int reg)
820 struct emac_instance *dev = netdev_priv(ndev);
823 res = __emac_mdio_read(dev->mdio_instance ? dev->mdio_instance : dev,
828 static void emac_mdio_write(struct net_device *ndev, int id, int reg, int val)
830 struct emac_instance *dev = netdev_priv(ndev);
832 __emac_mdio_write(dev->mdio_instance ? dev->mdio_instance : dev,
833 (u8) id, (u8) reg, (u16) val);
837 static void __emac_set_multicast_list(struct emac_instance *dev)
839 struct emac_regs __iomem *p = dev->emacp;
840 u32 rmr = emac_iff2rmr(dev->ndev);
842 DBG(dev, "__multicast %08x" NL, rmr);
844 /* I decided to relax register access rules here to avoid
847 * There is a real problem with EMAC4 core if we use MWSW_001 bit
848 * in MR1 register and do a full EMAC reset.
849 * One TX BD status update is delayed and, after EMAC reset, it
850 * never happens, resulting in TX hung (it'll be recovered by TX
851 * timeout handler eventually, but this is just gross).
852 * So we either have to do full TX reset or try to cheat here :)
854 * The only required change is to RX mode register, so I *think* all
855 * we need is just to stop RX channel. This seems to work on all
858 * If we need the full reset, we might just trigger the workqueue
859 * and do it async... a bit nasty but should work --BenH
861 dev->mcast_pending = 0;
862 emac_rx_disable(dev);
863 if (rmr & EMAC_RMR_MAE)
865 out_be32(&p->rmr, rmr);
870 static void emac_set_multicast_list(struct net_device *ndev)
872 struct emac_instance *dev = netdev_priv(ndev);
874 DBG(dev, "multicast" NL);
876 BUG_ON(!netif_running(dev->ndev));
879 dev->mcast_pending = 1;
882 __emac_set_multicast_list(dev);
885 static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu)
887 int rx_sync_size = emac_rx_sync_size(new_mtu);
888 int rx_skb_size = emac_rx_skb_size(new_mtu);
891 mutex_lock(&dev->link_lock);
892 emac_netif_stop(dev);
893 emac_rx_disable(dev);
894 mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);
896 if (dev->rx_sg_skb) {
897 ++dev->estats.rx_dropped_resize;
898 dev_kfree_skb(dev->rx_sg_skb);
899 dev->rx_sg_skb = NULL;
902 /* Make a first pass over RX ring and mark BDs ready, dropping
903 * non-processed packets on the way. We need this as a separate pass
904 * to simplify error recovery in the case of allocation failure later.
906 for (i = 0; i < NUM_RX_BUFF; ++i) {
907 if (dev->rx_desc[i].ctrl & MAL_RX_CTRL_FIRST)
908 ++dev->estats.rx_dropped_resize;
910 dev->rx_desc[i].data_len = 0;
911 dev->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY |
912 (i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
915 /* Reallocate RX ring only if bigger skb buffers are required */
916 if (rx_skb_size <= dev->rx_skb_size)
919 /* Second pass, allocate new skbs */
920 for (i = 0; i < NUM_RX_BUFF; ++i) {
921 struct sk_buff *skb = alloc_skb(rx_skb_size, GFP_ATOMIC);
927 BUG_ON(!dev->rx_skb[i]);
928 dev_kfree_skb(dev->rx_skb[i]);
930 skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
931 dev->rx_desc[i].data_ptr =
932 dma_map_single(&dev->ofdev->dev, skb->data - 2, rx_sync_size,
933 DMA_FROM_DEVICE) + 2;
934 dev->rx_skb[i] = skb;
937 /* Check if we need to change "Jumbo" bit in MR1 */
938 if ((new_mtu > ETH_DATA_LEN) ^ (dev->ndev->mtu > ETH_DATA_LEN)) {
939 /* This is to prevent starting RX channel in emac_rx_enable() */
940 set_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
942 dev->ndev->mtu = new_mtu;
943 emac_full_tx_reset(dev);
946 mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(new_mtu));
949 clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
951 mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
953 emac_netif_start(dev);
954 mutex_unlock(&dev->link_lock);
959 /* Process ctx, rtnl_lock semaphore */
960 static int emac_change_mtu(struct net_device *ndev, int new_mtu)
962 struct emac_instance *dev = netdev_priv(ndev);
965 if (new_mtu < EMAC_MIN_MTU || new_mtu > dev->max_mtu)
968 DBG(dev, "change_mtu(%d)" NL, new_mtu);
970 if (netif_running(ndev)) {
971 /* Check if we really need to reinitalize RX ring */
972 if (emac_rx_skb_size(ndev->mtu) != emac_rx_skb_size(new_mtu))
973 ret = emac_resize_rx_ring(dev, new_mtu);
978 dev->rx_skb_size = emac_rx_skb_size(new_mtu);
979 dev->rx_sync_size = emac_rx_sync_size(new_mtu);
985 static void emac_clean_tx_ring(struct emac_instance *dev)
989 for (i = 0; i < NUM_TX_BUFF; ++i) {
990 if (dev->tx_skb[i]) {
991 dev_kfree_skb(dev->tx_skb[i]);
992 dev->tx_skb[i] = NULL;
993 if (dev->tx_desc[i].ctrl & MAL_TX_CTRL_READY)
994 ++dev->estats.tx_dropped;
996 dev->tx_desc[i].ctrl = 0;
997 dev->tx_desc[i].data_ptr = 0;
1001 static void emac_clean_rx_ring(struct emac_instance *dev)
1005 for (i = 0; i < NUM_RX_BUFF; ++i)
1006 if (dev->rx_skb[i]) {
1007 dev->rx_desc[i].ctrl = 0;
1008 dev_kfree_skb(dev->rx_skb[i]);
1009 dev->rx_skb[i] = NULL;
1010 dev->rx_desc[i].data_ptr = 0;
1013 if (dev->rx_sg_skb) {
1014 dev_kfree_skb(dev->rx_sg_skb);
1015 dev->rx_sg_skb = NULL;
1019 static inline int emac_alloc_rx_skb(struct emac_instance *dev, int slot,
1022 struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags);
1026 dev->rx_skb[slot] = skb;
1027 dev->rx_desc[slot].data_len = 0;
1029 skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
1030 dev->rx_desc[slot].data_ptr =
1031 dma_map_single(&dev->ofdev->dev, skb->data - 2, dev->rx_sync_size,
1032 DMA_FROM_DEVICE) + 2;
1034 dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
1035 (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
1040 static void emac_print_link_status(struct emac_instance *dev)
1042 if (netif_carrier_ok(dev->ndev))
1043 printk(KERN_INFO "%s: link is up, %d %s%s\n",
1044 dev->ndev->name, dev->phy.speed,
1045 dev->phy.duplex == DUPLEX_FULL ? "FDX" : "HDX",
1046 dev->phy.pause ? ", pause enabled" :
1047 dev->phy.asym_pause ? ", asymmetric pause enabled" : "");
1049 printk(KERN_INFO "%s: link is down\n", dev->ndev->name);
1052 /* Process ctx, rtnl_lock semaphore */
1053 static int emac_open(struct net_device *ndev)
1055 struct emac_instance *dev = netdev_priv(ndev);
1058 DBG(dev, "open" NL);
1060 /* Setup error IRQ handler */
1061 err = request_irq(dev->emac_irq, emac_irq, 0, "EMAC", dev);
1063 printk(KERN_ERR "%s: failed to request IRQ %d\n",
1064 ndev->name, dev->emac_irq);
1068 /* Allocate RX ring */
1069 for (i = 0; i < NUM_RX_BUFF; ++i)
1070 if (emac_alloc_rx_skb(dev, i, GFP_KERNEL)) {
1071 printk(KERN_ERR "%s: failed to allocate RX ring\n",
1076 dev->tx_cnt = dev->tx_slot = dev->ack_slot = dev->rx_slot = 0;
1077 clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
1078 dev->rx_sg_skb = NULL;
1080 mutex_lock(&dev->link_lock);
1083 /* Start PHY polling now.
1085 if (dev->phy.address >= 0) {
1086 int link_poll_interval;
1087 if (dev->phy.def->ops->poll_link(&dev->phy)) {
1088 dev->phy.def->ops->read_link(&dev->phy);
1089 netif_carrier_on(dev->ndev);
1090 link_poll_interval = PHY_POLL_LINK_ON;
1092 netif_carrier_off(dev->ndev);
1093 link_poll_interval = PHY_POLL_LINK_OFF;
1095 dev->link_polling = 1;
1097 schedule_delayed_work(&dev->link_work, link_poll_interval);
1098 emac_print_link_status(dev);
1100 netif_carrier_on(dev->ndev);
1102 emac_configure(dev);
1103 mal_poll_add(dev->mal, &dev->commac);
1104 mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
1105 mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(ndev->mtu));
1106 mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
1107 emac_tx_enable(dev);
1108 emac_rx_enable(dev);
1109 emac_netif_start(dev);
1111 mutex_unlock(&dev->link_lock);
1115 emac_clean_rx_ring(dev);
1116 free_irq(dev->emac_irq, dev);
1123 static int emac_link_differs(struct emac_instance *dev)
1125 u32 r = in_be32(&dev->emacp->mr1);
1127 int duplex = r & EMAC_MR1_FDE ? DUPLEX_FULL : DUPLEX_HALF;
1128 int speed, pause, asym_pause;
1130 if (r & EMAC_MR1_MF_1000)
1132 else if (r & EMAC_MR1_MF_100)
1137 switch (r & (EMAC_MR1_EIFC | EMAC_MR1_APP)) {
1138 case (EMAC_MR1_EIFC | EMAC_MR1_APP):
1147 pause = asym_pause = 0;
1149 return speed != dev->phy.speed || duplex != dev->phy.duplex ||
1150 pause != dev->phy.pause || asym_pause != dev->phy.asym_pause;
1154 static void emac_link_timer(struct work_struct *work)
1156 struct emac_instance *dev =
1157 container_of((struct delayed_work *)work,
1158 struct emac_instance, link_work);
1159 int link_poll_interval;
1161 mutex_lock(&dev->link_lock);
1162 DBG2(dev, "link timer" NL);
1167 if (dev->phy.def->ops->poll_link(&dev->phy)) {
1168 if (!netif_carrier_ok(dev->ndev)) {
1169 /* Get new link parameters */
1170 dev->phy.def->ops->read_link(&dev->phy);
1172 netif_carrier_on(dev->ndev);
1173 emac_netif_stop(dev);
1174 emac_full_tx_reset(dev);
1175 emac_netif_start(dev);
1176 emac_print_link_status(dev);
1178 link_poll_interval = PHY_POLL_LINK_ON;
1180 if (netif_carrier_ok(dev->ndev)) {
1181 netif_carrier_off(dev->ndev);
1182 netif_tx_disable(dev->ndev);
1183 emac_reinitialize(dev);
1184 emac_print_link_status(dev);
1186 link_poll_interval = PHY_POLL_LINK_OFF;
1188 schedule_delayed_work(&dev->link_work, link_poll_interval);
1190 mutex_unlock(&dev->link_lock);
1193 static void emac_force_link_update(struct emac_instance *dev)
1195 netif_carrier_off(dev->ndev);
1197 if (dev->link_polling) {
1198 cancel_rearming_delayed_work(&dev->link_work);
1199 if (dev->link_polling)
1200 schedule_delayed_work(&dev->link_work, PHY_POLL_LINK_OFF);
1204 /* Process ctx, rtnl_lock semaphore */
1205 static int emac_close(struct net_device *ndev)
1207 struct emac_instance *dev = netdev_priv(ndev);
1209 DBG(dev, "close" NL);
1211 if (dev->phy.address >= 0) {
1212 dev->link_polling = 0;
1213 cancel_rearming_delayed_work(&dev->link_work);
1215 mutex_lock(&dev->link_lock);
1216 emac_netif_stop(dev);
1218 mutex_unlock(&dev->link_lock);
1220 emac_rx_disable(dev);
1221 emac_tx_disable(dev);
1222 mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);
1223 mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
1224 mal_poll_del(dev->mal, &dev->commac);
1226 emac_clean_tx_ring(dev);
1227 emac_clean_rx_ring(dev);
1229 free_irq(dev->emac_irq, dev);
1234 static inline u16 emac_tx_csum(struct emac_instance *dev,
1235 struct sk_buff *skb)
1237 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH &&
1238 skb->ip_summed == CHECKSUM_PARTIAL)) {
1239 ++dev->stats.tx_packets_csum;
1240 return EMAC_TX_CTRL_TAH_CSUM;
1245 static inline int emac_xmit_finish(struct emac_instance *dev, int len)
1247 struct emac_regs __iomem *p = dev->emacp;
1248 struct net_device *ndev = dev->ndev;
1250 /* Send the packet out. If the if makes a significant perf
1251 * difference, then we can store the TMR0 value in "dev"
1254 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
1255 out_be32(&p->tmr0, EMAC4_TMR0_XMIT);
1257 out_be32(&p->tmr0, EMAC_TMR0_XMIT);
1259 if (unlikely(++dev->tx_cnt == NUM_TX_BUFF)) {
1260 netif_stop_queue(ndev);
1261 DBG2(dev, "stopped TX queue" NL);
1264 ndev->trans_start = jiffies;
1265 ++dev->stats.tx_packets;
1266 dev->stats.tx_bytes += len;
1272 static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1274 struct emac_instance *dev = netdev_priv(ndev);
1275 unsigned int len = skb->len;
1278 u16 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
1279 MAL_TX_CTRL_LAST | emac_tx_csum(dev, skb);
1281 slot = dev->tx_slot++;
1282 if (dev->tx_slot == NUM_TX_BUFF) {
1284 ctrl |= MAL_TX_CTRL_WRAP;
1287 DBG2(dev, "xmit(%u) %d" NL, len, slot);
1289 dev->tx_skb[slot] = skb;
1290 dev->tx_desc[slot].data_ptr = dma_map_single(&dev->ofdev->dev,
1293 dev->tx_desc[slot].data_len = (u16) len;
1295 dev->tx_desc[slot].ctrl = ctrl;
1297 return emac_xmit_finish(dev, len);
1300 static inline int emac_xmit_split(struct emac_instance *dev, int slot,
1301 u32 pd, int len, int last, u16 base_ctrl)
1304 u16 ctrl = base_ctrl;
1305 int chunk = min(len, MAL_MAX_TX_SIZE);
1308 slot = (slot + 1) % NUM_TX_BUFF;
1311 ctrl |= MAL_TX_CTRL_LAST;
1312 if (slot == NUM_TX_BUFF - 1)
1313 ctrl |= MAL_TX_CTRL_WRAP;
1315 dev->tx_skb[slot] = NULL;
1316 dev->tx_desc[slot].data_ptr = pd;
1317 dev->tx_desc[slot].data_len = (u16) chunk;
1318 dev->tx_desc[slot].ctrl = ctrl;
1329 /* Tx lock BH disabled (SG version for TAH equipped EMACs) */
1330 static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
1332 struct emac_instance *dev = netdev_priv(ndev);
1333 int nr_frags = skb_shinfo(skb)->nr_frags;
1334 int len = skb->len, chunk;
1339 /* This is common "fast" path */
1340 if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE))
1341 return emac_start_xmit(skb, ndev);
1343 len -= skb->data_len;
1345 /* Note, this is only an *estimation*, we can still run out of empty
1346 * slots because of the additional fragmentation into
1347 * MAL_MAX_TX_SIZE-sized chunks
1349 if (unlikely(dev->tx_cnt + nr_frags + mal_tx_chunks(len) > NUM_TX_BUFF))
1352 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
1353 emac_tx_csum(dev, skb);
1354 slot = dev->tx_slot;
1357 dev->tx_skb[slot] = NULL;
1358 chunk = min(len, MAL_MAX_TX_SIZE);
1359 dev->tx_desc[slot].data_ptr = pd =
1360 dma_map_single(&dev->ofdev->dev, skb->data, len, DMA_TO_DEVICE);
1361 dev->tx_desc[slot].data_len = (u16) chunk;
1364 slot = emac_xmit_split(dev, slot, pd + chunk, len, !nr_frags,
1367 for (i = 0; i < nr_frags; ++i) {
1368 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
1371 if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
1374 pd = dma_map_page(&dev->ofdev->dev, frag->page, frag->page_offset, len,
1377 slot = emac_xmit_split(dev, slot, pd, len, i == nr_frags - 1,
1381 DBG2(dev, "xmit_sg(%u) %d - %d" NL, skb->len, dev->tx_slot, slot);
1383 /* Attach skb to the last slot so we don't release it too early */
1384 dev->tx_skb[slot] = skb;
1386 /* Send the packet out */
1387 if (dev->tx_slot == NUM_TX_BUFF - 1)
1388 ctrl |= MAL_TX_CTRL_WRAP;
1390 dev->tx_desc[dev->tx_slot].ctrl = ctrl;
1391 dev->tx_slot = (slot + 1) % NUM_TX_BUFF;
1393 return emac_xmit_finish(dev, skb->len);
1396 /* Well, too bad. Our previous estimation was overly optimistic.
1399 while (slot != dev->tx_slot) {
1400 dev->tx_desc[slot].ctrl = 0;
1403 slot = NUM_TX_BUFF - 1;
1405 ++dev->estats.tx_undo;
1408 netif_stop_queue(ndev);
1409 DBG2(dev, "stopped TX queue" NL);
1414 static void emac_parse_tx_error(struct emac_instance *dev, u16 ctrl)
1416 struct emac_error_stats *st = &dev->estats;
1418 DBG(dev, "BD TX error %04x" NL, ctrl);
1421 if (ctrl & EMAC_TX_ST_BFCS)
1422 ++st->tx_bd_bad_fcs;
1423 if (ctrl & EMAC_TX_ST_LCS)
1424 ++st->tx_bd_carrier_loss;
1425 if (ctrl & EMAC_TX_ST_ED)
1426 ++st->tx_bd_excessive_deferral;
1427 if (ctrl & EMAC_TX_ST_EC)
1428 ++st->tx_bd_excessive_collisions;
1429 if (ctrl & EMAC_TX_ST_LC)
1430 ++st->tx_bd_late_collision;
1431 if (ctrl & EMAC_TX_ST_MC)
1432 ++st->tx_bd_multple_collisions;
1433 if (ctrl & EMAC_TX_ST_SC)
1434 ++st->tx_bd_single_collision;
1435 if (ctrl & EMAC_TX_ST_UR)
1436 ++st->tx_bd_underrun;
1437 if (ctrl & EMAC_TX_ST_SQE)
1441 static void emac_poll_tx(void *param)
1443 struct emac_instance *dev = param;
1446 DBG2(dev, "poll_tx, %d %d" NL, dev->tx_cnt, dev->ack_slot);
1448 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
1449 bad_mask = EMAC_IS_BAD_TX_TAH;
1451 bad_mask = EMAC_IS_BAD_TX;
1453 netif_tx_lock_bh(dev->ndev);
1456 int slot = dev->ack_slot, n = 0;
1458 ctrl = dev->tx_desc[slot].ctrl;
1459 if (!(ctrl & MAL_TX_CTRL_READY)) {
1460 struct sk_buff *skb = dev->tx_skb[slot];
1465 dev->tx_skb[slot] = NULL;
1467 slot = (slot + 1) % NUM_TX_BUFF;
1469 if (unlikely(ctrl & bad_mask))
1470 emac_parse_tx_error(dev, ctrl);
1476 dev->ack_slot = slot;
1477 if (netif_queue_stopped(dev->ndev) &&
1478 dev->tx_cnt < EMAC_TX_WAKEUP_THRESH)
1479 netif_wake_queue(dev->ndev);
1481 DBG2(dev, "tx %d pkts" NL, n);
1484 netif_tx_unlock_bh(dev->ndev);
1487 static inline void emac_recycle_rx_skb(struct emac_instance *dev, int slot,
1490 struct sk_buff *skb = dev->rx_skb[slot];
1492 DBG2(dev, "recycle %d %d" NL, slot, len);
1495 dma_map_single(&dev->ofdev->dev, skb->data - 2,
1496 EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE);
1498 dev->rx_desc[slot].data_len = 0;
1500 dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
1501 (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
1504 static void emac_parse_rx_error(struct emac_instance *dev, u16 ctrl)
1506 struct emac_error_stats *st = &dev->estats;
1508 DBG(dev, "BD RX error %04x" NL, ctrl);
1511 if (ctrl & EMAC_RX_ST_OE)
1512 ++st->rx_bd_overrun;
1513 if (ctrl & EMAC_RX_ST_BP)
1514 ++st->rx_bd_bad_packet;
1515 if (ctrl & EMAC_RX_ST_RP)
1516 ++st->rx_bd_runt_packet;
1517 if (ctrl & EMAC_RX_ST_SE)
1518 ++st->rx_bd_short_event;
1519 if (ctrl & EMAC_RX_ST_AE)
1520 ++st->rx_bd_alignment_error;
1521 if (ctrl & EMAC_RX_ST_BFCS)
1522 ++st->rx_bd_bad_fcs;
1523 if (ctrl & EMAC_RX_ST_PTL)
1524 ++st->rx_bd_packet_too_long;
1525 if (ctrl & EMAC_RX_ST_ORE)
1526 ++st->rx_bd_out_of_range;
1527 if (ctrl & EMAC_RX_ST_IRE)
1528 ++st->rx_bd_in_range;
1531 static inline void emac_rx_csum(struct emac_instance *dev,
1532 struct sk_buff *skb, u16 ctrl)
1534 #ifdef CONFIG_IBM_NEW_EMAC_TAH
1535 if (!ctrl && dev->tah_dev) {
1536 skb->ip_summed = CHECKSUM_UNNECESSARY;
1537 ++dev->stats.rx_packets_csum;
1542 static inline int emac_rx_sg_append(struct emac_instance *dev, int slot)
1544 if (likely(dev->rx_sg_skb != NULL)) {
1545 int len = dev->rx_desc[slot].data_len;
1546 int tot_len = dev->rx_sg_skb->len + len;
1548 if (unlikely(tot_len + 2 > dev->rx_skb_size)) {
1549 ++dev->estats.rx_dropped_mtu;
1550 dev_kfree_skb(dev->rx_sg_skb);
1551 dev->rx_sg_skb = NULL;
1553 cacheable_memcpy(skb_tail_pointer(dev->rx_sg_skb),
1554 dev->rx_skb[slot]->data, len);
1555 skb_put(dev->rx_sg_skb, len);
1556 emac_recycle_rx_skb(dev, slot, len);
1560 emac_recycle_rx_skb(dev, slot, 0);
1564 /* NAPI poll context */
1565 static int emac_poll_rx(void *param, int budget)
1567 struct emac_instance *dev = param;
1568 int slot = dev->rx_slot, received = 0;
1570 DBG2(dev, "poll_rx(%d)" NL, budget);
1573 while (budget > 0) {
1575 struct sk_buff *skb;
1576 u16 ctrl = dev->rx_desc[slot].ctrl;
1578 if (ctrl & MAL_RX_CTRL_EMPTY)
1581 skb = dev->rx_skb[slot];
1583 len = dev->rx_desc[slot].data_len;
1585 if (unlikely(!MAL_IS_SINGLE_RX(ctrl)))
1588 ctrl &= EMAC_BAD_RX_MASK;
1589 if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
1590 emac_parse_rx_error(dev, ctrl);
1591 ++dev->estats.rx_dropped_error;
1592 emac_recycle_rx_skb(dev, slot, 0);
1597 if (len && len < EMAC_RX_COPY_THRESH) {
1598 struct sk_buff *copy_skb =
1599 alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2, GFP_ATOMIC);
1600 if (unlikely(!copy_skb))
1603 skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2);
1604 cacheable_memcpy(copy_skb->data - 2, skb->data - 2,
1606 emac_recycle_rx_skb(dev, slot, len);
1608 } else if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC)))
1613 skb->dev = dev->ndev;
1614 skb->protocol = eth_type_trans(skb, dev->ndev);
1615 emac_rx_csum(dev, skb, ctrl);
1617 if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
1618 ++dev->estats.rx_dropped_stack;
1620 ++dev->stats.rx_packets;
1622 dev->stats.rx_bytes += len;
1623 slot = (slot + 1) % NUM_RX_BUFF;
1628 if (ctrl & MAL_RX_CTRL_FIRST) {
1629 BUG_ON(dev->rx_sg_skb);
1630 if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) {
1631 DBG(dev, "rx OOM %d" NL, slot);
1632 ++dev->estats.rx_dropped_oom;
1633 emac_recycle_rx_skb(dev, slot, 0);
1635 dev->rx_sg_skb = skb;
1638 } else if (!emac_rx_sg_append(dev, slot) &&
1639 (ctrl & MAL_RX_CTRL_LAST)) {
1641 skb = dev->rx_sg_skb;
1642 dev->rx_sg_skb = NULL;
1644 ctrl &= EMAC_BAD_RX_MASK;
1645 if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
1646 emac_parse_rx_error(dev, ctrl);
1647 ++dev->estats.rx_dropped_error;
1655 DBG(dev, "rx OOM %d" NL, slot);
1656 /* Drop the packet and recycle skb */
1657 ++dev->estats.rx_dropped_oom;
1658 emac_recycle_rx_skb(dev, slot, 0);
1663 DBG2(dev, "rx %d BDs" NL, received);
1664 dev->rx_slot = slot;
1667 if (unlikely(budget && test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags))) {
1669 if (!(dev->rx_desc[slot].ctrl & MAL_RX_CTRL_EMPTY)) {
1670 DBG2(dev, "rx restart" NL);
1675 if (dev->rx_sg_skb) {
1676 DBG2(dev, "dropping partial rx packet" NL);
1677 ++dev->estats.rx_dropped_error;
1678 dev_kfree_skb(dev->rx_sg_skb);
1679 dev->rx_sg_skb = NULL;
1682 clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
1683 mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
1684 emac_rx_enable(dev);
1690 /* NAPI poll context */
1691 static int emac_peek_rx(void *param)
1693 struct emac_instance *dev = param;
1695 return !(dev->rx_desc[dev->rx_slot].ctrl & MAL_RX_CTRL_EMPTY);
1698 /* NAPI poll context */
1699 static int emac_peek_rx_sg(void *param)
1701 struct emac_instance *dev = param;
1703 int slot = dev->rx_slot;
1705 u16 ctrl = dev->rx_desc[slot].ctrl;
1706 if (ctrl & MAL_RX_CTRL_EMPTY)
1708 else if (ctrl & MAL_RX_CTRL_LAST)
1711 slot = (slot + 1) % NUM_RX_BUFF;
1713 /* I'm just being paranoid here :) */
1714 if (unlikely(slot == dev->rx_slot))
1720 static void emac_rxde(void *param)
1722 struct emac_instance *dev = param;
1724 ++dev->estats.rx_stopped;
1725 emac_rx_disable_async(dev);
1729 static irqreturn_t emac_irq(int irq, void *dev_instance)
1731 struct emac_instance *dev = dev_instance;
1732 struct emac_regs __iomem *p = dev->emacp;
1733 struct emac_error_stats *st = &dev->estats;
1736 spin_lock(&dev->lock);
1738 isr = in_be32(&p->isr);
1739 out_be32(&p->isr, isr);
1741 DBG(dev, "isr = %08x" NL, isr);
1743 if (isr & EMAC4_ISR_TXPE)
1745 if (isr & EMAC4_ISR_RXPE)
1747 if (isr & EMAC4_ISR_TXUE)
1749 if (isr & EMAC4_ISR_RXOE)
1750 ++st->rx_fifo_overrun;
1751 if (isr & EMAC_ISR_OVR)
1753 if (isr & EMAC_ISR_BP)
1754 ++st->rx_bad_packet;
1755 if (isr & EMAC_ISR_RP)
1756 ++st->rx_runt_packet;
1757 if (isr & EMAC_ISR_SE)
1758 ++st->rx_short_event;
1759 if (isr & EMAC_ISR_ALE)
1760 ++st->rx_alignment_error;
1761 if (isr & EMAC_ISR_BFCS)
1763 if (isr & EMAC_ISR_PTLE)
1764 ++st->rx_packet_too_long;
1765 if (isr & EMAC_ISR_ORE)
1766 ++st->rx_out_of_range;
1767 if (isr & EMAC_ISR_IRE)
1769 if (isr & EMAC_ISR_SQE)
1771 if (isr & EMAC_ISR_TE)
1774 spin_unlock(&dev->lock);
1779 static struct net_device_stats *emac_stats(struct net_device *ndev)
1781 struct emac_instance *dev = netdev_priv(ndev);
1782 struct emac_stats *st = &dev->stats;
1783 struct emac_error_stats *est = &dev->estats;
1784 struct net_device_stats *nst = &dev->nstats;
1785 unsigned long flags;
1787 DBG2(dev, "stats" NL);
1789 /* Compute "legacy" statistics */
1790 spin_lock_irqsave(&dev->lock, flags);
1791 nst->rx_packets = (unsigned long)st->rx_packets;
1792 nst->rx_bytes = (unsigned long)st->rx_bytes;
1793 nst->tx_packets = (unsigned long)st->tx_packets;
1794 nst->tx_bytes = (unsigned long)st->tx_bytes;
1795 nst->rx_dropped = (unsigned long)(est->rx_dropped_oom +
1796 est->rx_dropped_error +
1797 est->rx_dropped_resize +
1798 est->rx_dropped_mtu);
1799 nst->tx_dropped = (unsigned long)est->tx_dropped;
1801 nst->rx_errors = (unsigned long)est->rx_bd_errors;
1802 nst->rx_fifo_errors = (unsigned long)(est->rx_bd_overrun +
1803 est->rx_fifo_overrun +
1805 nst->rx_frame_errors = (unsigned long)(est->rx_bd_alignment_error +
1806 est->rx_alignment_error);
1807 nst->rx_crc_errors = (unsigned long)(est->rx_bd_bad_fcs +
1809 nst->rx_length_errors = (unsigned long)(est->rx_bd_runt_packet +
1810 est->rx_bd_short_event +
1811 est->rx_bd_packet_too_long +
1812 est->rx_bd_out_of_range +
1813 est->rx_bd_in_range +
1814 est->rx_runt_packet +
1815 est->rx_short_event +
1816 est->rx_packet_too_long +
1817 est->rx_out_of_range +
1820 nst->tx_errors = (unsigned long)(est->tx_bd_errors + est->tx_errors);
1821 nst->tx_fifo_errors = (unsigned long)(est->tx_bd_underrun +
1823 nst->tx_carrier_errors = (unsigned long)est->tx_bd_carrier_loss;
1824 nst->collisions = (unsigned long)(est->tx_bd_excessive_deferral +
1825 est->tx_bd_excessive_collisions +
1826 est->tx_bd_late_collision +
1827 est->tx_bd_multple_collisions);
1828 spin_unlock_irqrestore(&dev->lock, flags);
1832 static struct mal_commac_ops emac_commac_ops = {
1833 .poll_tx = &emac_poll_tx,
1834 .poll_rx = &emac_poll_rx,
1835 .peek_rx = &emac_peek_rx,
1839 static struct mal_commac_ops emac_commac_sg_ops = {
1840 .poll_tx = &emac_poll_tx,
1841 .poll_rx = &emac_poll_rx,
1842 .peek_rx = &emac_peek_rx_sg,
1846 /* Ethtool support */
1847 static int emac_ethtool_get_settings(struct net_device *ndev,
1848 struct ethtool_cmd *cmd)
1850 struct emac_instance *dev = netdev_priv(ndev);
1852 cmd->supported = dev->phy.features;
1853 cmd->port = PORT_MII;
1854 cmd->phy_address = dev->phy.address;
1856 dev->phy.address >= 0 ? XCVR_EXTERNAL : XCVR_INTERNAL;
1858 mutex_lock(&dev->link_lock);
1859 cmd->advertising = dev->phy.advertising;
1860 cmd->autoneg = dev->phy.autoneg;
1861 cmd->speed = dev->phy.speed;
1862 cmd->duplex = dev->phy.duplex;
1863 mutex_unlock(&dev->link_lock);
1868 static int emac_ethtool_set_settings(struct net_device *ndev,
1869 struct ethtool_cmd *cmd)
1871 struct emac_instance *dev = netdev_priv(ndev);
1872 u32 f = dev->phy.features;
1874 DBG(dev, "set_settings(%d, %d, %d, 0x%08x)" NL,
1875 cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising);
1877 /* Basic sanity checks */
1878 if (dev->phy.address < 0)
1880 if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
1882 if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
1884 if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL)
1887 if (cmd->autoneg == AUTONEG_DISABLE) {
1888 switch (cmd->speed) {
1890 if (cmd->duplex == DUPLEX_HALF
1891 && !(f & SUPPORTED_10baseT_Half))
1893 if (cmd->duplex == DUPLEX_FULL
1894 && !(f & SUPPORTED_10baseT_Full))
1898 if (cmd->duplex == DUPLEX_HALF
1899 && !(f & SUPPORTED_100baseT_Half))
1901 if (cmd->duplex == DUPLEX_FULL
1902 && !(f & SUPPORTED_100baseT_Full))
1906 if (cmd->duplex == DUPLEX_HALF
1907 && !(f & SUPPORTED_1000baseT_Half))
1909 if (cmd->duplex == DUPLEX_FULL
1910 && !(f & SUPPORTED_1000baseT_Full))
1917 mutex_lock(&dev->link_lock);
1918 dev->phy.def->ops->setup_forced(&dev->phy, cmd->speed,
1920 mutex_unlock(&dev->link_lock);
1923 if (!(f & SUPPORTED_Autoneg))
1926 mutex_lock(&dev->link_lock);
1927 dev->phy.def->ops->setup_aneg(&dev->phy,
1928 (cmd->advertising & f) |
1929 (dev->phy.advertising &
1931 ADVERTISED_Asym_Pause)));
1932 mutex_unlock(&dev->link_lock);
1934 emac_force_link_update(dev);
1939 static void emac_ethtool_get_ringparam(struct net_device *ndev,
1940 struct ethtool_ringparam *rp)
1942 rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF;
1943 rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF;
1946 static void emac_ethtool_get_pauseparam(struct net_device *ndev,
1947 struct ethtool_pauseparam *pp)
1949 struct emac_instance *dev = netdev_priv(ndev);
1951 mutex_lock(&dev->link_lock);
1952 if ((dev->phy.features & SUPPORTED_Autoneg) &&
1953 (dev->phy.advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause)))
1956 if (dev->phy.duplex == DUPLEX_FULL) {
1958 pp->rx_pause = pp->tx_pause = 1;
1959 else if (dev->phy.asym_pause)
1962 mutex_unlock(&dev->link_lock);
1965 static u32 emac_ethtool_get_rx_csum(struct net_device *ndev)
1967 struct emac_instance *dev = netdev_priv(ndev);
1969 return dev->tah_dev != NULL;
1972 static int emac_get_regs_len(struct emac_instance *dev)
1974 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
1975 return sizeof(struct emac_ethtool_regs_subhdr) +
1976 EMAC4_ETHTOOL_REGS_SIZE;
1978 return sizeof(struct emac_ethtool_regs_subhdr) +
1979 EMAC_ETHTOOL_REGS_SIZE;
1982 static int emac_ethtool_get_regs_len(struct net_device *ndev)
1984 struct emac_instance *dev = netdev_priv(ndev);
1987 size = sizeof(struct emac_ethtool_regs_hdr) +
1988 emac_get_regs_len(dev) + mal_get_regs_len(dev->mal);
1989 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
1990 size += zmii_get_regs_len(dev->zmii_dev);
1991 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
1992 size += rgmii_get_regs_len(dev->rgmii_dev);
1993 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
1994 size += tah_get_regs_len(dev->tah_dev);
1999 static void *emac_dump_regs(struct emac_instance *dev, void *buf)
2001 struct emac_ethtool_regs_subhdr *hdr = buf;
2003 hdr->index = dev->cell_index;
2004 if (emac_has_feature(dev, EMAC_FTR_EMAC4)) {
2005 hdr->version = EMAC4_ETHTOOL_REGS_VER;
2006 memcpy_fromio(hdr + 1, dev->emacp, EMAC4_ETHTOOL_REGS_SIZE);
2007 return ((void *)(hdr + 1) + EMAC4_ETHTOOL_REGS_SIZE);
2009 hdr->version = EMAC_ETHTOOL_REGS_VER;
2010 memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE);
2011 return ((void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE);
2015 static void emac_ethtool_get_regs(struct net_device *ndev,
2016 struct ethtool_regs *regs, void *buf)
2018 struct emac_instance *dev = netdev_priv(ndev);
2019 struct emac_ethtool_regs_hdr *hdr = buf;
2021 hdr->components = 0;
2024 buf = mal_dump_regs(dev->mal, buf);
2025 buf = emac_dump_regs(dev, buf);
2026 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) {
2027 hdr->components |= EMAC_ETHTOOL_REGS_ZMII;
2028 buf = zmii_dump_regs(dev->zmii_dev, buf);
2030 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) {
2031 hdr->components |= EMAC_ETHTOOL_REGS_RGMII;
2032 buf = rgmii_dump_regs(dev->rgmii_dev, buf);
2034 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH)) {
2035 hdr->components |= EMAC_ETHTOOL_REGS_TAH;
2036 buf = tah_dump_regs(dev->tah_dev, buf);
2040 static int emac_ethtool_nway_reset(struct net_device *ndev)
2042 struct emac_instance *dev = netdev_priv(ndev);
2045 DBG(dev, "nway_reset" NL);
2047 if (dev->phy.address < 0)
2050 mutex_lock(&dev->link_lock);
2051 if (!dev->phy.autoneg) {
2056 dev->phy.def->ops->setup_aneg(&dev->phy, dev->phy.advertising);
2058 mutex_unlock(&dev->link_lock);
2059 emac_force_link_update(dev);
2063 static int emac_ethtool_get_stats_count(struct net_device *ndev)
2065 return EMAC_ETHTOOL_STATS_COUNT;
2068 static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset,
2071 if (stringset == ETH_SS_STATS)
2072 memcpy(buf, &emac_stats_keys, sizeof(emac_stats_keys));
2075 static void emac_ethtool_get_ethtool_stats(struct net_device *ndev,
2076 struct ethtool_stats *estats,
2079 struct emac_instance *dev = netdev_priv(ndev);
2081 memcpy(tmp_stats, &dev->stats, sizeof(dev->stats));
2082 tmp_stats += sizeof(dev->stats) / sizeof(u64);
2083 memcpy(tmp_stats, &dev->estats, sizeof(dev->estats));
2086 static void emac_ethtool_get_drvinfo(struct net_device *ndev,
2087 struct ethtool_drvinfo *info)
2089 struct emac_instance *dev = netdev_priv(ndev);
2091 strcpy(info->driver, "ibm_emac");
2092 strcpy(info->version, DRV_VERSION);
2093 info->fw_version[0] = '\0';
2094 sprintf(info->bus_info, "PPC 4xx EMAC-%d %s",
2095 dev->cell_index, dev->ofdev->node->full_name);
2096 info->n_stats = emac_ethtool_get_stats_count(ndev);
2097 info->regdump_len = emac_ethtool_get_regs_len(ndev);
2100 static const struct ethtool_ops emac_ethtool_ops = {
2101 .get_settings = emac_ethtool_get_settings,
2102 .set_settings = emac_ethtool_set_settings,
2103 .get_drvinfo = emac_ethtool_get_drvinfo,
2105 .get_regs_len = emac_ethtool_get_regs_len,
2106 .get_regs = emac_ethtool_get_regs,
2108 .nway_reset = emac_ethtool_nway_reset,
2110 .get_ringparam = emac_ethtool_get_ringparam,
2111 .get_pauseparam = emac_ethtool_get_pauseparam,
2113 .get_rx_csum = emac_ethtool_get_rx_csum,
2115 .get_strings = emac_ethtool_get_strings,
2116 .get_stats_count = emac_ethtool_get_stats_count,
2117 .get_ethtool_stats = emac_ethtool_get_ethtool_stats,
2119 .get_link = ethtool_op_get_link,
2120 .get_tx_csum = ethtool_op_get_tx_csum,
2121 .get_sg = ethtool_op_get_sg,
2124 static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
2126 struct emac_instance *dev = netdev_priv(ndev);
2127 uint16_t *data = (uint16_t *) & rq->ifr_ifru;
2129 DBG(dev, "ioctl %08x" NL, cmd);
2131 if (dev->phy.address < 0)
2136 case SIOCDEVPRIVATE:
2137 data[0] = dev->phy.address;
2140 case SIOCDEVPRIVATE + 1:
2141 data[3] = emac_mdio_read(ndev, dev->phy.address, data[1]);
2145 case SIOCDEVPRIVATE + 2:
2146 if (!capable(CAP_NET_ADMIN))
2148 emac_mdio_write(ndev, dev->phy.address, data[1], data[2]);
2155 struct emac_depentry {
2157 struct device_node *node;
2158 struct of_device *ofdev;
2162 #define EMAC_DEP_MAL_IDX 0
2163 #define EMAC_DEP_ZMII_IDX 1
2164 #define EMAC_DEP_RGMII_IDX 2
2165 #define EMAC_DEP_TAH_IDX 3
2166 #define EMAC_DEP_MDIO_IDX 4
2167 #define EMAC_DEP_PREV_IDX 5
2168 #define EMAC_DEP_COUNT 6
2170 static int __devinit emac_check_deps(struct emac_instance *dev,
2171 struct emac_depentry *deps)
2174 struct device_node *np;
2176 for (i = 0; i < EMAC_DEP_COUNT; i++) {
2177 /* no dependency on that item, allright */
2178 if (deps[i].phandle == 0) {
2182 /* special case for blist as the dependency might go away */
2183 if (i == EMAC_DEP_PREV_IDX) {
2184 np = *(dev->blist - 1);
2186 deps[i].phandle = 0;
2190 if (deps[i].node == NULL)
2191 deps[i].node = of_node_get(np);
2193 if (deps[i].node == NULL)
2194 deps[i].node = of_find_node_by_phandle(deps[i].phandle);
2195 if (deps[i].node == NULL)
2197 if (deps[i].ofdev == NULL)
2198 deps[i].ofdev = of_find_device_by_node(deps[i].node);
2199 if (deps[i].ofdev == NULL)
2201 if (deps[i].drvdata == NULL)
2202 deps[i].drvdata = dev_get_drvdata(&deps[i].ofdev->dev);
2203 if (deps[i].drvdata != NULL)
2206 return (there == EMAC_DEP_COUNT);
2209 static void emac_put_deps(struct emac_instance *dev)
2212 of_dev_put(dev->mal_dev);
2214 of_dev_put(dev->zmii_dev);
2216 of_dev_put(dev->rgmii_dev);
2218 of_dev_put(dev->mdio_dev);
2220 of_dev_put(dev->tah_dev);
2223 static int __devinit emac_of_bus_notify(struct notifier_block *nb,
2224 unsigned long action, void *data)
2226 /* We are only intereted in device addition */
2227 if (action == BUS_NOTIFY_BOUND_DRIVER)
2228 wake_up_all(&emac_probe_wait);
2232 static struct notifier_block emac_of_bus_notifier = {
2233 .notifier_call = emac_of_bus_notify
2236 static int __devinit emac_wait_deps(struct emac_instance *dev)
2238 struct emac_depentry deps[EMAC_DEP_COUNT];
2241 memset(&deps, 0, sizeof(deps));
2243 deps[EMAC_DEP_MAL_IDX].phandle = dev->mal_ph;
2244 deps[EMAC_DEP_ZMII_IDX].phandle = dev->zmii_ph;
2245 deps[EMAC_DEP_RGMII_IDX].phandle = dev->rgmii_ph;
2247 deps[EMAC_DEP_TAH_IDX].phandle = dev->tah_ph;
2249 deps[EMAC_DEP_MDIO_IDX].phandle = dev->mdio_ph;
2250 if (dev->blist && dev->blist > emac_boot_list)
2251 deps[EMAC_DEP_PREV_IDX].phandle = 0xffffffffu;
2252 bus_register_notifier(&of_platform_bus_type, &emac_of_bus_notifier);
2253 wait_event_timeout(emac_probe_wait,
2254 emac_check_deps(dev, deps),
2255 EMAC_PROBE_DEP_TIMEOUT);
2256 bus_unregister_notifier(&of_platform_bus_type, &emac_of_bus_notifier);
2257 err = emac_check_deps(dev, deps) ? 0 : -ENODEV;
2258 for (i = 0; i < EMAC_DEP_COUNT; i++) {
2260 of_node_put(deps[i].node);
2261 if (err && deps[i].ofdev)
2262 of_dev_put(deps[i].ofdev);
2265 dev->mal_dev = deps[EMAC_DEP_MAL_IDX].ofdev;
2266 dev->zmii_dev = deps[EMAC_DEP_ZMII_IDX].ofdev;
2267 dev->rgmii_dev = deps[EMAC_DEP_RGMII_IDX].ofdev;
2268 dev->tah_dev = deps[EMAC_DEP_TAH_IDX].ofdev;
2269 dev->mdio_dev = deps[EMAC_DEP_MDIO_IDX].ofdev;
2271 if (deps[EMAC_DEP_PREV_IDX].ofdev)
2272 of_dev_put(deps[EMAC_DEP_PREV_IDX].ofdev);
2276 static int __devinit emac_read_uint_prop(struct device_node *np, const char *name,
2277 u32 *val, int fatal)
2280 const u32 *prop = of_get_property(np, name, &len);
2281 if (prop == NULL || len < sizeof(u32)) {
2283 printk(KERN_ERR "%s: missing %s property\n",
2284 np->full_name, name);
2291 static int __devinit emac_init_phy(struct emac_instance *dev)
2293 struct device_node *np = dev->ofdev->node;
2294 struct net_device *ndev = dev->ndev;
2298 dev->phy.dev = ndev;
2299 dev->phy.mode = dev->phy_mode;
2301 /* PHY-less configuration.
2302 * XXX I probably should move these settings to the dev tree
2304 if (dev->phy_address == 0xffffffff && dev->phy_map == 0xffffffff) {
2307 /* PHY-less configuration.
2308 * XXX I probably should move these settings to the dev tree
2310 dev->phy.address = -1;
2311 dev->phy.features = SUPPORTED_100baseT_Full | SUPPORTED_MII;
2317 mutex_lock(&emac_phy_map_lock);
2318 phy_map = dev->phy_map | busy_phy_map;
2320 DBG(dev, "PHY maps %08x %08x" NL, dev->phy_map, busy_phy_map);
2322 dev->phy.mdio_read = emac_mdio_read;
2323 dev->phy.mdio_write = emac_mdio_write;
2325 /* Configure EMAC with defaults so we can at least use MDIO
2326 * This is needed mostly for 440GX
2328 if (emac_phy_gpcs(dev->phy.mode)) {
2330 * Make GPCS PHY address equal to EMAC index.
2331 * We probably should take into account busy_phy_map
2332 * and/or phy_map here.
2334 * Note that the busy_phy_map is currently global
2335 * while it should probably be per-ASIC...
2337 dev->phy.address = dev->cell_index;
2340 emac_configure(dev);
2342 if (dev->phy_address != 0xffffffff)
2343 phy_map = ~(1 << dev->phy_address);
2345 for (i = 0; i < 0x20; phy_map >>= 1, ++i)
2346 if (!(phy_map & 1)) {
2348 busy_phy_map |= 1 << i;
2350 /* Quick check if there is a PHY at the address */
2351 r = emac_mdio_read(dev->ndev, i, MII_BMCR);
2352 if (r == 0xffff || r < 0)
2354 if (!emac_mii_phy_probe(&dev->phy, i))
2357 mutex_unlock(&emac_phy_map_lock);
2359 printk(KERN_WARNING "%s: can't find PHY!\n", np->full_name);
2364 if (dev->phy.def->ops->init)
2365 dev->phy.def->ops->init(&dev->phy);
2367 /* Disable any PHY features not supported by the platform */
2368 dev->phy.def->features &= ~dev->phy_feat_exc;
2370 /* Setup initial link parameters */
2371 if (dev->phy.features & SUPPORTED_Autoneg) {
2372 adv = dev->phy.features;
2373 if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x))
2374 adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
2375 /* Restart autonegotiation */
2376 dev->phy.def->ops->setup_aneg(&dev->phy, adv);
2378 u32 f = dev->phy.def->features;
2379 int speed = SPEED_10, fd = DUPLEX_HALF;
2381 /* Select highest supported speed/duplex */
2382 if (f & SUPPORTED_1000baseT_Full) {
2385 } else if (f & SUPPORTED_1000baseT_Half)
2387 else if (f & SUPPORTED_100baseT_Full) {
2390 } else if (f & SUPPORTED_100baseT_Half)
2392 else if (f & SUPPORTED_10baseT_Full)
2395 /* Force link parameters */
2396 dev->phy.def->ops->setup_forced(&dev->phy, speed, fd);
2401 static int __devinit emac_init_config(struct emac_instance *dev)
2403 struct device_node *np = dev->ofdev->node;
2406 const char *pm, *phy_modes[] = {
2408 [PHY_MODE_MII] = "mii",
2409 [PHY_MODE_RMII] = "rmii",
2410 [PHY_MODE_SMII] = "smii",
2411 [PHY_MODE_RGMII] = "rgmii",
2412 [PHY_MODE_TBI] = "tbi",
2413 [PHY_MODE_GMII] = "gmii",
2414 [PHY_MODE_RTBI] = "rtbi",
2415 [PHY_MODE_SGMII] = "sgmii",
2418 /* Read config from device-tree */
2419 if (emac_read_uint_prop(np, "mal-device", &dev->mal_ph, 1))
2421 if (emac_read_uint_prop(np, "mal-tx-channel", &dev->mal_tx_chan, 1))
2423 if (emac_read_uint_prop(np, "mal-rx-channel", &dev->mal_rx_chan, 1))
2425 if (emac_read_uint_prop(np, "cell-index", &dev->cell_index, 1))
2427 if (emac_read_uint_prop(np, "max-frame-size", &dev->max_mtu, 0))
2428 dev->max_mtu = 1500;
2429 if (emac_read_uint_prop(np, "rx-fifo-size", &dev->rx_fifo_size, 0))
2430 dev->rx_fifo_size = 2048;
2431 if (emac_read_uint_prop(np, "tx-fifo-size", &dev->tx_fifo_size, 0))
2432 dev->tx_fifo_size = 2048;
2433 if (emac_read_uint_prop(np, "rx-fifo-size-gige", &dev->rx_fifo_size_gige, 0))
2434 dev->rx_fifo_size_gige = dev->rx_fifo_size;
2435 if (emac_read_uint_prop(np, "tx-fifo-size-gige", &dev->tx_fifo_size_gige, 0))
2436 dev->tx_fifo_size_gige = dev->tx_fifo_size;
2437 if (emac_read_uint_prop(np, "phy-address", &dev->phy_address, 0))
2438 dev->phy_address = 0xffffffff;
2439 if (emac_read_uint_prop(np, "phy-map", &dev->phy_map, 0))
2440 dev->phy_map = 0xffffffff;
2441 if (emac_read_uint_prop(np->parent, "clock-frequency", &dev->opb_bus_freq, 1))
2443 if (emac_read_uint_prop(np, "tah-device", &dev->tah_ph, 0))
2445 if (emac_read_uint_prop(np, "tah-channel", &dev->tah_port, 0))
2447 if (emac_read_uint_prop(np, "mdio-device", &dev->mdio_ph, 0))
2449 if (emac_read_uint_prop(np, "zmii-device", &dev->zmii_ph, 0))
2451 if (emac_read_uint_prop(np, "zmii-channel", &dev->zmii_port, 0))
2452 dev->zmii_port = 0xffffffff;;
2453 if (emac_read_uint_prop(np, "rgmii-device", &dev->rgmii_ph, 0))
2455 if (emac_read_uint_prop(np, "rgmii-channel", &dev->rgmii_port, 0))
2456 dev->rgmii_port = 0xffffffff;;
2457 if (emac_read_uint_prop(np, "fifo-entry-size", &dev->fifo_entry_size, 0))
2458 dev->fifo_entry_size = 16;
2459 if (emac_read_uint_prop(np, "mal-burst-size", &dev->mal_burst_size, 0))
2460 dev->mal_burst_size = 256;
2462 /* PHY mode needs some decoding */
2463 dev->phy_mode = PHY_MODE_NA;
2464 pm = of_get_property(np, "phy-mode", &plen);
2467 for (i = 0; i < ARRAY_SIZE(phy_modes); i++)
2468 if (!strcasecmp(pm, phy_modes[i])) {
2474 /* Backward compat with non-final DT */
2475 if (dev->phy_mode == PHY_MODE_NA && pm != NULL && plen == 4) {
2476 u32 nmode = *(const u32 *)pm;
2477 if (nmode > PHY_MODE_NA && nmode <= PHY_MODE_SGMII)
2478 dev->phy_mode = nmode;
2481 /* Check EMAC version */
2482 if (of_device_is_compatible(np, "ibm,emac4"))
2483 dev->features |= EMAC_FTR_EMAC4;
2485 /* Fixup some feature bits based on the device tree */
2486 if (of_get_property(np, "has-inverted-stacr-oc", NULL))
2487 dev->features |= EMAC_FTR_STACR_OC_INVERT;
2488 if (of_get_property(np, "has-new-stacr-staopc", NULL))
2489 dev->features |= EMAC_FTR_HAS_NEW_STACR;
2491 /* CAB lacks the appropriate properties */
2492 if (of_device_is_compatible(np, "ibm,emac-axon"))
2493 dev->features |= EMAC_FTR_HAS_NEW_STACR |
2494 EMAC_FTR_STACR_OC_INVERT;
2496 /* Enable TAH/ZMII/RGMII features as found */
2497 if (dev->tah_ph != 0) {
2498 #ifdef CONFIG_IBM_NEW_EMAC_TAH
2499 dev->features |= EMAC_FTR_HAS_TAH;
2501 printk(KERN_ERR "%s: TAH support not enabled !\n",
2507 if (dev->zmii_ph != 0) {
2508 #ifdef CONFIG_IBM_NEW_EMAC_ZMII
2509 dev->features |= EMAC_FTR_HAS_ZMII;
2511 printk(KERN_ERR "%s: ZMII support not enabled !\n",
2517 if (dev->rgmii_ph != 0) {
2518 #ifdef CONFIG_IBM_NEW_EMAC_RGMII
2519 dev->features |= EMAC_FTR_HAS_RGMII;
2521 printk(KERN_ERR "%s: RGMII support not enabled !\n",
2527 /* Read MAC-address */
2528 p = of_get_property(np, "local-mac-address", NULL);
2530 printk(KERN_ERR "%s: Can't find local-mac-address property\n",
2534 memcpy(dev->ndev->dev_addr, p, 6);
2536 DBG(dev, "features : 0x%08x / 0x%08x\n", dev->features, EMAC_FTRS_POSSIBLE);
2537 DBG(dev, "tx_fifo_size : %d (%d gige)\n", dev->tx_fifo_size, dev->tx_fifo_size_gige);
2538 DBG(dev, "rx_fifo_size : %d (%d gige)\n", dev->rx_fifo_size, dev->rx_fifo_size_gige);
2539 DBG(dev, "max_mtu : %d\n", dev->max_mtu);
2540 DBG(dev, "OPB freq : %d\n", dev->opb_bus_freq);
2545 static int __devinit emac_probe(struct of_device *ofdev,
2546 const struct of_device_id *match)
2548 struct net_device *ndev;
2549 struct emac_instance *dev;
2550 struct device_node *np = ofdev->node;
2551 struct device_node **blist = NULL;
2554 /* Skip unused/unwired EMACS */
2555 if (of_get_property(np, "unused", NULL))
2558 /* Find ourselves in the bootlist if we are there */
2559 for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
2560 if (emac_boot_list[i] == np)
2561 blist = &emac_boot_list[i];
2563 /* Allocate our net_device structure */
2565 ndev = alloc_etherdev(sizeof(struct emac_instance));
2567 printk(KERN_ERR "%s: could not allocate ethernet device!\n",
2571 dev = netdev_priv(ndev);
2575 SET_NETDEV_DEV(ndev, &ofdev->dev);
2577 /* Initialize some embedded data structures */
2578 mutex_init(&dev->mdio_lock);
2579 mutex_init(&dev->link_lock);
2580 spin_lock_init(&dev->lock);
2581 INIT_WORK(&dev->reset_work, emac_reset_work);
2583 /* Init various config data based on device-tree */
2584 err = emac_init_config(dev);
2588 /* Get interrupts. EMAC irq is mandatory, WOL irq is optional */
2589 dev->emac_irq = irq_of_parse_and_map(np, 0);
2590 dev->wol_irq = irq_of_parse_and_map(np, 1);
2591 if (dev->emac_irq == NO_IRQ) {
2592 printk(KERN_ERR "%s: Can't map main interrupt\n", np->full_name);
2595 ndev->irq = dev->emac_irq;
2598 if (of_address_to_resource(np, 0, &dev->rsrc_regs)) {
2599 printk(KERN_ERR "%s: Can't get registers address\n",
2603 // TODO : request_mem_region
2604 dev->emacp = ioremap(dev->rsrc_regs.start, sizeof(struct emac_regs));
2605 if (dev->emacp == NULL) {
2606 printk(KERN_ERR "%s: Can't map device registers!\n",
2612 /* Wait for dependent devices */
2613 err = emac_wait_deps(dev);
2616 "%s: Timeout waiting for dependent devices\n",
2618 /* display more info about what's missing ? */
2621 dev->mal = dev_get_drvdata(&dev->mal_dev->dev);
2622 if (dev->mdio_dev != NULL)
2623 dev->mdio_instance = dev_get_drvdata(&dev->mdio_dev->dev);
2625 /* Register with MAL */
2626 dev->commac.ops = &emac_commac_ops;
2627 dev->commac.dev = dev;
2628 dev->commac.tx_chan_mask = MAL_CHAN_MASK(dev->mal_tx_chan);
2629 dev->commac.rx_chan_mask = MAL_CHAN_MASK(dev->mal_rx_chan);
2630 err = mal_register_commac(dev->mal, &dev->commac);
2632 printk(KERN_ERR "%s: failed to register with mal %s!\n",
2633 np->full_name, dev->mal_dev->node->full_name);
2636 dev->rx_skb_size = emac_rx_skb_size(ndev->mtu);
2637 dev->rx_sync_size = emac_rx_sync_size(ndev->mtu);
2639 /* Get pointers to BD rings */
2641 dev->mal->bd_virt + mal_tx_bd_offset(dev->mal, dev->mal_tx_chan);
2643 dev->mal->bd_virt + mal_rx_bd_offset(dev->mal, dev->mal_rx_chan);
2645 DBG(dev, "tx_desc %p" NL, dev->tx_desc);
2646 DBG(dev, "rx_desc %p" NL, dev->rx_desc);
2649 memset(dev->tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor));
2650 memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor));
2652 /* Attach to ZMII, if needed */
2653 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII) &&
2654 (err = zmii_attach(dev->zmii_dev, dev->zmii_port, &dev->phy_mode)) != 0)
2655 goto err_unreg_commac;
2657 /* Attach to RGMII, if needed */
2658 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII) &&
2659 (err = rgmii_attach(dev->rgmii_dev, dev->rgmii_port, dev->phy_mode)) != 0)
2660 goto err_detach_zmii;
2662 /* Attach to TAH, if needed */
2663 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
2664 (err = tah_attach(dev->tah_dev, dev->tah_port)) != 0)
2665 goto err_detach_rgmii;
2667 /* Set some link defaults before we can find out real parameters */
2668 dev->phy.speed = SPEED_100;
2669 dev->phy.duplex = DUPLEX_FULL;
2670 dev->phy.autoneg = AUTONEG_DISABLE;
2671 dev->phy.pause = dev->phy.asym_pause = 0;
2672 dev->stop_timeout = STOP_TIMEOUT_100;
2673 INIT_DELAYED_WORK(&dev->link_work, emac_link_timer);
2675 /* Find PHY if any */
2676 err = emac_init_phy(dev);
2678 goto err_detach_tah;
2680 /* Fill in the driver function table */
2681 ndev->open = &emac_open;
2683 ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
2684 ndev->tx_timeout = &emac_tx_timeout;
2685 ndev->watchdog_timeo = 5 * HZ;
2686 ndev->stop = &emac_close;
2687 ndev->get_stats = &emac_stats;
2688 ndev->set_multicast_list = &emac_set_multicast_list;
2689 ndev->do_ioctl = &emac_ioctl;
2690 if (emac_phy_supports_gige(dev->phy_mode)) {
2691 ndev->hard_start_xmit = &emac_start_xmit_sg;
2692 ndev->change_mtu = &emac_change_mtu;
2693 dev->commac.ops = &emac_commac_sg_ops;
2695 ndev->hard_start_xmit = &emac_start_xmit;
2697 SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);
2699 netif_carrier_off(ndev);
2700 netif_stop_queue(ndev);
2702 err = register_netdev(ndev);
2704 printk(KERN_ERR "%s: failed to register net device (%d)!\n",
2705 np->full_name, err);
2706 goto err_detach_tah;
2709 /* Set our drvdata last as we don't want them visible until we are
2713 dev_set_drvdata(&ofdev->dev, dev);
2715 /* There's a new kid in town ! Let's tell everybody */
2716 wake_up_all(&emac_probe_wait);
2720 "%s: EMAC-%d %s, MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
2721 ndev->name, dev->cell_index, np->full_name,
2722 ndev->dev_addr[0], ndev->dev_addr[1], ndev->dev_addr[2],
2723 ndev->dev_addr[3], ndev->dev_addr[4], ndev->dev_addr[5]);
2725 if (dev->phy.address >= 0)
2726 printk("%s: found %s PHY (0x%02x)\n", ndev->name,
2727 dev->phy.def->name, dev->phy.address);
2729 emac_dbg_register(dev);
2734 /* I have a bad feeling about this ... */
2737 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2738 tah_detach(dev->tah_dev, dev->tah_port);
2740 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2741 rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
2743 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2744 zmii_detach(dev->zmii_dev, dev->zmii_port);
2746 mal_unregister_commac(dev->mal, &dev->commac);
2750 iounmap(dev->emacp);
2752 if (dev->wol_irq != NO_IRQ)
2753 irq_dispose_mapping(dev->wol_irq);
2754 if (dev->emac_irq != NO_IRQ)
2755 irq_dispose_mapping(dev->emac_irq);
2759 /* if we were on the bootlist, remove us as we won't show up and
2760 * wake up all waiters to notify them in case they were waiting
2765 wake_up_all(&emac_probe_wait);
2770 static int __devexit emac_remove(struct of_device *ofdev)
2772 struct emac_instance *dev = dev_get_drvdata(&ofdev->dev);
2774 DBG(dev, "remove" NL);
2776 dev_set_drvdata(&ofdev->dev, NULL);
2778 unregister_netdev(dev->ndev);
2780 flush_scheduled_work();
2782 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2783 tah_detach(dev->tah_dev, dev->tah_port);
2784 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2785 rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
2786 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2787 zmii_detach(dev->zmii_dev, dev->zmii_port);
2789 mal_unregister_commac(dev->mal, &dev->commac);
2792 emac_dbg_unregister(dev);
2793 iounmap(dev->emacp);
2795 if (dev->wol_irq != NO_IRQ)
2796 irq_dispose_mapping(dev->wol_irq);
2797 if (dev->emac_irq != NO_IRQ)
2798 irq_dispose_mapping(dev->emac_irq);
2805 /* XXX Features in here should be replaced by properties... */
2806 static struct of_device_id emac_match[] =
2810 .compatible = "ibm,emac",
2814 .compatible = "ibm,emac4",
2819 static struct of_platform_driver emac_driver = {
2821 .match_table = emac_match,
2823 .probe = emac_probe,
2824 .remove = emac_remove,
2827 static void __init emac_make_bootlist(void)
2829 struct device_node *np = NULL;
2830 int j, max, i = 0, k;
2831 int cell_indices[EMAC_BOOT_LIST_SIZE];
2834 while((np = of_find_all_nodes(np)) != NULL) {
2837 if (of_match_node(emac_match, np) == NULL)
2839 if (of_get_property(np, "unused", NULL))
2841 idx = of_get_property(np, "cell-index", NULL);
2844 cell_indices[i] = *idx;
2845 emac_boot_list[i++] = of_node_get(np);
2846 if (i >= EMAC_BOOT_LIST_SIZE) {
2853 /* Bubble sort them (doh, what a creative algorithm :-) */
2854 for (i = 0; max > 1 && (i < (max - 1)); i++)
2855 for (j = i; j < max; j++) {
2856 if (cell_indices[i] > cell_indices[j]) {
2857 np = emac_boot_list[i];
2858 emac_boot_list[i] = emac_boot_list[j];
2859 emac_boot_list[j] = np;
2860 k = cell_indices[i];
2861 cell_indices[i] = cell_indices[j];
2862 cell_indices[j] = k;
2867 static int __init emac_init(void)
2871 printk(KERN_INFO DRV_DESC ", version " DRV_VERSION "\n");
2873 /* Init debug stuff */
2876 /* Build EMAC boot list */
2877 emac_make_bootlist();
2879 /* Init submodules */
2892 rc = of_register_platform_driver(&emac_driver);
2910 static void __exit emac_exit(void)
2914 of_unregister_platform_driver(&emac_driver);
2922 /* Destroy EMAC boot list */
2923 for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
2924 if (emac_boot_list[i])
2925 of_node_put(emac_boot_list[i]);
2928 module_init(emac_init);
2929 module_exit(emac_exit);