/*
 * drivers/net/ibm_newemac/core.c
 *
 * Driver for PowerPC 4xx on-chip ethernet controller.
 *
 * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
 *                <benh@kernel.crashing.org>
 *
 * Based on the arch/ppc version of the driver:
 *
 * Copyright (c) 2004, 2005 Zultys Technologies.
 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
 *
 * Based on original work by
 *	Matt Porter <mporter@kernel.crashing.org>
 *	(c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
 *	Armin Kuster <akuster@mvista.com>
 *	Johnnie Peters <jpeters@mvista.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/bitops.h>
#include <linux/workqueue.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/uaccess.h>

#include "core.h"
/*
 * Lack of dma_unmap_???? calls is intentional.
 *
 * API-correct usage requires additional support state information to be
 * maintained for every RX and TX buffer descriptor (BD). Unfortunately, due to
 * the EMAC design (e.g. a TX buffer passed from the network stack can be split
 * into several BDs, dma_map_single/dma_map_page can be used to map a
 * particular BD), maintaining such information will add additional overhead.
 * The current DMA API implementation for 4xx processors only ensures cache
 * coherency, and the dma_unmap_???? routines are empty and are likely to stay
 * this way. I decided to omit dma_unmap_??? calls because I don't want to add
 * additional complexity just for the sake of following some abstract API, when
 * it doesn't add any real benefit to the driver. I understand that this
 * decision may be controversial, but I really tried to make the code
 * API-correct and efficient at the same time and didn't come up with code
 * I liked :(. --ebs
 */
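/*
 * For illustration only (this is NOT what the driver does): the per-BD state
 * the comment above alludes to would look roughly like the sketch below,
 * with every map recording its handle and length so the completion paths
 * could undo it. All names here are made up.
 *
 *	struct emac_bd_unmap {
 *		dma_addr_t	addr;
 *		u16		len;
 *	};
 *	static struct emac_bd_unmap tx_unmap[NUM_TX_BUFF];
 *
 *	// when mapping slot i:
 *	tx_unmap[i].addr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
 *	tx_unmap[i].len = len;
 *
 *	// in the TX completion path:
 *	dma_unmap_single(dev, tx_unmap[i].addr, tx_unmap[i].len,
 *			 DMA_TO_DEVICE);
 */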
#define DRV_NAME        "emac"
#define DRV_VERSION     "3.54"
#define DRV_DESC        "PPC 4xx OCP EMAC driver"

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR
    ("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>");
MODULE_LICENSE("GPL");
/*
 * PPC64 doesn't (yet) have a cacheable_memcpy
 */
#ifdef CONFIG_PPC64
#define cacheable_memcpy(d,s,n) memcpy((d),(s),(n))
#endif
/* minimum number of free TX descriptors required to wake up TX process */
#define EMAC_TX_WAKEUP_THRESH		(NUM_TX_BUFF / 4)
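/* I.e. a stopped TX queue is only woken once the number of in-flight
 * descriptors has dropped below a quarter of the ring; see the
 * dev->tx_cnt < EMAC_TX_WAKEUP_THRESH test in emac_poll_tx(). */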
/* If packet size is less than this number, we allocate a small skb and copy
 * the packet contents into it instead of just sending the original big skb up.
 */
#define EMAC_RX_COPY_THRESH		CONFIG_IBM_NEW_EMAC_RX_COPY_THRESHOLD
/* Since multiple EMACs share MDIO lines in various ways, we need
 * to avoid re-using the same PHY ID in cases where the arch didn't
 * setup precise phy_map entries.
 *
 * XXX This is something that needs to be reworked as we can have multiple
 * EMAC "sets" (multiple ASICs containing several EMACs) though we can
 * probably require in that case to have explicit PHY IDs in the device-tree
 */
static u32 busy_phy_map;
static DEFINE_MUTEX(emac_phy_map_lock);
/* This is the wait queue used to wait on any event related to probe, that
 * is discovery of MALs, other EMACs, ZMII/RGMIIs, etc...
 */
static DECLARE_WAIT_QUEUE_HEAD(emac_probe_wait);
/* Having stable interface names is a doomed idea. However, it would be nice
 * if we didn't have completely random interface names at boot too :-) It's
 * just a matter of making everybody's life easier. Since we are doing
 * threaded probing, it's a bit harder though. The base idea here is that
 * we make up a list of all emacs in the device-tree before we register the
 * driver. Every emac will then wait for the previous one in the list to
 * initialize before itself. We should also keep that list ordered by
 * cell_index.
 * That list is only 4 entries long, meaning that additional EMACs don't
 * get ordering guarantees unless EMAC_BOOT_LIST_SIZE is increased.
 */

#define EMAC_BOOT_LIST_SIZE	4
static struct device_node *emac_boot_list[EMAC_BOOT_LIST_SIZE];
/* How long should I wait for dependent devices ? */
#define EMAC_PROBE_DEP_TIMEOUT	(HZ * 5)
/* I don't want to litter system log with timeout errors
 * when we have brain-damaged PHY.
 */
static inline void emac_report_timeout_error(struct emac_instance *dev,
					     const char *error)
{
	if (net_ratelimit())
		printk(KERN_ERR "%s: %s\n", dev->ndev->name, error);
}
/* PHY polling intervals */
#define PHY_POLL_LINK_ON	HZ
#define PHY_POLL_LINK_OFF	(HZ / 5)
/* Graceful stop timeouts in us.
 * We should allow up to 1 frame time (full-duplex, ignoring collisions)
 */
#define STOP_TIMEOUT_10		1230
#define STOP_TIMEOUT_100	124
#define STOP_TIMEOUT_1000	13
#define STOP_TIMEOUT_1000_JUMBO	73
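/* A worked check of the values above (illustrative, not from the original
 * author): a maximum-length frame is 1518 bytes, or ~1538 byte times once
 * preamble, SFD and the inter-frame gap are counted. 1538 * 8 bits at
 * 10 Mbps is ~1230 us, at 100 Mbps ~124 us and at 1 Gbps ~13 us; a
 * ~9000-byte jumbo frame at gigabit speed works out to ~73 us. */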
static unsigned char default_mcast_addr[] = {
	0x01, 0x80, 0xC2, 0x00, 0x00, 0x01
};
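/* 01:80:C2:00:00:01 above is the IEEE 802.3x flow-control (PAUSE) multicast
 * address; emac_configure() adds it via dev_mc_add() so that incoming PAUSE
 * frames pass the multicast filter. */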
/* Please, keep in sync with struct ibm_emac_stats/ibm_emac_error_stats */
static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = {
	"rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum",
	"tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom",
	"rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu",
	"rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet",
	"rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error",
	"rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range",
	"rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun",
	"rx_bad_packet", "rx_runt_packet", "rx_short_event",
	"rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long",
	"rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors",
	"tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral",
	"tx_bd_excessive_collisions", "tx_bd_late_collision",
	"tx_bd_multple_collisions", "tx_bd_single_collision",
	"tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe",
	"tx_errors"
};
static irqreturn_t emac_irq(int irq, void *dev_instance);
static void emac_clean_tx_ring(struct emac_instance *dev);
static void __emac_set_multicast_list(struct emac_instance *dev);
static inline int emac_phy_supports_gige(int phy_mode)
{
	return  phy_mode == PHY_MODE_GMII ||
		phy_mode == PHY_MODE_RGMII ||
		phy_mode == PHY_MODE_TBI ||
		phy_mode == PHY_MODE_RTBI;
}

static inline int emac_phy_gpcs(int phy_mode)
{
	return  phy_mode == PHY_MODE_TBI ||
		phy_mode == PHY_MODE_RTBI;
}
static inline void emac_tx_enable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "tx_enable" NL);

	r = in_be32(&p->mr0);
	if (!(r & EMAC_MR0_TXE))
		out_be32(&p->mr0, r | EMAC_MR0_TXE);
}
static void emac_tx_disable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "tx_disable" NL);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_TXE) {
		int n = dev->stop_timeout;
		out_be32(&p->mr0, r & ~EMAC_MR0_TXE);
		while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n) {
			udelay(1);
			--n;
		}
		if (unlikely(!n))
			emac_report_timeout_error(dev, "TX disable timeout");
	}
}
static void emac_rx_enable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	if (unlikely(test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags)))
		goto out;

	DBG(dev, "rx_enable" NL);

	r = in_be32(&p->mr0);
	if (!(r & EMAC_MR0_RXE)) {
		if (unlikely(!(r & EMAC_MR0_RXI))) {
			/* Wait if previous async disable is still in progress */
			int n = dev->stop_timeout;
			while (!(r = in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
				udelay(1);
				--n;
			}
			if (unlikely(!n))
				emac_report_timeout_error(dev,
							  "RX disable timeout");
		}
		out_be32(&p->mr0, r | EMAC_MR0_RXE);
	}
 out:
	;
}
static void emac_rx_disable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "rx_disable" NL);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_RXE) {
		int n = dev->stop_timeout;
		out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
		while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
			udelay(1);
			--n;
		}
		if (unlikely(!n))
			emac_report_timeout_error(dev, "RX disable timeout");
	}
}
static inline void emac_netif_stop(struct emac_instance *dev)
{
	netif_tx_lock_bh(dev->ndev);
	dev->no_mcast = 1;
	netif_tx_unlock_bh(dev->ndev);
	dev->ndev->trans_start = jiffies;	/* prevent tx timeout */
	mal_poll_disable(dev->mal, &dev->commac);
	netif_tx_disable(dev->ndev);
}
static inline void emac_netif_start(struct emac_instance *dev)
{
	netif_tx_lock_bh(dev->ndev);
	dev->no_mcast = 0;
	if (dev->mcast_pending && netif_running(dev->ndev))
		__emac_set_multicast_list(dev);
	netif_tx_unlock_bh(dev->ndev);

	netif_wake_queue(dev->ndev);

	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (taken from tg3... though the case where that is wrong is
	 *  not terribly harmful)
	 */
	mal_poll_enable(dev->mal, &dev->commac);
}
static inline void emac_rx_disable_async(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "rx_disable_async" NL);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_RXE)
		out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
}
static int emac_reset(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	int n = 20;

	DBG(dev, "reset" NL);

	if (!dev->reset_failed) {
		/* 40x erratum suggests stopping RX channel before reset,
		 * we stop TX as well
		 */
		emac_rx_disable(dev);
		emac_tx_disable(dev);
	}

	out_be32(&p->mr0, EMAC_MR0_SRST);
	while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n)
		--n;

	if (n) {
		dev->reset_failed = 0;
		return 0;
	} else {
		emac_report_timeout_error(dev, "reset timeout");
		dev->reset_failed = 1;
		return -ETIMEDOUT;
	}
}
static void emac_hash_mc(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u16 gaht[4] = { 0 };
	struct dev_mc_list *dmi;

	DBG(dev, "hash_mc %d" NL, dev->ndev->mc_count);

	for (dmi = dev->ndev->mc_list; dmi; dmi = dmi->next) {
		int bit;
		DBG2(dev, "mc %02x:%02x:%02x:%02x:%02x:%02x" NL,
		     dmi->dmi_addr[0], dmi->dmi_addr[1], dmi->dmi_addr[2],
		     dmi->dmi_addr[3], dmi->dmi_addr[4], dmi->dmi_addr[5]);

		bit = 63 - (ether_crc(ETH_ALEN, dmi->dmi_addr) >> 26);
		gaht[bit >> 4] |= 0x8000 >> (bit & 0x0f);
	}
	out_be32(&p->gaht1, gaht[0]);
	out_be32(&p->gaht2, gaht[1]);
	out_be32(&p->gaht3, gaht[2]);
	out_be32(&p->gaht4, gaht[3]);
}
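/* How the hash above works (illustrative note): the four 16-bit GAHT
 * registers form a 64-entry hash table. "bit" comes from the top six bits of
 * the Ethernet CRC of the address, inverted into a 0..63 index; bit >> 4
 * then selects one of the four registers and 0x8000 >> (bit & 0x0f) the bit
 * within it. */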
static inline u32 emac_iff2rmr(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	u32 r;

	r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE;

	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r |= EMAC4_RMR_BASE;
	else
		r |= EMAC_RMR_BASE;

	if (ndev->flags & IFF_PROMISC)
		r |= EMAC_RMR_PME;
	else if (ndev->flags & IFF_ALLMULTI || ndev->mc_count > 32)
		r |= EMAC_RMR_PMME;
	else if (ndev->mc_count > 0)
		r |= EMAC_RMR_MAE;

	return r;
}
static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
{
	u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC_MR1_TR0_MULT;

	DBG2(dev, "__emac_calc_base_mr1" NL);

	switch(tx_size) {
	case 2048:
		ret |= EMAC_MR1_TFS_2K;
		break;
	default:
		printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
		       dev->ndev->name, tx_size);
	}

	switch(rx_size) {
	case 16384:
		ret |= EMAC_MR1_RFS_16K;
		break;
	case 4096:
		ret |= EMAC_MR1_RFS_4K;
		break;
	default:
		printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
		       dev->ndev->name, rx_size);
	}

	return ret;
}
static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
{
	u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC4_MR1_TR |
		EMAC4_MR1_OBCI(dev->opb_bus_freq / 1000000);

	DBG2(dev, "__emac4_calc_base_mr1" NL);

	switch(tx_size) {
	case 4096:
		ret |= EMAC4_MR1_TFS_4K;
		break;
	case 2048:
		ret |= EMAC4_MR1_TFS_2K;
		break;
	default:
		printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
		       dev->ndev->name, tx_size);
	}

	switch(rx_size) {
	case 16384:
		ret |= EMAC4_MR1_RFS_16K;
		break;
	case 4096:
		ret |= EMAC4_MR1_RFS_4K;
		break;
	case 2048:
		ret |= EMAC4_MR1_RFS_2K;
		break;
	default:
		printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
		       dev->ndev->name, rx_size);
	}

	return ret;
}
static u32 emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
{
	return emac_has_feature(dev, EMAC_FTR_EMAC4) ?
		__emac4_calc_base_mr1(dev, tx_size, rx_size) :
		__emac_calc_base_mr1(dev, tx_size, rx_size);
}
static inline u32 emac_calc_trtr(struct emac_instance *dev, unsigned int size)
{
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		return ((size >> 6) - 1) << EMAC_TRTR_SHIFT_EMAC4;
	else
		return ((size >> 6) - 1) << EMAC_TRTR_SHIFT;
}
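/* Illustrative note: the TX request threshold is encoded in units of 64
 * bytes, minus one. emac_configure() passes tx_size / 2, so e.g. a 2 KiB TX
 * FIFO yields (1024 >> 6) - 1 = 15; only the bit position of the field
 * differs between EMAC and EMAC4. */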
static inline u32 emac_calc_rwmr(struct emac_instance *dev,
				 unsigned int low, unsigned int high)
{
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		return (low << 22) | ( (high & 0x3ff) << 6);
	else
		return (low << 23) | ( (high & 0x1ff) << 7);
}
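/* Worked example (assuming a 16-byte fifo_entry_size, which is a typical
 * value, not something this file guarantees): for a 4 KiB RX FIFO,
 * emac_configure() requests a low-water mark of 4096/8/16 = 32 and a
 * high-water mark of 4096/4/16 = 64 FIFO entries; EMAC and EMAC4 differ
 * only in the position and width of the two fields. */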
static int emac_configure(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	struct net_device *ndev = dev->ndev;
	int tx_size, rx_size, link = netif_carrier_ok(dev->ndev);
	u32 r, mr1 = 0;

	DBG(dev, "configure" NL);

	if (!link) {
		out_be32(&p->mr1, in_be32(&p->mr1)
			 | EMAC_MR1_FDE | EMAC_MR1_ILE);
		udelay(100);
	} else if (emac_reset(dev) < 0)
		return -ETIMEDOUT;

	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		tah_reset(dev->tah_dev);

	DBG(dev, " link = %d duplex = %d, pause = %d, asym_pause = %d\n",
	    link, dev->phy.duplex, dev->phy.pause, dev->phy.asym_pause);
	/* Default fifo sizes */
	tx_size = dev->tx_fifo_size;
	rx_size = dev->rx_fifo_size;

	/* No link, force loopback */
	if (!link)
		mr1 = EMAC_MR1_FDE | EMAC_MR1_ILE;

	/* Check for full duplex */
	else if (dev->phy.duplex == DUPLEX_FULL)
		mr1 |= EMAC_MR1_FDE | EMAC_MR1_MWSW_001;
	/* Adjust fifo sizes, mr1 and timeouts based on link speed */
	dev->stop_timeout = STOP_TIMEOUT_10;
	switch (dev->phy.speed) {
	case SPEED_1000:
		if (emac_phy_gpcs(dev->phy.mode)) {
			mr1 |= EMAC_MR1_MF_1000GPCS |
				EMAC_MR1_MF_IPPA(dev->phy.address);

			/* Put some arbitrary OUI, Manuf & Rev IDs so we can
			 * identify this GPCS PHY later.
			 */
			out_be32(&p->ipcr, 0xdeadbeef);
		} else
			mr1 |= EMAC_MR1_MF_1000;

		/* Extended fifo sizes */
		tx_size = dev->tx_fifo_size_gige;
		rx_size = dev->rx_fifo_size_gige;

		if (dev->ndev->mtu > ETH_DATA_LEN) {
			mr1 |= EMAC_MR1_JPSM;
			dev->stop_timeout = STOP_TIMEOUT_1000_JUMBO;
		} else
			dev->stop_timeout = STOP_TIMEOUT_1000;
		break;
	case SPEED_100:
		mr1 |= EMAC_MR1_MF_100;
		dev->stop_timeout = STOP_TIMEOUT_100;
		break;
	default: /* make gcc happy */
		break;
	}
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_set_speed(dev->rgmii_dev, dev->rgmii_port,
				dev->phy.speed);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_set_speed(dev->zmii_dev, dev->zmii_port, dev->phy.speed);
	/* An erratum on 40x forces us to NOT use integrated flow control,
	 * let's hope it works on 44x ;)
	 */
	if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x) &&
	    dev->phy.duplex == DUPLEX_FULL) {
		if (dev->phy.pause)
			mr1 |= EMAC_MR1_EIFC | EMAC_MR1_APP;
		else if (dev->phy.asym_pause)
			mr1 |= EMAC_MR1_APP;
	}

	/* Add base settings & fifo sizes & program MR1 */
	mr1 |= emac_calc_base_mr1(dev, tx_size, rx_size);
	out_be32(&p->mr1, mr1);
	/* Set individual MAC address */
	out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
	out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
		 (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
		 ndev->dev_addr[5]);

	/* VLAN Tag Protocol ID */
	out_be32(&p->vtpid, 0x8100);

	/* Receive mode register */
	r = emac_iff2rmr(ndev);
	if (r & EMAC_RMR_MAE)
		emac_hash_mc(dev);
	out_be32(&p->rmr, r);
	/* FIFOs thresholds */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
			       tx_size / 2 / dev->fifo_entry_size);
	else
		r = EMAC_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
			      tx_size / 2 / dev->fifo_entry_size);
	out_be32(&p->tmr1, r);
	out_be32(&p->trtr, emac_calc_trtr(dev, tx_size / 2));
	/* PAUSE frame is sent when RX FIFO reaches its high-water mark,
	   there should be still enough space in FIFO to allow our link
	   partner time to process this frame and also time to send PAUSE
	   frame itself.

	   Here is the worst case scenario for the RX FIFO "headroom"
	   (from "The Switch Book") (100Mbps, without preamble, inter-frame gap):

	   1) One maximum-length frame on TX                    1522 bytes
	   2) One PAUSE frame time                                64 bytes
	   3) PAUSE frame decode time allowance                   64 bytes
	   4) One maximum-length frame on RX                    1522 bytes
	   5) Round-trip propagation delay of the link (100Mb)    15 bytes
	   ----------
	   3187 bytes

	   I chose to set high-water mark to RX_FIFO_SIZE / 4 (1024 bytes)
	   low-water mark  to RX_FIFO_SIZE / 8 (512 bytes)
	 */
	r = emac_calc_rwmr(dev, rx_size / 8 / dev->fifo_entry_size,
			   rx_size / 4 / dev->fifo_entry_size);
	out_be32(&p->rwmr, r);

	/* Set PAUSE timer to the maximum */
	out_be32(&p->ptr, 0xffff);
	/* Set interrupt mask */
	r = EMAC_ISR_OVR | EMAC_ISR_BP | EMAC_ISR_SE |
		EMAC_ISR_ALE | EMAC_ISR_BFCS | EMAC_ISR_PTLE | EMAC_ISR_ORE |
		EMAC_ISR_IRE | EMAC_ISR_TE;
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r |= EMAC4_ISR_TXPE | EMAC4_ISR_RXPE /* | EMAC4_ISR_TXUE |
						     EMAC4_ISR_RXOE | */;
	out_be32(&p->iser,  r);

	/* We need to take GPCS PHY out of isolate mode after EMAC reset */
	if (emac_phy_gpcs(dev->phy.mode))
		emac_mii_reset_phy(&dev->phy);

	/* Required for Pause packet support in EMAC */
	dev_mc_add(ndev, default_mcast_addr, sizeof(default_mcast_addr), 1);

	return 0;
}
static void emac_reinitialize(struct emac_instance *dev)
{
	DBG(dev, "reinitialize" NL);

	emac_netif_stop(dev);
	if (!emac_configure(dev)) {
		emac_rx_enable(dev);
		emac_tx_enable(dev);
	}
	emac_netif_start(dev);
}
static void emac_full_tx_reset(struct emac_instance *dev)
{
	DBG(dev, "full_tx_reset" NL);

	emac_tx_disable(dev);
	mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
	emac_clean_tx_ring(dev);
	dev->tx_cnt = dev->tx_slot = dev->ack_slot = 0;

	emac_configure(dev);

	mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
	emac_tx_enable(dev);
	emac_rx_enable(dev);
}
static void emac_reset_work(struct work_struct *work)
{
	struct emac_instance *dev = container_of(work, struct emac_instance, reset_work);

	DBG(dev, "reset_work" NL);

	mutex_lock(&dev->link_lock);
	if (dev->opened) {
		emac_netif_stop(dev);
		emac_full_tx_reset(dev);
		emac_netif_start(dev);
	}
	mutex_unlock(&dev->link_lock);
}
static void emac_tx_timeout(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	DBG(dev, "tx_timeout" NL);

	schedule_work(&dev->reset_work);
}
static inline int emac_phy_done(struct emac_instance *dev, u32 stacr)
{
	int done = !!(stacr & EMAC_STACR_OC);

	if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
		done = !done;

	return done;
}
static int __emac_mdio_read(struct emac_instance *dev, u8 id, u8 reg)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r = 0;
	int n, err = -ETIMEDOUT;

	mutex_lock(&dev->mdio_lock);

	DBG2(dev, "mdio_read(%02x,%02x)" NL, id, reg);

	/* Enable proper MDIO port */
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);

	/* Wait for management interface to become idle */
	n = 20;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait idle\n");
			goto bail;
		}
	}

	/* Issue read command */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_STACR_BASE(dev->opb_bus_freq);
	else
		r = EMAC_STACR_BASE(dev->opb_bus_freq);
	if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
		r |= EMAC_STACR_OC;
	if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
		r |= EMACX_STACR_STAC_READ;
	else
		r |= EMAC_STACR_STAC_READ;
	r |= (reg & EMAC_STACR_PRA_MASK)
		| ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT);
	out_be32(&p->stacr, r);

	/* Wait for read to complete */
	n = 200;
	while (!emac_phy_done(dev, (r = in_be32(&p->stacr)))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait complete\n");
			goto bail;
		}
	}

	if (unlikely(r & EMAC_STACR_PHYE)) {
		DBG(dev, "mdio_read(%02x, %02x) failed" NL, id, reg);
		err = -EREMOTEIO;
		goto bail;
	}

	r = ((r >> EMAC_STACR_PHYD_SHIFT) & EMAC_STACR_PHYD_MASK);

	DBG2(dev, "mdio_read -> %04x" NL, r);
	err = 0;
 bail:
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
	mutex_unlock(&dev->mdio_lock);

	return err == 0 ? r : err;
}
static void __emac_mdio_write(struct emac_instance *dev, u8 id, u8 reg,
			      u16 val)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r = 0;
	int n, err = -ETIMEDOUT;

	mutex_lock(&dev->mdio_lock);

	DBG2(dev, "mdio_write(%02x,%02x,%04x)" NL, id, reg, val);

	/* Enable proper MDIO port */
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);

	/* Wait for management interface to be idle */
	n = 20;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait idle\n");
			goto bail;
		}
	}

	/* Issue write command */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_STACR_BASE(dev->opb_bus_freq);
	else
		r = EMAC_STACR_BASE(dev->opb_bus_freq);
	if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
		r |= EMAC_STACR_OC;
	if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
		r |= EMACX_STACR_STAC_WRITE;
	else
		r |= EMAC_STACR_STAC_WRITE;
	r |= (reg & EMAC_STACR_PRA_MASK) |
		((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT) |
		(val << EMAC_STACR_PHYD_SHIFT);
	out_be32(&p->stacr, r);

	/* Wait for write to complete */
	n = 200;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait complete\n");
			goto bail;
		}
	}
	err = 0;
 bail:
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
	mutex_unlock(&dev->mdio_lock);
}
static int emac_mdio_read(struct net_device *ndev, int id, int reg)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int res;

	res = __emac_mdio_read(dev->mdio_instance ? dev->mdio_instance : dev,
			       (u8) id, (u8) reg);
	return res;
}

static void emac_mdio_write(struct net_device *ndev, int id, int reg, int val)
{
	struct emac_instance *dev = netdev_priv(ndev);

	__emac_mdio_write(dev->mdio_instance ? dev->mdio_instance : dev,
			  (u8) id, (u8) reg, (u16) val);
}
/* Tx lock BH */
static void __emac_set_multicast_list(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 rmr = emac_iff2rmr(dev->ndev);

	DBG(dev, "__multicast %08x" NL, rmr);

	/* I decided to relax register access rules here to avoid
	 * full EMAC reset.
	 *
	 * There is a real problem with EMAC4 core if we use MWSW_001 bit
	 * in MR1 register and do a full EMAC reset.
	 * One TX BD status update is delayed and, after EMAC reset, it
	 * never happens, resulting in TX hung (it'll be recovered by TX
	 * timeout handler eventually, but this is just gross).
	 * So we either have to do full TX reset or try to cheat here :)
	 *
	 * The only required change is to RX mode register, so I *think* all
	 * we need is just to stop RX channel. This seems to work on all
	 * tested SoCs.                                            --ebs
	 *
	 * If we need the full reset, we might just trigger the workqueue
	 * and do it async... a bit nasty but should work --BenH
	 */
	dev->mcast_pending = 0;
	emac_rx_disable(dev);
	if (rmr & EMAC_RMR_MAE)
		emac_hash_mc(dev);
	out_be32(&p->rmr, rmr);
	emac_rx_enable(dev);
}
/* Tx lock BH */
static void emac_set_multicast_list(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	DBG(dev, "multicast" NL);

	BUG_ON(!netif_running(dev->ndev));

	if (dev->no_mcast) {
		dev->mcast_pending = 1;
		return;
	}
	__emac_set_multicast_list(dev);
}
static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu)
{
	int rx_sync_size = emac_rx_sync_size(new_mtu);
	int rx_skb_size = emac_rx_skb_size(new_mtu);
	int i, ret = 0;

	mutex_lock(&dev->link_lock);
	emac_netif_stop(dev);
	emac_rx_disable(dev);
	mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);

	if (dev->rx_sg_skb) {
		++dev->estats.rx_dropped_resize;
		dev_kfree_skb(dev->rx_sg_skb);
		dev->rx_sg_skb = NULL;
	}

	/* Make a first pass over RX ring and mark BDs ready, dropping
	 * non-processed packets on the way. We need this as a separate pass
	 * to simplify error recovery in the case of allocation failure later.
	 */
	for (i = 0; i < NUM_RX_BUFF; ++i) {
		if (dev->rx_desc[i].ctrl & MAL_RX_CTRL_FIRST)
			++dev->estats.rx_dropped_resize;

		dev->rx_desc[i].data_len = 0;
		dev->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY |
		    (i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
	}

	/* Reallocate RX ring only if bigger skb buffers are required */
	if (rx_skb_size <= dev->rx_skb_size)
		goto skip;

	/* Second pass, allocate new skbs */
	for (i = 0; i < NUM_RX_BUFF; ++i) {
		struct sk_buff *skb = alloc_skb(rx_skb_size, GFP_ATOMIC);
		if (!skb) {
			ret = -ENOMEM;
			goto oom;
		}

		BUG_ON(!dev->rx_skb[i]);
		dev_kfree_skb(dev->rx_skb[i]);

		skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
		dev->rx_desc[i].data_ptr =
		    dma_map_single(&dev->ofdev->dev, skb->data - 2, rx_sync_size,
				   DMA_FROM_DEVICE) + 2;
		dev->rx_skb[i] = skb;
	}
 skip:
	/* Check if we need to change "Jumbo" bit in MR1 */
	if ((new_mtu > ETH_DATA_LEN) ^ (dev->ndev->mtu > ETH_DATA_LEN)) {
		/* This is to prevent starting RX channel in emac_rx_enable() */
		set_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);

		dev->ndev->mtu = new_mtu;
		emac_full_tx_reset(dev);
	}

	mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(new_mtu));
 oom:
	/* Restart RX */
	clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
	dev->rx_slot = 0;
	mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
	emac_rx_enable(dev);
	emac_netif_start(dev);
	mutex_unlock(&dev->link_lock);

	return ret;
}
/* Process ctx, rtnl_lock semaphore */
static int emac_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int ret = 0;

	if (new_mtu < EMAC_MIN_MTU || new_mtu > dev->max_mtu)
		return -EINVAL;

	DBG(dev, "change_mtu(%d)" NL, new_mtu);

	if (netif_running(ndev)) {
		/* Check if we really need to reinitialize RX ring */
		if (emac_rx_skb_size(ndev->mtu) != emac_rx_skb_size(new_mtu))
			ret = emac_resize_rx_ring(dev, new_mtu);
	}

	if (!ret) {
		ndev->mtu = new_mtu;
		dev->rx_skb_size = emac_rx_skb_size(new_mtu);
		dev->rx_sync_size = emac_rx_sync_size(new_mtu);
	}

	return ret;
}
static void emac_clean_tx_ring(struct emac_instance *dev)
{
	int i;

	for (i = 0; i < NUM_TX_BUFF; ++i) {
		if (dev->tx_skb[i]) {
			dev_kfree_skb(dev->tx_skb[i]);
			dev->tx_skb[i] = NULL;
			if (dev->tx_desc[i].ctrl & MAL_TX_CTRL_READY)
				++dev->estats.tx_dropped;
		}
		dev->tx_desc[i].ctrl = 0;
		dev->tx_desc[i].data_ptr = 0;
	}
}
static void emac_clean_rx_ring(struct emac_instance *dev)
{
	int i;

	for (i = 0; i < NUM_RX_BUFF; ++i)
		if (dev->rx_skb[i]) {
			dev->rx_desc[i].ctrl = 0;
			dev_kfree_skb(dev->rx_skb[i]);
			dev->rx_skb[i] = NULL;
			dev->rx_desc[i].data_ptr = 0;
		}

	if (dev->rx_sg_skb) {
		dev_kfree_skb(dev->rx_sg_skb);
		dev->rx_sg_skb = NULL;
	}
}
static inline int emac_alloc_rx_skb(struct emac_instance *dev, int slot,
				    gfp_t flags)
{
	struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags);
	if (unlikely(!skb))
		return -ENOMEM;

	dev->rx_skb[slot] = skb;
	dev->rx_desc[slot].data_len = 0;

	skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
	dev->rx_desc[slot].data_ptr =
	    dma_map_single(&dev->ofdev->dev, skb->data - 2, dev->rx_sync_size,
			   DMA_FROM_DEVICE) + 2;
	wmb();
	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);

	return 0;
}
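/* A note on the +2/-2 arithmetic above (an inferred explanation): reserving
 * EMAC_RX_SKB_HEADROOM + 2 bytes and then pointing the descriptor at
 * (mapping of skb->data - 2) + 2 shifts the received Ethernet header by two
 * bytes, the usual NET_IP_ALIGN-style trick that leaves the IP header
 * word-aligned while the DMA mapping itself starts on an aligned boundary. */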
static void emac_print_link_status(struct emac_instance *dev)
{
	if (netif_carrier_ok(dev->ndev))
		printk(KERN_INFO "%s: link is up, %d %s%s\n",
		       dev->ndev->name, dev->phy.speed,
		       dev->phy.duplex == DUPLEX_FULL ? "FDX" : "HDX",
		       dev->phy.pause ? ", pause enabled" :
		       dev->phy.asym_pause ? ", asymmetric pause enabled" : "");
	else
		printk(KERN_INFO "%s: link is down\n", dev->ndev->name);
}
/* Process ctx, rtnl_lock semaphore */
static int emac_open(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int err, i;

	DBG(dev, "open" NL);

	/* Setup error IRQ handler */
	err = request_irq(dev->emac_irq, emac_irq, 0, "EMAC", dev);
	if (err) {
		printk(KERN_ERR "%s: failed to request IRQ %d\n",
		       ndev->name, dev->emac_irq);
		return err;
	}

	/* Allocate RX ring */
	for (i = 0; i < NUM_RX_BUFF; ++i)
		if (emac_alloc_rx_skb(dev, i, GFP_KERNEL)) {
			printk(KERN_ERR "%s: failed to allocate RX ring\n",
			       ndev->name);
			goto oom;
		}

	dev->tx_cnt = dev->tx_slot = dev->ack_slot = dev->rx_slot = 0;
	clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
	dev->rx_sg_skb = NULL;

	mutex_lock(&dev->link_lock);
	dev->opened = 1;

	/* Start PHY polling now.
	 */
	if (dev->phy.address >= 0) {
		int link_poll_interval;
		if (dev->phy.def->ops->poll_link(&dev->phy)) {
			dev->phy.def->ops->read_link(&dev->phy);
			netif_carrier_on(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_ON;
		} else {
			netif_carrier_off(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_OFF;
		}
		dev->link_polling = 1;
		wmb();
		schedule_delayed_work(&dev->link_work, link_poll_interval);
		emac_print_link_status(dev);
	} else
		netif_carrier_on(dev->ndev);

	emac_configure(dev);
	mal_poll_add(dev->mal, &dev->commac);
	mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
	mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(ndev->mtu));
	mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
	emac_tx_enable(dev);
	emac_rx_enable(dev);
	emac_netif_start(dev);

	mutex_unlock(&dev->link_lock);

	return 0;
 oom:
	emac_clean_rx_ring(dev);
	free_irq(dev->emac_irq, dev);

	return -ENOMEM;
}
static int emac_link_differs(struct emac_instance *dev)
{
	u32 r = in_be32(&dev->emacp->mr1);

	int duplex = r & EMAC_MR1_FDE ? DUPLEX_FULL : DUPLEX_HALF;
	int speed, pause, asym_pause;

	if (r & EMAC_MR1_MF_1000)
		speed = SPEED_1000;
	else if (r & EMAC_MR1_MF_100)
		speed = SPEED_100;
	else
		speed = SPEED_10;

	switch (r & (EMAC_MR1_EIFC | EMAC_MR1_APP)) {
	case (EMAC_MR1_EIFC | EMAC_MR1_APP):
		pause = 1;
		asym_pause = 0;
		break;
	case EMAC_MR1_APP:
		pause = 0;
		asym_pause = 1;
		break;
	default:
		pause = asym_pause = 0;
	}
	return speed != dev->phy.speed || duplex != dev->phy.duplex ||
	    pause != dev->phy.pause || asym_pause != dev->phy.asym_pause;
}
static void emac_link_timer(struct work_struct *work)
{
	struct emac_instance *dev =
		container_of((struct delayed_work *)work,
			     struct emac_instance, link_work);
	int link_poll_interval;

	mutex_lock(&dev->link_lock);
	DBG2(dev, "link timer" NL);

	if (!dev->opened)
		goto bail;

	if (dev->phy.def->ops->poll_link(&dev->phy)) {
		if (!netif_carrier_ok(dev->ndev)) {
			/* Get new link parameters */
			dev->phy.def->ops->read_link(&dev->phy);

			netif_carrier_on(dev->ndev);
			emac_netif_stop(dev);
			emac_full_tx_reset(dev);
			emac_netif_start(dev);
			emac_print_link_status(dev);
		}
		link_poll_interval = PHY_POLL_LINK_ON;
	} else {
		if (netif_carrier_ok(dev->ndev)) {
			netif_carrier_off(dev->ndev);
			netif_tx_disable(dev->ndev);
			emac_reinitialize(dev);
			emac_print_link_status(dev);
		}
		link_poll_interval = PHY_POLL_LINK_OFF;
	}
	schedule_delayed_work(&dev->link_work, link_poll_interval);
 bail:
	mutex_unlock(&dev->link_lock);
}
static void emac_force_link_update(struct emac_instance *dev)
{
	netif_carrier_off(dev->ndev);
	smp_rmb();
	if (dev->link_polling) {
		cancel_rearming_delayed_work(&dev->link_work);
		if (dev->link_polling)
			schedule_delayed_work(&dev->link_work,  PHY_POLL_LINK_OFF);
	}
}
/* Process ctx, rtnl_lock semaphore */
static int emac_close(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	DBG(dev, "close" NL);

	if (dev->phy.address >= 0) {
		dev->link_polling = 0;
		cancel_rearming_delayed_work(&dev->link_work);
	}
	mutex_lock(&dev->link_lock);
	emac_netif_stop(dev);
	dev->opened = 0;
	mutex_unlock(&dev->link_lock);

	emac_rx_disable(dev);
	emac_tx_disable(dev);
	mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);
	mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
	mal_poll_del(dev->mal, &dev->commac);

	emac_clean_tx_ring(dev);
	emac_clean_rx_ring(dev);

	free_irq(dev->emac_irq, dev);

	return 0;
}
static inline u16 emac_tx_csum(struct emac_instance *dev,
			       struct sk_buff *skb)
{
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
	    skb->ip_summed == CHECKSUM_PARTIAL) {
		++dev->stats.tx_packets_csum;
		return EMAC_TX_CTRL_TAH_CSUM;
	}
	return 0;
}
static inline int emac_xmit_finish(struct emac_instance *dev, int len)
{
	struct emac_regs __iomem *p = dev->emacp;
	struct net_device *ndev = dev->ndev;

	/* Send the packet out. If the if makes a significant perf
	 * difference, then we can store the TMR0 value in "dev"
	 * instead
	 */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		out_be32(&p->tmr0, EMAC4_TMR0_XMIT);
	else
		out_be32(&p->tmr0, EMAC_TMR0_XMIT);

	if (unlikely(++dev->tx_cnt == NUM_TX_BUFF)) {
		netif_stop_queue(ndev);
		DBG2(dev, "stopped TX queue" NL);
	}

	ndev->trans_start = jiffies;
	++dev->stats.tx_packets;
	dev->stats.tx_bytes += len;

	return 0;
}
/* Tx lock BH */
static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	unsigned int len = skb->len;
	int slot;

	u16 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
	    MAL_TX_CTRL_LAST | emac_tx_csum(dev, skb);

	slot = dev->tx_slot++;
	if (dev->tx_slot == NUM_TX_BUFF) {
		dev->tx_slot = 0;
		ctrl |= MAL_TX_CTRL_WRAP;
	}

	DBG2(dev, "xmit(%u) %d" NL, len, slot);

	dev->tx_skb[slot] = skb;
	dev->tx_desc[slot].data_ptr = dma_map_single(&dev->ofdev->dev,
						     skb->data, len,
						     DMA_TO_DEVICE);
	dev->tx_desc[slot].data_len = (u16) len;
	wmb();
	dev->tx_desc[slot].ctrl = ctrl;

	return emac_xmit_finish(dev, len);
}
static inline int emac_xmit_split(struct emac_instance *dev, int slot,
				  u32 pd, int len, int last, u16 base_ctrl)
{
	while (1) {
		u16 ctrl = base_ctrl;
		int chunk = min(len, MAL_MAX_TX_SIZE);
		len -= chunk;

		slot = (slot + 1) % NUM_TX_BUFF;

		if (last && !len)
			ctrl |= MAL_TX_CTRL_LAST;
		if (slot == NUM_TX_BUFF - 1)
			ctrl |= MAL_TX_CTRL_WRAP;

		dev->tx_skb[slot] = NULL;
		dev->tx_desc[slot].data_ptr = pd;
		dev->tx_desc[slot].data_len = (u16) chunk;
		dev->tx_desc[slot].ctrl = ctrl;
		++dev->tx_cnt;
		if (!len)
			break;

		pd += chunk;
	}

	return slot;
}
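/* Illustrative note on the splitting above: a single MAL buffer descriptor
 * can carry at most MAL_MAX_TX_SIZE bytes, so an skb (or skb fragment)
 * longer than that is spread over consecutive descriptors; only the final
 * chunk of the final fragment gets MAL_TX_CTRL_LAST set. */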
/* Tx lock BH disabled (SG version for TAH equipped EMACs) */
static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int len = skb->len, chunk;
	int slot, i;
	u16 ctrl;
	u32 pd;

	/* This is common "fast" path */
	if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE))
		return emac_start_xmit(skb, ndev);

	len -= skb->data_len;

	/* Note, this is only an *estimation*, we can still run out of empty
	 * slots because of the additional fragmentation into
	 * MAL_MAX_TX_SIZE-sized chunks
	 */
	if (unlikely(dev->tx_cnt + nr_frags + mal_tx_chunks(len) > NUM_TX_BUFF))
		goto stop_queue;

	ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
	    emac_tx_csum(dev, skb);
	slot = dev->tx_slot;

	/* skb data */
	dev->tx_skb[slot] = NULL;
	chunk = min(len, MAL_MAX_TX_SIZE);
	dev->tx_desc[slot].data_ptr = pd =
	    dma_map_single(&dev->ofdev->dev, skb->data, len, DMA_TO_DEVICE);
	dev->tx_desc[slot].data_len = (u16) chunk;
	len -= chunk;
	if (unlikely(len))
		slot = emac_xmit_split(dev, slot, pd + chunk, len, !nr_frags,
				       ctrl);
	/* skb fragments */
	for (i = 0; i < nr_frags; ++i) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		len = frag->size;

		if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
			goto undo_frame;

		pd = dma_map_page(&dev->ofdev->dev, frag->page, frag->page_offset, len,
				  DMA_TO_DEVICE);

		slot = emac_xmit_split(dev, slot, pd, len, i == nr_frags - 1,
				       ctrl);
	}

	DBG2(dev, "xmit_sg(%u) %d - %d" NL, skb->len, dev->tx_slot, slot);

	/* Attach skb to the last slot so we don't release it too early */
	dev->tx_skb[slot] = skb;

	/* Send the packet out */
	if (dev->tx_slot == NUM_TX_BUFF - 1)
		ctrl |= MAL_TX_CTRL_WRAP;
	wmb();
	dev->tx_desc[dev->tx_slot].ctrl = ctrl;
	dev->tx_slot = (slot + 1) % NUM_TX_BUFF;

	return emac_xmit_finish(dev, skb->len);

 undo_frame:
	/* Well, too bad. Our previous estimation was overly optimistic.
	 * Undo everything.
	 */
	while (slot != dev->tx_slot) {
		dev->tx_desc[slot].ctrl = 0;
		--dev->tx_cnt;
		if (--slot < 0)
			slot = NUM_TX_BUFF - 1;
	}
	++dev->estats.tx_undo;

 stop_queue:
	netif_stop_queue(ndev);
	DBG2(dev, "stopped TX queue" NL);
	return 1;
}
/* Tx lock BHs */
static void emac_parse_tx_error(struct emac_instance *dev, u16 ctrl)
{
	struct emac_error_stats *st = &dev->estats;

	DBG(dev, "BD TX error %04x" NL, ctrl);

	++st->tx_bd_errors;
	if (ctrl & EMAC_TX_ST_BFCS)
		++st->tx_bd_bad_fcs;
	if (ctrl & EMAC_TX_ST_LCS)
		++st->tx_bd_carrier_loss;
	if (ctrl & EMAC_TX_ST_ED)
		++st->tx_bd_excessive_deferral;
	if (ctrl & EMAC_TX_ST_EC)
		++st->tx_bd_excessive_collisions;
	if (ctrl & EMAC_TX_ST_LC)
		++st->tx_bd_late_collision;
	if (ctrl & EMAC_TX_ST_MC)
		++st->tx_bd_multple_collisions;
	if (ctrl & EMAC_TX_ST_SC)
		++st->tx_bd_single_collision;
	if (ctrl & EMAC_TX_ST_UR)
		++st->tx_bd_underrun;
	if (ctrl & EMAC_TX_ST_SQE)
		++st->tx_bd_sqe;
}
/* Tx lock BHs */
static void emac_poll_tx(void *param)
{
	struct emac_instance *dev = param;
	u32 bad_mask;

	DBG2(dev, "poll_tx, %d %d" NL, dev->tx_cnt, dev->ack_slot);

	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		bad_mask = EMAC_IS_BAD_TX_TAH;
	else
		bad_mask = EMAC_IS_BAD_TX;

	netif_tx_lock_bh(dev->ndev);
	if (dev->tx_cnt) {
		u16 ctrl;
		int slot = dev->ack_slot, n = 0;
	again:
		ctrl = dev->tx_desc[slot].ctrl;
		if (!(ctrl & MAL_TX_CTRL_READY)) {
			struct sk_buff *skb = dev->tx_skb[slot];
			++n;

			if (skb) {
				dev_kfree_skb(skb);
				dev->tx_skb[slot] = NULL;
			}
			slot = (slot + 1) % NUM_TX_BUFF;

			if (unlikely(ctrl & bad_mask))
				emac_parse_tx_error(dev, ctrl);

			if (--dev->tx_cnt)
				goto again;
		}
		if (n) {
			dev->ack_slot = slot;
			if (netif_queue_stopped(dev->ndev) &&
			    dev->tx_cnt < EMAC_TX_WAKEUP_THRESH)
				netif_wake_queue(dev->ndev);

			DBG2(dev, "tx %d pkts" NL, n);
		}
	}
	netif_tx_unlock_bh(dev->ndev);
}
static inline void emac_recycle_rx_skb(struct emac_instance *dev, int slot,
				       int len)
{
	struct sk_buff *skb = dev->rx_skb[slot];

	DBG2(dev, "recycle %d %d" NL, slot, len);

	if (len)
		dma_map_single(&dev->ofdev->dev, skb->data - 2,
			       EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE);

	dev->rx_desc[slot].data_len = 0;
	wmb();
	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
}
static void emac_parse_rx_error(struct emac_instance *dev, u16 ctrl)
{
	struct emac_error_stats *st = &dev->estats;

	DBG(dev, "BD RX error %04x" NL, ctrl);

	++st->rx_bd_errors;
	if (ctrl & EMAC_RX_ST_OE)
		++st->rx_bd_overrun;
	if (ctrl & EMAC_RX_ST_BP)
		++st->rx_bd_bad_packet;
	if (ctrl & EMAC_RX_ST_RP)
		++st->rx_bd_runt_packet;
	if (ctrl & EMAC_RX_ST_SE)
		++st->rx_bd_short_event;
	if (ctrl & EMAC_RX_ST_AE)
		++st->rx_bd_alignment_error;
	if (ctrl & EMAC_RX_ST_BFCS)
		++st->rx_bd_bad_fcs;
	if (ctrl & EMAC_RX_ST_PTL)
		++st->rx_bd_packet_too_long;
	if (ctrl & EMAC_RX_ST_ORE)
		++st->rx_bd_out_of_range;
	if (ctrl & EMAC_RX_ST_IRE)
		++st->rx_bd_in_range;
}
static inline void emac_rx_csum(struct emac_instance *dev,
				struct sk_buff *skb, u16 ctrl)
{
#ifdef CONFIG_IBM_NEW_EMAC_TAH
	if (!ctrl && dev->tah_dev) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		++dev->stats.rx_packets_csum;
	}
#endif
}
static inline int emac_rx_sg_append(struct emac_instance *dev, int slot)
{
	if (likely(dev->rx_sg_skb != NULL)) {
		int len = dev->rx_desc[slot].data_len;
		int tot_len = dev->rx_sg_skb->len + len;

		if (unlikely(tot_len + 2 > dev->rx_skb_size)) {
			++dev->estats.rx_dropped_mtu;
			dev_kfree_skb(dev->rx_sg_skb);
			dev->rx_sg_skb = NULL;
		} else {
			cacheable_memcpy(skb_tail_pointer(dev->rx_sg_skb),
					 dev->rx_skb[slot]->data, len);
			skb_put(dev->rx_sg_skb, len);
			emac_recycle_rx_skb(dev, slot, len);
			return 0;
		}
	}
	emac_recycle_rx_skb(dev, slot, 0);
	return -1;
}
/* NAPI poll context */
static int emac_poll_rx(void *param, int budget)
{
	struct emac_instance *dev = param;
	int slot = dev->rx_slot, received = 0;

	DBG2(dev, "poll_rx(%d)" NL, budget);

 again:
	while (budget > 0) {
		int len;
		struct sk_buff *skb;
		u16 ctrl = dev->rx_desc[slot].ctrl;

		if (ctrl & MAL_RX_CTRL_EMPTY)
			break;

		skb = dev->rx_skb[slot];
		mb();
		len = dev->rx_desc[slot].data_len;

		if (unlikely(!MAL_IS_SINGLE_RX(ctrl)))
			goto sg;

		ctrl &= EMAC_BAD_RX_MASK;
		if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
			emac_parse_rx_error(dev, ctrl);
			++dev->estats.rx_dropped_error;
			emac_recycle_rx_skb(dev, slot, 0);
			len = 0;
			goto next;
		}

		if (len && len < EMAC_RX_COPY_THRESH) {
			struct sk_buff *copy_skb =
			    alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2, GFP_ATOMIC);
			if (unlikely(!copy_skb))
				goto oom;

			skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2);
			cacheable_memcpy(copy_skb->data - 2, skb->data - 2,
					 len + 2);
			emac_recycle_rx_skb(dev, slot, len);
			skb = copy_skb;
		} else if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC)))
			goto oom;

		skb_put(skb, len);
 push_packet:
		skb->dev = dev->ndev;
		skb->protocol = eth_type_trans(skb, dev->ndev);
		emac_rx_csum(dev, skb, ctrl);

		if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
			++dev->estats.rx_dropped_stack;
 next:
		++dev->stats.rx_packets;
 skip:
		dev->stats.rx_bytes += len;
		slot = (slot + 1) % NUM_RX_BUFF;
		--budget;
		++received;
		continue;
 sg:
		if (ctrl & MAL_RX_CTRL_FIRST) {
			BUG_ON(dev->rx_sg_skb);
			if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) {
				DBG(dev, "rx OOM %d" NL, slot);
				++dev->estats.rx_dropped_oom;
				emac_recycle_rx_skb(dev, slot, 0);
			} else {
				dev->rx_sg_skb = skb;
				skb_put(skb, len);
			}
		} else if (!emac_rx_sg_append(dev, slot) &&
			   (ctrl & MAL_RX_CTRL_LAST)) {

			skb = dev->rx_sg_skb;
			dev->rx_sg_skb = NULL;

			ctrl &= EMAC_BAD_RX_MASK;
			if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
				emac_parse_rx_error(dev, ctrl);
				++dev->estats.rx_dropped_error;
				dev_kfree_skb(skb);
				len = 0;
			} else
				goto push_packet;
		}
		goto skip;
 oom:
		DBG(dev, "rx OOM %d" NL, slot);
		/* Drop the packet and recycle skb */
		++dev->estats.rx_dropped_oom;
		emac_recycle_rx_skb(dev, slot, 0);
		goto next;
	}

	if (received) {
		DBG2(dev, "rx %d BDs" NL, received);
		dev->rx_slot = slot;
	}

	if (unlikely(budget && test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags))) {
		mb();
		if (!(dev->rx_desc[slot].ctrl & MAL_RX_CTRL_EMPTY)) {
			DBG2(dev, "rx restart" NL);
			received = 0;
			goto again;
		}

		if (dev->rx_sg_skb) {
			DBG2(dev, "dropping partial rx packet" NL);
			++dev->estats.rx_dropped_error;
			dev_kfree_skb(dev->rx_sg_skb);
			dev->rx_sg_skb = NULL;
		}

		clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
		mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
		emac_rx_enable(dev);

		dev->rx_slot = 0;
	}
	return received;
}
/* NAPI poll context */
static int emac_peek_rx(void *param)
{
	struct emac_instance *dev = param;

	return !(dev->rx_desc[dev->rx_slot].ctrl & MAL_RX_CTRL_EMPTY);
}

/* NAPI poll context */
static int emac_peek_rx_sg(void *param)
{
	struct emac_instance *dev = param;

	int slot = dev->rx_slot;
	while (1) {
		u16 ctrl = dev->rx_desc[slot].ctrl;
		if (ctrl & MAL_RX_CTRL_EMPTY)
			return 0;
		else if (ctrl & MAL_RX_CTRL_LAST)
			return 1;

		slot = (slot + 1) % NUM_RX_BUFF;

		/* I'm just being paranoid here :) */
		if (unlikely(slot == dev->rx_slot))
			return 0;
	}
}
/* Hard IRQ */
static void emac_rxde(void *param)
{
	struct emac_instance *dev = param;

	++dev->estats.rx_stopped;
	emac_rx_disable_async(dev);
}
/* Hard IRQ */
static irqreturn_t emac_irq(int irq, void *dev_instance)
{
	struct emac_instance *dev = dev_instance;
	struct emac_regs __iomem *p = dev->emacp;
	struct emac_error_stats *st = &dev->estats;
	u32 isr;

	spin_lock(&dev->lock);

	isr = in_be32(&p->isr);
	out_be32(&p->isr, isr);

	DBG(dev, "isr = %08x" NL, isr);

	if (isr & EMAC4_ISR_TXPE)
		++st->tx_parity;
	if (isr & EMAC4_ISR_RXPE)
		++st->rx_parity;
	if (isr & EMAC4_ISR_TXUE)
		++st->tx_underrun;
	if (isr & EMAC4_ISR_RXOE)
		++st->rx_fifo_overrun;
	if (isr & EMAC_ISR_OVR)
		++st->rx_overrun;
	if (isr & EMAC_ISR_BP)
		++st->rx_bad_packet;
	if (isr & EMAC_ISR_RP)
		++st->rx_runt_packet;
	if (isr & EMAC_ISR_SE)
		++st->rx_short_event;
	if (isr & EMAC_ISR_ALE)
		++st->rx_alignment_error;
	if (isr & EMAC_ISR_BFCS)
		++st->rx_bad_fcs;
	if (isr & EMAC_ISR_PTLE)
		++st->rx_packet_too_long;
	if (isr & EMAC_ISR_ORE)
		++st->rx_out_of_range;
	if (isr & EMAC_ISR_IRE)
		++st->rx_in_range;
	if (isr & EMAC_ISR_SQE)
		++st->tx_sqe;
	if (isr & EMAC_ISR_TE)
		++st->tx_errors;

	spin_unlock(&dev->lock);

	return IRQ_HANDLED;
}
static struct net_device_stats *emac_stats(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	struct emac_stats *st = &dev->stats;
	struct emac_error_stats *est = &dev->estats;
	struct net_device_stats *nst = &dev->nstats;
	unsigned long flags;

	DBG2(dev, "stats" NL);

	/* Compute "legacy" statistics */
	spin_lock_irqsave(&dev->lock, flags);
	nst->rx_packets = (unsigned long)st->rx_packets;
	nst->rx_bytes = (unsigned long)st->rx_bytes;
	nst->tx_packets = (unsigned long)st->tx_packets;
	nst->tx_bytes = (unsigned long)st->tx_bytes;
	nst->rx_dropped = (unsigned long)(est->rx_dropped_oom +
					  est->rx_dropped_error +
					  est->rx_dropped_resize +
					  est->rx_dropped_mtu);
	nst->tx_dropped = (unsigned long)est->tx_dropped;

	nst->rx_errors = (unsigned long)est->rx_bd_errors;
	nst->rx_fifo_errors = (unsigned long)(est->rx_bd_overrun +
					      est->rx_fifo_overrun +
					      est->rx_overrun);
	nst->rx_frame_errors = (unsigned long)(est->rx_bd_alignment_error +
					       est->rx_alignment_error);
	nst->rx_crc_errors = (unsigned long)(est->rx_bd_bad_fcs +
					     est->rx_bad_fcs);
	nst->rx_length_errors = (unsigned long)(est->rx_bd_runt_packet +
						est->rx_bd_short_event +
						est->rx_bd_packet_too_long +
						est->rx_bd_out_of_range +
						est->rx_bd_in_range +
						est->rx_runt_packet +
						est->rx_short_event +
						est->rx_packet_too_long +
						est->rx_out_of_range +
						est->rx_in_range);

	nst->tx_errors = (unsigned long)(est->tx_bd_errors + est->tx_errors);
	nst->tx_fifo_errors = (unsigned long)(est->tx_bd_underrun +
					      est->tx_underrun);
	nst->tx_carrier_errors = (unsigned long)est->tx_bd_carrier_loss;
	nst->collisions = (unsigned long)(est->tx_bd_excessive_deferral +
					  est->tx_bd_excessive_collisions +
					  est->tx_bd_late_collision +
					  est->tx_bd_multple_collisions);
	spin_unlock_irqrestore(&dev->lock, flags);
	return nst;
}
static struct mal_commac_ops emac_commac_ops = {
	.poll_tx = &emac_poll_tx,
	.poll_rx = &emac_poll_rx,
	.peek_rx = &emac_peek_rx,
	.rxde = &emac_rxde,
};

static struct mal_commac_ops emac_commac_sg_ops = {
	.poll_tx = &emac_poll_tx,
	.poll_rx = &emac_poll_rx,
	.peek_rx = &emac_peek_rx_sg,
	.rxde = &emac_rxde,
};
/* Ethtool support */
static int emac_ethtool_get_settings(struct net_device *ndev,
				     struct ethtool_cmd *cmd)
{
	struct emac_instance *dev = netdev_priv(ndev);

	cmd->supported = dev->phy.features;
	cmd->port = PORT_MII;
	cmd->phy_address = dev->phy.address;
	cmd->transceiver =
	    dev->phy.address >= 0 ? XCVR_EXTERNAL : XCVR_INTERNAL;

	mutex_lock(&dev->link_lock);
	cmd->advertising = dev->phy.advertising;
	cmd->autoneg = dev->phy.autoneg;
	cmd->speed = dev->phy.speed;
	cmd->duplex = dev->phy.duplex;
	mutex_unlock(&dev->link_lock);

	return 0;
}
static int emac_ethtool_set_settings(struct net_device *ndev,
				     struct ethtool_cmd *cmd)
{
	struct emac_instance *dev = netdev_priv(ndev);
	u32 f = dev->phy.features;

	DBG(dev, "set_settings(%d, %d, %d, 0x%08x)" NL,
	    cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising);

	/* Basic sanity checks */
	if (dev->phy.address < 0)
		return -EOPNOTSUPP;
	if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;
	if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
		return -EINVAL;
	if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_DISABLE) {
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_HALF
			    && !(f & SUPPORTED_10baseT_Half))
				return -EINVAL;
			if (cmd->duplex == DUPLEX_FULL
			    && !(f & SUPPORTED_10baseT_Full))
				return -EINVAL;
			break;
		case SPEED_100:
			if (cmd->duplex == DUPLEX_HALF
			    && !(f & SUPPORTED_100baseT_Half))
				return -EINVAL;
			if (cmd->duplex == DUPLEX_FULL
			    && !(f & SUPPORTED_100baseT_Full))
				return -EINVAL;
			break;
		case SPEED_1000:
			if (cmd->duplex == DUPLEX_HALF
			    && !(f & SUPPORTED_1000baseT_Half))
				return -EINVAL;
			if (cmd->duplex == DUPLEX_FULL
			    && !(f & SUPPORTED_1000baseT_Full))
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}

		mutex_lock(&dev->link_lock);
		dev->phy.def->ops->setup_forced(&dev->phy, cmd->speed,
						cmd->duplex);
		mutex_unlock(&dev->link_lock);

	} else {
		if (!(f & SUPPORTED_Autoneg))
			return -EINVAL;

		mutex_lock(&dev->link_lock);
		dev->phy.def->ops->setup_aneg(&dev->phy,
					      (cmd->advertising & f) |
					      (dev->phy.advertising &
					       (ADVERTISED_Pause |
						ADVERTISED_Asym_Pause)));
		mutex_unlock(&dev->link_lock);
	}
	emac_force_link_update(dev);

	return 0;
}
static void emac_ethtool_get_ringparam(struct net_device *ndev,
				       struct ethtool_ringparam *rp)
{
	rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF;
	rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF;
}
static void emac_ethtool_get_pauseparam(struct net_device *ndev,
					struct ethtool_pauseparam *pp)
{
	struct emac_instance *dev = netdev_priv(ndev);

	mutex_lock(&dev->link_lock);
	if ((dev->phy.features & SUPPORTED_Autoneg) &&
	    (dev->phy.advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause)))
		pp->autoneg = 1;

	if (dev->phy.duplex == DUPLEX_FULL) {
		if (dev->phy.pause)
			pp->rx_pause = pp->tx_pause = 1;
		else if (dev->phy.asym_pause)
			pp->tx_pause = 1;
	}
	mutex_unlock(&dev->link_lock);
}
static u32 emac_ethtool_get_rx_csum(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	return dev->tah_dev != NULL;
}
static int emac_get_regs_len(struct emac_instance *dev)
{
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		return sizeof(struct emac_ethtool_regs_subhdr) +
			EMAC4_ETHTOOL_REGS_SIZE;
	else
		return sizeof(struct emac_ethtool_regs_subhdr) +
			EMAC_ETHTOOL_REGS_SIZE;
}
static int emac_ethtool_get_regs_len(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int size;

	size = sizeof(struct emac_ethtool_regs_hdr) +
		emac_get_regs_len(dev) + mal_get_regs_len(dev->mal);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		size += zmii_get_regs_len(dev->zmii_dev);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		size += rgmii_get_regs_len(dev->rgmii_dev);
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		size += tah_get_regs_len(dev->tah_dev);

	return size;
}
static void *emac_dump_regs(struct emac_instance *dev, void *buf)
{
	struct emac_ethtool_regs_subhdr *hdr = buf;

	hdr->index = dev->cell_index;
	if (emac_has_feature(dev, EMAC_FTR_EMAC4)) {
		hdr->version = EMAC4_ETHTOOL_REGS_VER;
		memcpy_fromio(hdr + 1, dev->emacp, EMAC4_ETHTOOL_REGS_SIZE);
		return ((void *)(hdr + 1) + EMAC4_ETHTOOL_REGS_SIZE);
	} else {
		hdr->version = EMAC_ETHTOOL_REGS_VER;
		memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE);
		return ((void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE);
	}
}
static void emac_ethtool_get_regs(struct net_device *ndev,
				  struct ethtool_regs *regs, void *buf)
{
	struct emac_instance *dev = netdev_priv(ndev);
	struct emac_ethtool_regs_hdr *hdr = buf;

	hdr->components = 0;
	buf = hdr + 1;

	buf = mal_dump_regs(dev->mal, buf);
	buf = emac_dump_regs(dev, buf);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) {
		hdr->components |= EMAC_ETHTOOL_REGS_ZMII;
		buf = zmii_dump_regs(dev->zmii_dev, buf);
	}
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) {
		hdr->components |= EMAC_ETHTOOL_REGS_RGMII;
		buf = rgmii_dump_regs(dev->rgmii_dev, buf);
	}
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH)) {
		hdr->components |= EMAC_ETHTOOL_REGS_TAH;
		buf = tah_dump_regs(dev->tah_dev, buf);
	}
}
static int emac_ethtool_nway_reset(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int res = 0;

	DBG(dev, "nway_reset" NL);

	if (dev->phy.address < 0)
		return -EOPNOTSUPP;

	mutex_lock(&dev->link_lock);
	if (!dev->phy.autoneg) {
		res = -EINVAL;
		goto out;
	}

	dev->phy.def->ops->setup_aneg(&dev->phy, dev->phy.advertising);
 out:
	mutex_unlock(&dev->link_lock);
	emac_force_link_update(dev);
	return res;
}
static int emac_ethtool_get_stats_count(struct net_device *ndev)
{
	return EMAC_ETHTOOL_STATS_COUNT;
}

static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset,
				     u8 * buf)
{
	if (stringset == ETH_SS_STATS)
		memcpy(buf, &emac_stats_keys, sizeof(emac_stats_keys));
}
static void emac_ethtool_get_ethtool_stats(struct net_device *ndev,
					   struct ethtool_stats *estats,
					   u64 * tmp_stats)
{
	struct emac_instance *dev = netdev_priv(ndev);

	memcpy(tmp_stats, &dev->stats, sizeof(dev->stats));
	tmp_stats += sizeof(dev->stats) / sizeof(u64);
	memcpy(tmp_stats, &dev->estats, sizeof(dev->estats));
}
static void emac_ethtool_get_drvinfo(struct net_device *ndev,
				     struct ethtool_drvinfo *info)
{
	struct emac_instance *dev = netdev_priv(ndev);

	strcpy(info->driver, "ibm_emac");
	strcpy(info->version, DRV_VERSION);
	info->fw_version[0] = '\0';
	sprintf(info->bus_info, "PPC 4xx EMAC-%d %s",
		dev->cell_index, dev->ofdev->node->full_name);
	info->n_stats = emac_ethtool_get_stats_count(ndev);
	info->regdump_len = emac_ethtool_get_regs_len(ndev);
}
static const struct ethtool_ops emac_ethtool_ops = {
	.get_settings = emac_ethtool_get_settings,
	.set_settings = emac_ethtool_set_settings,
	.get_drvinfo = emac_ethtool_get_drvinfo,

	.get_regs_len = emac_ethtool_get_regs_len,
	.get_regs = emac_ethtool_get_regs,

	.nway_reset = emac_ethtool_nway_reset,

	.get_ringparam = emac_ethtool_get_ringparam,
	.get_pauseparam = emac_ethtool_get_pauseparam,

	.get_rx_csum = emac_ethtool_get_rx_csum,

	.get_strings = emac_ethtool_get_strings,
	.get_stats_count = emac_ethtool_get_stats_count,
	.get_ethtool_stats = emac_ethtool_get_ethtool_stats,

	.get_link = ethtool_op_get_link,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.get_sg = ethtool_op_get_sg,
};
static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
	struct emac_instance *dev = netdev_priv(ndev);
	uint16_t *data = (uint16_t *) & rq->ifr_ifru;

	DBG(dev, "ioctl %08x" NL, cmd);

	if (dev->phy.address < 0)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCDEVPRIVATE:
		data[0] = dev->phy.address;
		/* Fall through */
	case SIOCGMIIREG:
	case SIOCDEVPRIVATE + 1:
		data[3] = emac_mdio_read(ndev, dev->phy.address, data[1]);
		return 0;

	case SIOCSMIIREG:
	case SIOCDEVPRIVATE + 2:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		emac_mdio_write(ndev, dev->phy.address, data[1], data[2]);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
struct emac_depentry {
	u32			phandle;
	struct device_node	*node;
	struct of_device	*ofdev;
	void			*drvdata;
};

#define	EMAC_DEP_MAL_IDX	0
#define	EMAC_DEP_ZMII_IDX	1
#define	EMAC_DEP_RGMII_IDX	2
#define	EMAC_DEP_TAH_IDX	3
#define	EMAC_DEP_MDIO_IDX	4
#define	EMAC_DEP_PREV_IDX	5
#define	EMAC_DEP_COUNT		6
static int __devinit emac_check_deps(struct emac_instance *dev,
				     struct emac_depentry *deps)
{
	int i, there = 0;
	struct device_node *np;

	for (i = 0; i < EMAC_DEP_COUNT; i++) {
		/* no dependency on that item, all right */
		if (deps[i].phandle == 0) {
			there++;
			continue;
		}
		/* special case for blist as the dependency might go away */
		if (i == EMAC_DEP_PREV_IDX) {
			np = *(dev->blist - 1);
			if (np == NULL) {
				deps[i].phandle = 0;
				there++;
				continue;
			}
			if (deps[i].node == NULL)
				deps[i].node = of_node_get(np);
		}
		if (deps[i].node == NULL)
			deps[i].node = of_find_node_by_phandle(deps[i].phandle);
		if (deps[i].node == NULL)
			continue;
		if (deps[i].ofdev == NULL)
			deps[i].ofdev = of_find_device_by_node(deps[i].node);
		if (deps[i].ofdev == NULL)
			continue;
		if (deps[i].drvdata == NULL)
			deps[i].drvdata = dev_get_drvdata(&deps[i].ofdev->dev);
		if (deps[i].drvdata != NULL)
			there++;
	}
	return (there == EMAC_DEP_COUNT);
}
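/* Note on the logic above: the probe succeeds only when all EMAC_DEP_COUNT
 * dependency slots are accounted for; slots with a zero phandle mean "no
 * such dependency" and are counted as satisfied immediately, while real
 * dependencies count only once their driver has attached (drvdata set). */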
static void emac_put_deps(struct emac_instance *dev)
{
	if (dev->mal_dev)
		of_dev_put(dev->mal_dev);
	if (dev->zmii_dev)
		of_dev_put(dev->zmii_dev);
	if (dev->rgmii_dev)
		of_dev_put(dev->rgmii_dev);
	if (dev->mdio_dev)
		of_dev_put(dev->mdio_dev);
	if (dev->tah_dev)
		of_dev_put(dev->tah_dev);
}
static int __devinit emac_of_bus_notify(struct notifier_block *nb,
					unsigned long action, void *data)
{
	/* We are only interested in device addition */
	if (action == BUS_NOTIFY_BOUND_DRIVER)
		wake_up_all(&emac_probe_wait);
	return 0;
}

static struct notifier_block emac_of_bus_notifier = {
	.notifier_call = emac_of_bus_notify
};
static int __devinit emac_wait_deps(struct emac_instance *dev)
{
	struct emac_depentry deps[EMAC_DEP_COUNT];
	int i, err;

	memset(&deps, 0, sizeof(deps));

	deps[EMAC_DEP_MAL_IDX].phandle = dev->mal_ph;
	deps[EMAC_DEP_ZMII_IDX].phandle = dev->zmii_ph;
	deps[EMAC_DEP_RGMII_IDX].phandle = dev->rgmii_ph;
	if (dev->tah_ph)
		deps[EMAC_DEP_TAH_IDX].phandle = dev->tah_ph;
	if (dev->mdio_ph)
		deps[EMAC_DEP_MDIO_IDX].phandle = dev->mdio_ph;
	if (dev->blist && dev->blist > emac_boot_list)
		deps[EMAC_DEP_PREV_IDX].phandle = 0xffffffffu;
	bus_register_notifier(&of_platform_bus_type, &emac_of_bus_notifier);
	wait_event_timeout(emac_probe_wait,
			   emac_check_deps(dev, deps),
			   EMAC_PROBE_DEP_TIMEOUT);
	bus_unregister_notifier(&of_platform_bus_type, &emac_of_bus_notifier);
	err = emac_check_deps(dev, deps) ? 0 : -ENODEV;
	for (i = 0; i < EMAC_DEP_COUNT; i++) {
		if (deps[i].node)
			of_node_put(deps[i].node);
		if (err && deps[i].ofdev)
			of_dev_put(deps[i].ofdev);
	}
	if (err == 0) {
		dev->mal_dev = deps[EMAC_DEP_MAL_IDX].ofdev;
		dev->zmii_dev = deps[EMAC_DEP_ZMII_IDX].ofdev;
		dev->rgmii_dev = deps[EMAC_DEP_RGMII_IDX].ofdev;
		dev->tah_dev = deps[EMAC_DEP_TAH_IDX].ofdev;
		dev->mdio_dev = deps[EMAC_DEP_MDIO_IDX].ofdev;
	}
	if (deps[EMAC_DEP_PREV_IDX].ofdev)
		of_dev_put(deps[EMAC_DEP_PREV_IDX].ofdev);
	return err;
}
static int __devinit emac_read_uint_prop(struct device_node *np, const char *name,
					 u32 *val, int fatal)
{
	int len;
	const u32 *prop = of_get_property(np, name, &len);
	if (prop == NULL || len < sizeof(u32)) {
		if (fatal)
			printk(KERN_ERR "%s: missing %s property\n",
			       np->full_name, name);
		return -ENODEV;
	}
	*val = *prop;
	return 0;
}
static int __devinit emac_init_phy(struct emac_instance *dev)
{
        struct device_node *np = dev->ofdev->node;
        struct net_device *ndev = dev->ndev;
        u32 phy_map, adv;
        int i;

        dev->phy.dev = ndev;
        dev->phy.mode = dev->phy_mode;

        /* PHY-less configuration.
         * XXX I probably should move these settings to the dev tree
         */
        if (dev->phy_address == 0xffffffff && dev->phy_map == 0xffffffff) {
                emac_configure(dev);

                dev->phy.address = -1;
                dev->phy.features = SUPPORTED_100baseT_Full | SUPPORTED_MII;
                dev->phy.pause = 1;

                return 0;
        }

        mutex_lock(&emac_phy_map_lock);
        phy_map = dev->phy_map | busy_phy_map;

        DBG(dev, "PHY maps %08x %08x" NL, dev->phy_map, busy_phy_map);

        dev->phy.mdio_read = emac_mdio_read;
        dev->phy.mdio_write = emac_mdio_write;

        /* Configure EMAC with defaults so we can at least use MDIO
         * This is needed mostly for 440GX
         */
        if (emac_phy_gpcs(dev->phy.mode)) {
                /* XXX
                 * Make GPCS PHY address equal to EMAC index.
                 * We probably should take into account busy_phy_map
                 * and/or phy_map here.
                 *
                 * Note that the busy_phy_map is currently global
                 * while it should probably be per-ASIC...
                 */
                dev->phy.address = dev->cell_index;
        }

        emac_configure(dev);

        if (dev->phy_address != 0xffffffff)
                phy_map = ~(1 << dev->phy_address);

        for (i = 0; i < 0x20; phy_map >>= 1, ++i)
                if (!(phy_map & 1)) {
                        int r;
                        busy_phy_map |= 1 << i;

                        /* Quick check if there is a PHY at the address */
                        r = emac_mdio_read(dev->ndev, i, MII_BMCR);
                        if (r == 0xffff || r < 0)
                                continue;
                        if (!emac_mii_phy_probe(&dev->phy, i))
                                break;
                }
        mutex_unlock(&emac_phy_map_lock);
        if (i == 0x20) {
                printk(KERN_WARNING "%s: can't find PHY!\n", np->full_name);
                return -ENXIO;
        }

        /* Init PHY */
        if (dev->phy.def->ops->init)
                dev->phy.def->ops->init(&dev->phy);

        /* Disable any PHY features not supported by the platform */
        dev->phy.def->features &= ~dev->phy_feat_exc;

        /* Setup initial link parameters */
        if (dev->phy.features & SUPPORTED_Autoneg) {
                adv = dev->phy.features;
                if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x))
                        adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
                /* Restart autonegotiation */
                dev->phy.def->ops->setup_aneg(&dev->phy, adv);
        } else {
                u32 f = dev->phy.def->features;
                int speed = SPEED_10, fd = DUPLEX_HALF;

                /* Select highest supported speed/duplex */
                if (f & SUPPORTED_1000baseT_Full) {
                        speed = SPEED_1000;
                        fd = DUPLEX_FULL;
                } else if (f & SUPPORTED_1000baseT_Half)
                        speed = SPEED_1000;
                else if (f & SUPPORTED_100baseT_Full) {
                        speed = SPEED_100;
                        fd = DUPLEX_FULL;
                } else if (f & SUPPORTED_100baseT_Half)
                        speed = SPEED_100;
                else if (f & SUPPORTED_10baseT_Full)
                        fd = DUPLEX_FULL;

                /* Force link parameters */
                dev->phy.def->ops->setup_forced(&dev->phy, speed, fd);
        }
        return 0;
}

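/*
 * Parse our configuration out of the device-tree.  As an illustration
 * only (a hypothetical node, values depend entirely on the board), the
 * mandatory properties read below could look like:
 *
 *      ethernet@ef600800 {
 *              compatible = "ibm,emac4";
 *              local-mac-address = [ 00 04 ac e3 1b 1e ];
 *              mal-device = <&MAL0>;
 *              mal-tx-channel = <0>;
 *              mal-rx-channel = <0>;
 *              cell-index = <0>;
 *              phy-mode = "rgmii";
 *      };
 *
 * plus the usual reg/interrupts resources, and a mandatory
 * "clock-frequency" property on the parent (OPB) node.  Every optional
 * property falls back to a default below.
 */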
static int __devinit emac_init_config(struct emac_instance *dev)
{
        struct device_node *np = dev->ofdev->node;
        const void *p;
        int plen;
        const char *pm, *phy_modes[] = {
                [PHY_MODE_NA] = "",
                [PHY_MODE_MII] = "mii",
                [PHY_MODE_RMII] = "rmii",
                [PHY_MODE_SMII] = "smii",
                [PHY_MODE_RGMII] = "rgmii",
                [PHY_MODE_TBI] = "tbi",
                [PHY_MODE_GMII] = "gmii",
                [PHY_MODE_RTBI] = "rtbi",
                [PHY_MODE_SGMII] = "sgmii",
        };

        /* Read config from device-tree */
        if (emac_read_uint_prop(np, "mal-device", &dev->mal_ph, 1))
                return -ENXIO;
        if (emac_read_uint_prop(np, "mal-tx-channel", &dev->mal_tx_chan, 1))
                return -ENXIO;
        if (emac_read_uint_prop(np, "mal-rx-channel", &dev->mal_rx_chan, 1))
                return -ENXIO;
        if (emac_read_uint_prop(np, "cell-index", &dev->cell_index, 1))
                return -ENXIO;
        if (emac_read_uint_prop(np, "max-frame-size", &dev->max_mtu, 0))
                dev->max_mtu = 1500;
        if (emac_read_uint_prop(np, "rx-fifo-size", &dev->rx_fifo_size, 0))
                dev->rx_fifo_size = 2048;
        if (emac_read_uint_prop(np, "tx-fifo-size", &dev->tx_fifo_size, 0))
                dev->tx_fifo_size = 2048;
        if (emac_read_uint_prop(np, "rx-fifo-size-gige", &dev->rx_fifo_size_gige, 0))
                dev->rx_fifo_size_gige = dev->rx_fifo_size;
        if (emac_read_uint_prop(np, "tx-fifo-size-gige", &dev->tx_fifo_size_gige, 0))
                dev->tx_fifo_size_gige = dev->tx_fifo_size;
        if (emac_read_uint_prop(np, "phy-address", &dev->phy_address, 0))
                dev->phy_address = 0xffffffff;
        if (emac_read_uint_prop(np, "phy-map", &dev->phy_map, 0))
                dev->phy_map = 0xffffffff;
        if (emac_read_uint_prop(np->parent, "clock-frequency", &dev->opb_bus_freq, 1))
                return -ENXIO;
        if (emac_read_uint_prop(np, "tah-device", &dev->tah_ph, 0))
                dev->tah_ph = 0;
        if (emac_read_uint_prop(np, "tah-channel", &dev->tah_port, 0))
                dev->tah_port = 0;
        if (emac_read_uint_prop(np, "mdio-device", &dev->mdio_ph, 0))
                dev->mdio_ph = 0;
        if (emac_read_uint_prop(np, "zmii-device", &dev->zmii_ph, 0))
                dev->zmii_ph = 0;
        if (emac_read_uint_prop(np, "zmii-channel", &dev->zmii_port, 0))
                dev->zmii_port = 0xffffffff;
        if (emac_read_uint_prop(np, "rgmii-device", &dev->rgmii_ph, 0))
                dev->rgmii_ph = 0;
        if (emac_read_uint_prop(np, "rgmii-channel", &dev->rgmii_port, 0))
                dev->rgmii_port = 0xffffffff;
        if (emac_read_uint_prop(np, "fifo-entry-size", &dev->fifo_entry_size, 0))
                dev->fifo_entry_size = 16;
        if (emac_read_uint_prop(np, "mal-burst-size", &dev->mal_burst_size, 0))
                dev->mal_burst_size = 256;

        /* PHY mode needs some decoding */
        dev->phy_mode = PHY_MODE_NA;
        pm = of_get_property(np, "phy-mode", &plen);
        if (pm != NULL) {
                int i;
                for (i = 0; i < ARRAY_SIZE(phy_modes); i++)
                        if (!strcasecmp(pm, phy_modes[i])) {
                                dev->phy_mode = i;
                                break;
                        }
        }

        /* Backward compat with non-final DT */
        if (dev->phy_mode == PHY_MODE_NA && pm != NULL && plen == 4) {
                u32 nmode = *(const u32 *)pm;
                if (nmode > PHY_MODE_NA && nmode <= PHY_MODE_SGMII)
                        dev->phy_mode = nmode;
        }

        /* Check EMAC version */
        if (of_device_is_compatible(np, "ibm,emac4"))
                dev->features |= EMAC_FTR_EMAC4;

        /* Fixup some feature bits based on the device tree */
        if (of_get_property(np, "has-inverted-stacr-oc", NULL))
                dev->features |= EMAC_FTR_STACR_OC_INVERT;
        if (of_get_property(np, "has-new-stacr-staopc", NULL))
                dev->features |= EMAC_FTR_HAS_NEW_STACR;

        /* CAB lacks the appropriate properties */
        if (of_device_is_compatible(np, "ibm,emac-axon"))
                dev->features |= EMAC_FTR_HAS_NEW_STACR |
                        EMAC_FTR_STACR_OC_INVERT;

        /* Enable TAH/ZMII/RGMII features as found */
        if (dev->tah_ph != 0) {
#ifdef CONFIG_IBM_NEW_EMAC_TAH
                dev->features |= EMAC_FTR_HAS_TAH;
#else
                printk(KERN_ERR "%s: TAH support not enabled!\n",
                       np->full_name);
                return -ENXIO;
#endif
        }

        if (dev->zmii_ph != 0) {
#ifdef CONFIG_IBM_NEW_EMAC_ZMII
                dev->features |= EMAC_FTR_HAS_ZMII;
#else
                printk(KERN_ERR "%s: ZMII support not enabled!\n",
                       np->full_name);
                return -ENXIO;
#endif
        }

        if (dev->rgmii_ph != 0) {
#ifdef CONFIG_IBM_NEW_EMAC_RGMII
                dev->features |= EMAC_FTR_HAS_RGMII;
#else
                printk(KERN_ERR "%s: RGMII support not enabled!\n",
                       np->full_name);
                return -ENXIO;
#endif
        }

        /* Read MAC-address */
        p = of_get_property(np, "local-mac-address", NULL);
        if (p == NULL) {
                printk(KERN_ERR "%s: Can't find local-mac-address property\n",
                       np->full_name);
                return -ENXIO;
        }
        memcpy(dev->ndev->dev_addr, p, 6);

        DBG(dev, "features     : 0x%08x / 0x%08x\n", dev->features, EMAC_FTRS_POSSIBLE);
        DBG(dev, "tx_fifo_size : %d (%d gige)\n", dev->tx_fifo_size, dev->tx_fifo_size_gige);
        DBG(dev, "rx_fifo_size : %d (%d gige)\n", dev->rx_fifo_size, dev->rx_fifo_size_gige);
        DBG(dev, "max_mtu      : %d\n", dev->max_mtu);
        DBG(dev, "OPB freq     : %d\n", dev->opb_bus_freq);

        return 0;
}

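/*
 * Bring up one EMAC instance: parse the config, map IRQs and registers,
 * wait for the devices we depend on, hook into the MAL, attach the
 * bridges (ZMII/RGMII/TAH), find the PHY and finally register the
 * net_device.  Failures unwind in strict reverse order through the
 * error labels at the bottom.
 */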
static int __devinit emac_probe(struct of_device *ofdev,
                                const struct of_device_id *match)
{
        struct net_device *ndev;
        struct emac_instance *dev;
        struct device_node *np = ofdev->node;
        struct device_node **blist = NULL;
        int err, i;

        /* Skip unused/unwired EMACs */
        if (of_get_property(np, "unused", NULL))
                return -ENODEV;

        /* Find ourselves in the bootlist if we are there */
        for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
                if (emac_boot_list[i] == np)
                        blist = &emac_boot_list[i];

        /* Allocate our net_device structure */
        err = -ENOMEM;
        ndev = alloc_etherdev(sizeof(struct emac_instance));
        if (!ndev) {
                printk(KERN_ERR "%s: could not allocate ethernet device!\n",
                       np->full_name);
                goto err_gone;
        }
        dev = netdev_priv(ndev);
        dev->ndev = ndev;
        dev->ofdev = ofdev;
        dev->blist = blist;
        SET_NETDEV_DEV(ndev, &ofdev->dev);

        /* Initialize some embedded data structures */
        mutex_init(&dev->mdio_lock);
        mutex_init(&dev->link_lock);
        spin_lock_init(&dev->lock);
        INIT_WORK(&dev->reset_work, emac_reset_work);

        /* Init various config data based on device-tree */
        err = emac_init_config(dev);
        if (err != 0)
                goto err_free;

        /* Get interrupts. EMAC irq is mandatory, WOL irq is optional */
        dev->emac_irq = irq_of_parse_and_map(np, 0);
        dev->wol_irq = irq_of_parse_and_map(np, 1);
        if (dev->emac_irq == NO_IRQ) {
                printk(KERN_ERR "%s: Can't map main interrupt\n", np->full_name);
                err = -ENODEV;
                goto err_free;
        }
        ndev->irq = dev->emac_irq;

        /* Map EMAC regs */
        if (of_address_to_resource(np, 0, &dev->rsrc_regs)) {
                printk(KERN_ERR "%s: Can't get registers address\n",
                       np->full_name);
                err = -ENODEV;
                goto err_irq_unmap;
        }
        // TODO : request_mem_region
        dev->emacp = ioremap(dev->rsrc_regs.start, sizeof(struct emac_regs));
        if (dev->emacp == NULL) {
                printk(KERN_ERR "%s: Can't map device registers!\n",
                       np->full_name);
                err = -ENOMEM;
                goto err_irq_unmap;
        }

        /* Wait for dependent devices */
        err = emac_wait_deps(dev);
        if (err) {
                printk(KERN_ERR
                       "%s: Timeout waiting for dependent devices\n",
                       np->full_name);
                /* display more info about what's missing ? */
                goto err_reg_unmap;
        }
        dev->mal = dev_get_drvdata(&dev->mal_dev->dev);
        if (dev->mdio_dev != NULL)
                dev->mdio_instance = dev_get_drvdata(&dev->mdio_dev->dev);

        /* Register with MAL */
        dev->commac.ops = &emac_commac_ops;
        dev->commac.dev = dev;
        dev->commac.tx_chan_mask = MAL_CHAN_MASK(dev->mal_tx_chan);
        dev->commac.rx_chan_mask = MAL_CHAN_MASK(dev->mal_rx_chan);
        err = mal_register_commac(dev->mal, &dev->commac);
        if (err) {
                printk(KERN_ERR "%s: failed to register with mal %s!\n",
                       np->full_name, dev->mal_dev->node->full_name);
                goto err_rel_deps;
        }
        dev->rx_skb_size = emac_rx_skb_size(ndev->mtu);
        dev->rx_sync_size = emac_rx_sync_size(ndev->mtu);

        /* Get pointers to BD rings */
        dev->tx_desc =
            dev->mal->bd_virt + mal_tx_bd_offset(dev->mal, dev->mal_tx_chan);
        dev->rx_desc =
            dev->mal->bd_virt + mal_rx_bd_offset(dev->mal, dev->mal_rx_chan);

        DBG(dev, "tx_desc %p" NL, dev->tx_desc);
        DBG(dev, "rx_desc %p" NL, dev->rx_desc);

        /* Clean rings */
        memset(dev->tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor));
        memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor));

        /* Attach to ZMII, if needed */
        if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII) &&
            (err = zmii_attach(dev->zmii_dev, dev->zmii_port, &dev->phy_mode)) != 0)
                goto err_unreg_commac;

        /* Attach to RGMII, if needed */
        if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII) &&
            (err = rgmii_attach(dev->rgmii_dev, dev->rgmii_port, dev->phy_mode)) != 0)
                goto err_detach_zmii;

        /* Attach to TAH, if needed */
        if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
            (err = tah_attach(dev->tah_dev, dev->tah_port)) != 0)
                goto err_detach_rgmii;

        /* Set some link defaults before we can find out real parameters */
        dev->phy.speed = SPEED_100;
        dev->phy.duplex = DUPLEX_FULL;
        dev->phy.autoneg = AUTONEG_DISABLE;
        dev->phy.pause = dev->phy.asym_pause = 0;
        dev->stop_timeout = STOP_TIMEOUT_100;
        INIT_DELAYED_WORK(&dev->link_work, emac_link_timer);

        /* Find PHY if any */
        err = emac_init_phy(dev);
        if (err != 0)
                goto err_detach_tah;

        /* Fill in the driver function table */
        ndev->open = &emac_open;
        if (dev->tah_dev)
                ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
        ndev->tx_timeout = &emac_tx_timeout;
        ndev->watchdog_timeo = 5 * HZ;
        ndev->stop = &emac_close;
        ndev->get_stats = &emac_stats;
        ndev->set_multicast_list = &emac_set_multicast_list;
        ndev->do_ioctl = &emac_ioctl;
        if (emac_phy_supports_gige(dev->phy_mode)) {
                ndev->hard_start_xmit = &emac_start_xmit_sg;
                ndev->change_mtu = &emac_change_mtu;
                dev->commac.ops = &emac_commac_sg_ops;
        } else
                ndev->hard_start_xmit = &emac_start_xmit;
        SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);

        netif_carrier_off(ndev);
        netif_stop_queue(ndev);

        err = register_netdev(ndev);
        if (err) {
                printk(KERN_ERR "%s: failed to register net device (%d)!\n",
                       np->full_name, err);
                goto err_detach_tah;
        }

        /* Set our drvdata last as we don't want them visible until we are
         * fully initialized
         */
        wmb();
        dev_set_drvdata(&ofdev->dev, dev);

        /* There's a new kid in town! Let's tell everybody */
        wake_up_all(&emac_probe_wait);

        printk(KERN_INFO
               "%s: EMAC-%d %s, MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
               ndev->name, dev->cell_index, np->full_name,
               ndev->dev_addr[0], ndev->dev_addr[1], ndev->dev_addr[2],
               ndev->dev_addr[3], ndev->dev_addr[4], ndev->dev_addr[5]);

        if (dev->phy.address >= 0)
                printk("%s: found %s PHY (0x%02x)\n", ndev->name,
                       dev->phy.def->name, dev->phy.address);

        emac_dbg_register(dev);

        /* Life is good */
        return 0;

        /* I have a bad feeling about this ... */

 err_detach_tah:
        if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
                tah_detach(dev->tah_dev, dev->tah_port);
 err_detach_rgmii:
        if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
                rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
 err_detach_zmii:
        if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
                zmii_detach(dev->zmii_dev, dev->zmii_port);
 err_unreg_commac:
        mal_unregister_commac(dev->mal, &dev->commac);
 err_rel_deps:
        emac_put_deps(dev);
 err_reg_unmap:
        iounmap(dev->emacp);
 err_irq_unmap:
        if (dev->wol_irq != NO_IRQ)
                irq_dispose_mapping(dev->wol_irq);
        if (dev->emac_irq != NO_IRQ)
                irq_dispose_mapping(dev->emac_irq);
 err_free:
        kfree(ndev);
 err_gone:
        /* if we were on the bootlist, remove us as we won't show up and
         * wake up all waiters to notify them in case they were waiting
         * on us
         */
        if (blist) {
                *blist = NULL;
                wake_up_all(&emac_probe_wait);
        }
        return err;
}

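/* Tear down in the reverse order of emac_probe() */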
static int __devexit emac_remove(struct of_device *ofdev)
{
        struct emac_instance *dev = dev_get_drvdata(&ofdev->dev);

        DBG(dev, "remove" NL);

        dev_set_drvdata(&ofdev->dev, NULL);

        unregister_netdev(dev->ndev);

        flush_scheduled_work();

        if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
                tah_detach(dev->tah_dev, dev->tah_port);
        if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
                rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
        if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
                zmii_detach(dev->zmii_dev, dev->zmii_port);

        mal_unregister_commac(dev->mal, &dev->commac);
        emac_put_deps(dev);

        emac_dbg_unregister(dev);
        iounmap(dev->emacp);

        if (dev->wol_irq != NO_IRQ)
                irq_dispose_mapping(dev->wol_irq);
        if (dev->emac_irq != NO_IRQ)
                irq_dispose_mapping(dev->emac_irq);

        kfree(dev->ndev);

        return 0;
}

/* XXX Features in here should be replaced by properties... */
static struct of_device_id emac_match[] =
{
        {
                .type           = "network",
                .compatible     = "ibm,emac",
        },
        {
                .type           = "network",
                .compatible     = "ibm,emac4",
        },
        {},
};

static struct of_platform_driver emac_driver = {
        .name = "emac",
        .match_table = emac_match,

        .probe = emac_probe,
        .remove = emac_remove,
};

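/*
 * Build the boot list: collect every EMAC node up front and sort it by
 * cell-index so that interfaces are probed in a deterministic order.
 * The PREV dependency in emac_wait_deps() walks this list to make each
 * EMAC wait for its predecessor.
 */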
static void __init emac_make_bootlist(void)
{
        struct device_node *np = NULL;
        int j, max, i = 0, k;
        int cell_indices[EMAC_BOOT_LIST_SIZE];

        /* Collect EMACs */
        while ((np = of_find_all_nodes(np)) != NULL) {
                const u32 *idx;

                if (of_match_node(emac_match, np) == NULL)
                        continue;
                if (of_get_property(np, "unused", NULL))
                        continue;
                idx = of_get_property(np, "cell-index", NULL);
                if (idx == NULL)
                        continue;
                cell_indices[i] = *idx;
                emac_boot_list[i++] = of_node_get(np);
                if (i >= EMAC_BOOT_LIST_SIZE) {
                        of_node_put(np);
                        break;
                }
        }
        max = i;

        /* Bubble sort them (doh, what a creative algorithm :-) */
        for (i = 0; max > 1 && (i < (max - 1)); i++)
                for (j = i; j < max; j++) {
                        if (cell_indices[i] > cell_indices[j]) {
                                np = emac_boot_list[i];
                                emac_boot_list[i] = emac_boot_list[j];
                                emac_boot_list[j] = np;
                                k = cell_indices[i];
                                cell_indices[i] = cell_indices[j];
                                cell_indices[j] = k;
                        }
                }
}

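/*
 * Module init/exit.  Debug support and the boot list must be set up
 * before any EMAC can probe; the MAL, ZMII, RGMII and TAH submodules
 * are then initialized in order, and only then is the EMAC platform
 * driver itself registered.
 */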
static int __init emac_init(void)
{
        int rc;

        printk(KERN_INFO DRV_DESC ", version " DRV_VERSION "\n");

        /* Init debug stuff */
        emac_init_debug();

        /* Build EMAC boot list */
        emac_make_bootlist();

        /* Init submodules */
        rc = mal_init();
        if (rc)
                goto err;
        rc = zmii_init();
        if (rc)
                goto err_mal;
        rc = rgmii_init();
        if (rc)
                goto err_zmii;
        rc = tah_init();
        if (rc)
                goto err_rgmii;
        rc = of_register_platform_driver(&emac_driver);
        if (rc)
                goto err_tah;

        return 0;

 err_tah:
        tah_exit();
 err_rgmii:
        rgmii_exit();
 err_zmii:
        zmii_exit();
 err_mal:
        mal_exit();
 err:
        return rc;
}

static void __exit emac_exit(void)
{
        int i;

        of_unregister_platform_driver(&emac_driver);

        tah_exit();
        rgmii_exit();
        zmii_exit();
        mal_exit();
        emac_fini_debug();

        /* Destroy EMAC boot list */
        for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
                if (emac_boot_list[i])
                        of_node_put(emac_boot_list[i]);
}

module_init(emac_init);
module_exit(emac_exit);