/*
 * drivers/net/ibm_newemac/core.c
 *
 * Driver for PowerPC 4xx on-chip ethernet controller.
 *
 * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
 *                <benh@kernel.crashing.org>
 *
 * Based on the arch/ppc version of the driver:
 *
 * Copyright (c) 2004, 2005 Zultys Technologies.
 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
 *
 * Based on original work by
 *	Matt Porter <mporter@kernel.crashing.org>
 *	(c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
 *	Armin Kuster <akuster@mvista.com>
 *	Johnnie Peters <jpeters@mvista.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/bitops.h>
#include <linux/workqueue.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/uaccess.h>

#include "core.h"
/*
 * Lack of dma_unmap_???? calls is intentional.
 *
 * API-correct usage requires additional support state information to be
 * maintained for every RX and TX buffer descriptor (BD). Unfortunately, due to
 * the EMAC design (e.g. a TX buffer passed from the network stack can be split
 * into several BDs, dma_map_single/dma_map_page can be used to map a
 * particular BD), maintaining such information will add additional overhead.
 * The current DMA API implementation for 4xx processors only ensures cache
 * coherency, and the dma_unmap_???? routines are empty and are likely to stay
 * this way. I decided to omit dma_unmap_???? calls because I don't want to
 * add additional complexity just for the sake of following some abstract API,
 * when it doesn't add any real benefit to the driver. I understand that this
 * decision may be controversial, but I really tried to make the code
 * API-correct and efficient at the same time and didn't come up with code I
 * liked :(. --ebs
 */
#define DRV_NAME	"emac"
#define DRV_VERSION	"3.54"
#define DRV_DESC	"PPC 4xx OCP EMAC driver"

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR
    ("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>");
MODULE_LICENSE("GPL");
/*
 * PPC64 doesn't (yet) have a cacheable_memcpy
 */
#ifdef CONFIG_PPC64
#define cacheable_memcpy(d,s,n) memcpy((d),(s),(n))
#endif
/* minimum number of free TX descriptors required to wake up TX process */
#define EMAC_TX_WAKEUP_THRESH		(NUM_TX_BUFF / 4)
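/* For example, assuming the Kconfig default of NUM_TX_BUFF = 64 (this is a
 * configurable value, so it may differ per build), a stopped TX queue is
 * woken again in emac_poll_tx() once fewer than 64 / 4 = 16 descriptors
 * remain in flight, i.e. at least 48 ring slots are free again.
 */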
/* If packet size is less than this number, we allocate a small skb and copy
 * the packet contents into it instead of pushing the original, larger skb up
 * the stack.
 */
#define EMAC_RX_COPY_THRESH		CONFIG_IBM_NEW_EMAC_RX_COPY_THRESHOLD
/* Since multiple EMACs share MDIO lines in various ways, we need
 * to avoid re-using the same PHY ID in cases where the arch didn't
 * set up precise phy_map entries.
 *
 * XXX This is something that needs to be reworked as we can have multiple
 * EMAC "sets" (multiple ASICs containing several EMACs) though we can
 * probably require in that case to have explicit PHY IDs in the device-tree.
 */
static u32 busy_phy_map;
static DEFINE_MUTEX(emac_phy_map_lock);
/* This is the wait queue used to wait on any event related to probe, that
 * is discovery of MALs, other EMACs, ZMII/RGMIIs, etc...
 */
static DECLARE_WAIT_QUEUE_HEAD(emac_probe_wait);
/* Having stable interface names is a doomed idea. However, it would be nice
 * if we didn't have completely random interface names at boot too :-) It's
 * just a matter of making everybody's life easier. Since we are doing
 * threaded probing, it's a bit harder though. The base idea here is that
 * we make up a list of all emacs in the device-tree before we register the
 * driver. Every emac will then wait for the previous one in the list to
 * initialize before itself. We should also keep that list ordered by
 * cell_index.
 * That list is only 4 entries long, meaning that additional EMACs don't
 * get ordering guarantees unless EMAC_BOOT_LIST_SIZE is increased.
 */
#define EMAC_BOOT_LIST_SIZE	4
static struct device_node *emac_boot_list[EMAC_BOOT_LIST_SIZE];
/* How long should I wait for dependent devices ? */
#define EMAC_PROBE_DEP_TIMEOUT	(HZ * 5)
/* I don't want to litter the system log with timeout errors
 * when we have a brain-damaged PHY.
 */
static inline void emac_report_timeout_error(struct emac_instance *dev,
					     const char *error)
{
	if (net_ratelimit())
		printk(KERN_ERR "%s: %s\n", dev->ndev->name, error);
}
/* PHY polling intervals */
#define PHY_POLL_LINK_ON	HZ
#define PHY_POLL_LINK_OFF	(HZ / 5)
/* Graceful stop timeouts in us.
 * We should allow up to 1 frame time (full-duplex, ignoring collisions)
 */
#define STOP_TIMEOUT_10		1230
#define STOP_TIMEOUT_100	124
#define STOP_TIMEOUT_1000	13
#define STOP_TIMEOUT_1000_JUMBO	73
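/* One plausible derivation of these numbers: a maximum-size wire frame is
 * about 1538 bytes (1518 bytes of frame plus preamble and inter-frame gap),
 * and 1538 * 8 / 10 Mbit/s = 1230.4 us; the same arithmetic gives 123.04 us
 * at 100 Mbit/s (rounded up to 124) and 12.3 us at 1000 Mbit/s (rounded up
 * to 13), while the jumbo value matches a ~9000-byte frame at 1000 Mbit/s
 * (~72.3 us, rounded up to 73).
 */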
/* Please, keep in sync with struct ibm_emac_stats/ibm_emac_error_stats */
static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = {
	"rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum",
	"tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom",
	"rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu",
	"rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet",
	"rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error",
	"rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range",
	"rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun",
	"rx_bad_packet", "rx_runt_packet", "rx_short_event",
	"rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long",
	"rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors",
	"tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral",
	"tx_bd_excessive_collisions", "tx_bd_late_collision",
	"tx_bd_multple_collisions", "tx_bd_single_collision",
	"tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe",
	"tx_errors"
};
static irqreturn_t emac_irq(int irq, void *dev_instance);
static void emac_clean_tx_ring(struct emac_instance *dev);
static void __emac_set_multicast_list(struct emac_instance *dev);
static inline int emac_phy_supports_gige(int phy_mode)
{
	return  phy_mode == PHY_MODE_GMII ||
		phy_mode == PHY_MODE_RGMII ||
		phy_mode == PHY_MODE_TBI ||
		phy_mode == PHY_MODE_RTBI;
}

static inline int emac_phy_gpcs(int phy_mode)
{
	return  phy_mode == PHY_MODE_TBI ||
		phy_mode == PHY_MODE_RTBI;
}
static inline void emac_tx_enable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "tx_enable" NL);

	r = in_be32(&p->mr0);
	if (!(r & EMAC_MR0_TXE))
		out_be32(&p->mr0, r | EMAC_MR0_TXE);
}

static void emac_tx_disable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "tx_disable" NL);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_TXE) {
		int n = dev->stop_timeout;
		out_be32(&p->mr0, r & ~EMAC_MR0_TXE);
		while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n) {
			udelay(1);
			--n;
		}
		if (unlikely(!n))
			emac_report_timeout_error(dev, "TX disable timeout");
	}
}
static void emac_rx_enable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	if (unlikely(test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags)))
		goto out;

	DBG(dev, "rx_enable" NL);

	r = in_be32(&p->mr0);
	if (!(r & EMAC_MR0_RXE)) {
		if (unlikely(!(r & EMAC_MR0_RXI))) {
			/* Wait if previous async disable is still in progress */
			int n = dev->stop_timeout;
			while (!(r = in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
				udelay(1);
				--n;
			}
			if (unlikely(!n))
				emac_report_timeout_error(dev,
							  "RX disable timeout");
		}
		out_be32(&p->mr0, r | EMAC_MR0_RXE);
	}
 out:
	;
}
static void emac_rx_disable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "rx_disable" NL);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_RXE) {
		int n = dev->stop_timeout;
		out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
		while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
			udelay(1);
			--n;
		}
		if (unlikely(!n))
			emac_report_timeout_error(dev, "RX disable timeout");
	}
}
static inline void emac_netif_stop(struct emac_instance *dev)
{
	netif_tx_lock_bh(dev->ndev);
	dev->no_mcast = 1;
	netif_tx_unlock_bh(dev->ndev);
	dev->ndev->trans_start = jiffies;	/* prevent tx timeout */
	mal_poll_disable(dev->mal, &dev->commac);
	netif_tx_disable(dev->ndev);
}

static inline void emac_netif_start(struct emac_instance *dev)
{
	netif_tx_lock_bh(dev->ndev);
	dev->no_mcast = 0;
	if (dev->mcast_pending && netif_running(dev->ndev))
		__emac_set_multicast_list(dev);
	netif_tx_unlock_bh(dev->ndev);

	netif_wake_queue(dev->ndev);

	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (taken from tg3... though the case where that is wrong is
	 *  not terribly harmful)
	 */
	mal_poll_enable(dev->mal, &dev->commac);
}
static inline void emac_rx_disable_async(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "rx_disable_async" NL);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_RXE)
		out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
}
static int emac_reset(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	int n = 20;

	DBG(dev, "reset" NL);

	if (!dev->reset_failed) {
		/* 40x erratum suggests stopping RX channel before reset,
		 * we need about 4 usec for this operation */
		emac_rx_disable(dev);
		emac_tx_disable(dev);
	}

	out_be32(&p->mr0, EMAC_MR0_SRST);
	while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n)
		--n;

	if (n) {
		dev->reset_failed = 0;
		return 0;
	} else {
		emac_report_timeout_error(dev, "reset timeout");
		dev->reset_failed = 1;
		return -ETIMEDOUT;
	}
}
static void emac_hash_mc(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u16 gaht[4] = { 0 };
	struct dev_mc_list *dmi;

	DBG(dev, "hash_mc %d" NL, dev->ndev->mc_count);

	for (dmi = dev->ndev->mc_list; dmi; dmi = dmi->next) {
		int bit;
		DBG2(dev, "mc %02x:%02x:%02x:%02x:%02x:%02x" NL,
		     dmi->dmi_addr[0], dmi->dmi_addr[1], dmi->dmi_addr[2],
		     dmi->dmi_addr[3], dmi->dmi_addr[4], dmi->dmi_addr[5]);

		bit = 63 - (ether_crc(ETH_ALEN, dmi->dmi_addr) >> 26);
		gaht[bit >> 4] |= 0x8000 >> (bit & 0x0f);
	}
	out_be32(&p->gaht1, gaht[0]);
	out_be32(&p->gaht2, gaht[1]);
	out_be32(&p->gaht3, gaht[2]);
	out_be32(&p->gaht4, gaht[3]);
}
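/* A note on the hash above: ether_crc() >> 26 yields the top six bits of the
 * CRC (a value from 0 to 63), and "63 -" mirrors it to match the EMAC's
 * big-endian bit numbering. The mirrored value then selects one bit out of
 * the 64 spread across the four 16-bit GAHT registers: bit >> 4 picks the
 * register, and 0x8000 >> (bit & 0x0f) picks the bit within it.
 */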
static inline u32 emac_iff2rmr(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	u32 r;

	r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE;

	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
	    r |= EMAC4_RMR_BASE;
	else
	    r |= EMAC_RMR_BASE;

	if (ndev->flags & IFF_PROMISC)
		r |= EMAC_RMR_PME;
	else if (ndev->flags & IFF_ALLMULTI || ndev->mc_count > 32)
		r |= EMAC_RMR_PMME;
	else if (ndev->mc_count > 0)
		r |= EMAC_RMR_MAE;

	return r;
}
static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
{
	u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC_MR1_TR0_MULT;

	DBG2(dev, "__emac_calc_base_mr1" NL);

	switch(tx_size) {
	case 2048:
		ret |= EMAC_MR1_TFS_2K;
		break;
	default:
		printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
		       dev->ndev->name, tx_size);
	}

	switch(rx_size) {
	case 16384:
		ret |= EMAC_MR1_RFS_16K;
		break;
	case 4096:
		ret |= EMAC_MR1_RFS_4K;
		break;
	default:
		printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
		       dev->ndev->name, rx_size);
	}

	return ret;
}
static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
{
	u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC4_MR1_TR |
		EMAC4_MR1_OBCI(dev->opb_bus_freq / 1000000);

	DBG2(dev, "__emac4_calc_base_mr1" NL);

	switch(tx_size) {
	case 4096:
		ret |= EMAC4_MR1_TFS_4K;
		break;
	case 2048:
		ret |= EMAC4_MR1_TFS_2K;
		break;
	default:
		printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
		       dev->ndev->name, tx_size);
	}

	switch(rx_size) {
	case 16384:
		ret |= EMAC4_MR1_RFS_16K;
		break;
	case 4096:
		ret |= EMAC4_MR1_RFS_4K;
		break;
	case 2048:
		ret |= EMAC4_MR1_RFS_2K;
		break;
	default:
		printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
		       dev->ndev->name, rx_size);
	}

	return ret;
}
static u32 emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
{
	return emac_has_feature(dev, EMAC_FTR_EMAC4) ?
		__emac4_calc_base_mr1(dev, tx_size, rx_size) :
		__emac_calc_base_mr1(dev, tx_size, rx_size);
}
static inline u32 emac_calc_trtr(struct emac_instance *dev, unsigned int size)
{
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		return ((size >> 6) - 1) << EMAC_TRTR_SHIFT_EMAC4;
	else
		return ((size >> 6) - 1) << EMAC_TRTR_SHIFT;
}
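/* The TRTR threshold field counts 64-byte blocks minus one; for example a
 * transmit request threshold of 1024 bytes encodes as (1024 >> 6) - 1 = 15,
 * shifted into the EMAC4 or classic field position as appropriate.
 */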
static inline u32 emac_calc_rwmr(struct emac_instance *dev,
				 unsigned int low, unsigned int high)
{
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		return (low << 22) | ( (high & 0x3ff) << 6);
	else
		return (low << 23) | ( (high & 0x1ff) << 7);
}
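/* Worked example (based on the caller in emac_configure() and assuming a
 * fifo_entry_size of 16 with rx_size = 4096): low = 4096 / 8 / 16 = 32 and
 * high = 4096 / 4 / 16 = 64 FIFO entries, packed for EMAC4 as
 * (32 << 22) | (64 << 6).
 */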
static int emac_configure(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	struct net_device *ndev = dev->ndev;
	int tx_size, rx_size, link = netif_carrier_ok(dev->ndev);
	u32 r, mr1 = 0;

	DBG(dev, "configure" NL);

	if (!link) {
		out_be32(&p->mr1, in_be32(&p->mr1)
			 | EMAC_MR1_FDE | EMAC_MR1_ILE);
		udelay(100);
	} else if (emac_reset(dev) < 0)
		return -ETIMEDOUT;

	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		tah_reset(dev->tah_dev);

	DBG(dev, " link = %d duplex = %d, pause = %d, asym_pause = %d\n",
	    link, dev->phy.duplex, dev->phy.pause, dev->phy.asym_pause);

	/* Default fifo sizes */
	tx_size = dev->tx_fifo_size;
	rx_size = dev->rx_fifo_size;

	/* No link, force loopback */
	if (!link)
		mr1 = EMAC_MR1_FDE | EMAC_MR1_ILE;

	/* Check for full duplex */
	else if (dev->phy.duplex == DUPLEX_FULL)
		mr1 |= EMAC_MR1_FDE | EMAC_MR1_MWSW_001;

	/* Adjust fifo sizes, mr1 and timeouts based on link speed */
	dev->stop_timeout = STOP_TIMEOUT_10;
	switch (dev->phy.speed) {
	case SPEED_1000:
		if (emac_phy_gpcs(dev->phy.mode)) {
			mr1 |= EMAC_MR1_MF_1000GPCS |
				EMAC_MR1_MF_IPPA(dev->phy.address);

			/* Put some arbitrary OUI, Manuf & Rev IDs so we can
			 * identify this GPCS PHY later.
			 */
			out_be32(&p->ipcr, 0xdeadbeef);
		} else
			mr1 |= EMAC_MR1_MF_1000;

		/* Extended fifo sizes */
		tx_size = dev->tx_fifo_size_gige;
		rx_size = dev->rx_fifo_size_gige;

		if (dev->ndev->mtu > ETH_DATA_LEN) {
			mr1 |= EMAC_MR1_JPSM;
			dev->stop_timeout = STOP_TIMEOUT_1000_JUMBO;
		} else
			dev->stop_timeout = STOP_TIMEOUT_1000;
		break;
	case SPEED_100:
		mr1 |= EMAC_MR1_MF_100;
		dev->stop_timeout = STOP_TIMEOUT_100;
		break;
	default: /* make gcc happy */
		break;
	}

	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_set_speed(dev->rgmii_dev, dev->rgmii_port,
				dev->phy.speed);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_set_speed(dev->zmii_dev, dev->zmii_port, dev->phy.speed);

	/* An erratum on 40x forces us to NOT use integrated flow control,
	 * let's hope it works on 44x ;)
	 */
	if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x) &&
	    dev->phy.duplex == DUPLEX_FULL) {
		if (dev->phy.pause)
			mr1 |= EMAC_MR1_EIFC | EMAC_MR1_APP;
		else if (dev->phy.asym_pause)
			mr1 |= EMAC_MR1_APP;
	}

	/* Add base settings & fifo sizes & program MR1 */
	mr1 |= emac_calc_base_mr1(dev, tx_size, rx_size);
	out_be32(&p->mr1, mr1);

	/* Set individual MAC address */
	out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
	out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
		 (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
		 ndev->dev_addr[5]);

	/* VLAN Tag Protocol ID */
	out_be32(&p->vtpid, 0x8100);

	/* Receive mode register */
	r = emac_iff2rmr(ndev);
	if (r & EMAC_RMR_MAE)
		emac_hash_mc(dev);
	out_be32(&p->rmr, r);

	/* FIFOs thresholds */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
			       tx_size / 2 / dev->fifo_entry_size);
	else
		r = EMAC_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
			      tx_size / 2 / dev->fifo_entry_size);
	out_be32(&p->tmr1, r);
	out_be32(&p->trtr, emac_calc_trtr(dev, tx_size / 2));

	/* PAUSE frame is sent when RX FIFO reaches its high-water mark;
	   there should still be enough space in the FIFO to allow our link
	   partner time to process this frame and also time to send a PAUSE
	   frame itself.

	   Here is the worst case scenario for the RX FIFO "headroom"
	   (from "The Switch Book") (100Mbps, without preamble, inter-frame gap):

	   1) One maximum-length frame on TX                    1522 bytes
	   2) One PAUSE frame time                                64 bytes
	   3) PAUSE frame decode time allowance                   64 bytes
	   4) One maximum-length frame on RX                    1522 bytes
	   5) Round-trip propagation delay of the link (100Mb)    15 bytes
	   ----------
	   3187 bytes

	   I chose to set high-water mark to RX_FIFO_SIZE / 4 (1024 bytes)
	   low-water mark  to RX_FIFO_SIZE / 8 (512 bytes)
	 */
	r = emac_calc_rwmr(dev, rx_size / 8 / dev->fifo_entry_size,
			   rx_size / 4 / dev->fifo_entry_size);
	out_be32(&p->rwmr, r);

	/* Set PAUSE timer to the maximum */
	out_be32(&p->ptr, 0xffff);

	/* Enable interrupts */
	r = EMAC_ISR_OVR | EMAC_ISR_BP | EMAC_ISR_SE |
		EMAC_ISR_ALE | EMAC_ISR_BFCS | EMAC_ISR_PTLE | EMAC_ISR_ORE |
		EMAC_ISR_IRE | EMAC_ISR_TE;
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r |= EMAC4_ISR_TXPE | EMAC4_ISR_RXPE /* | EMAC4_ISR_TXUE |
							EMAC4_ISR_RXOE | */;
	out_be32(&p->iser,  r);

	/* We need to take GPCS PHY out of isolate mode after EMAC reset */
	if (emac_phy_gpcs(dev->phy.mode))
		emac_mii_reset_phy(&dev->phy);

	return 0;
}
static void emac_reinitialize(struct emac_instance *dev)
{
	DBG(dev, "reinitialize" NL);

	emac_netif_stop(dev);
	if (!emac_configure(dev)) {
		emac_tx_enable(dev);
		emac_rx_enable(dev);
	}
	emac_netif_start(dev);
}
static void emac_full_tx_reset(struct emac_instance *dev)
{
	DBG(dev, "full_tx_reset" NL);

	emac_tx_disable(dev);
	mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
	emac_clean_tx_ring(dev);
	dev->tx_cnt = dev->tx_slot = dev->ack_slot = 0;

	emac_configure(dev);

	mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
	emac_tx_enable(dev);
	emac_rx_enable(dev);
}
static void emac_reset_work(struct work_struct *work)
{
	struct emac_instance *dev = container_of(work, struct emac_instance, reset_work);

	DBG(dev, "reset_work" NL);

	mutex_lock(&dev->link_lock);
	if (dev->opened) {
		emac_netif_stop(dev);
		emac_full_tx_reset(dev);
		emac_netif_start(dev);
	}
	mutex_unlock(&dev->link_lock);
}
static void emac_tx_timeout(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	DBG(dev, "tx_timeout" NL);

	schedule_work(&dev->reset_work);
}
static inline int emac_phy_done(struct emac_instance *dev, u32 stacr)
{
	int done = !!(stacr & EMAC_STACR_OC);

	if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
		done = !done;

	return done;
}
static int __emac_mdio_read(struct emac_instance *dev, u8 id, u8 reg)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r = 0;
	int n, err = -ETIMEDOUT;

	mutex_lock(&dev->mdio_lock);

	DBG2(dev, "mdio_read(%02x,%02x)" NL, id, reg);

	/* Enable proper MDIO port */
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);

	/* Wait for management interface to become idle */
	n = 20;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait idle\n");
			goto bail;
		}
	}

	/* Issue read command */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_STACR_BASE(dev->opb_bus_freq);
	else
		r = EMAC_STACR_BASE(dev->opb_bus_freq);
	if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
		r |= EMAC_STACR_OC;
	if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
		r |= EMACX_STACR_STAC_READ;
	else
		r |= EMAC_STACR_STAC_READ;
	r |= (reg & EMAC_STACR_PRA_MASK)
		| ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT);
	out_be32(&p->stacr, r);

	/* Wait for read to complete */
	n = 200;
	while (!emac_phy_done(dev, (r = in_be32(&p->stacr)))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait complete\n");
			goto bail;
		}
	}

	if (unlikely(r & EMAC_STACR_PHYE)) {
		DBG(dev, "mdio_read(%02x, %02x) failed" NL, id, reg);
		err = -EREMOTEIO;
		goto bail;
	}

	r = ((r >> EMAC_STACR_PHYD_SHIFT) & EMAC_STACR_PHYD_MASK);

	DBG2(dev, "mdio_read -> %04x" NL, r);
	err = 0;
 bail:
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
	mutex_unlock(&dev->mdio_lock);

	return err == 0 ? r : err;
}
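/* The read above follows the usual STACR handshake: wait for the OC
 * ("operation complete") bit to indicate an idle MII management interface,
 * program the clock base, opcode, register and PHY addresses into STACR,
 * then poll OC again and check PHYE before extracting the 16-bit datum from
 * the PHYD field. The write path below is the same sequence with a write
 * opcode and the datum loaded into PHYD up front.
 */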
static void __emac_mdio_write(struct emac_instance *dev, u8 id, u8 reg,
			      u16 val)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r = 0;
	int n, err = -ETIMEDOUT;

	mutex_lock(&dev->mdio_lock);

	DBG2(dev, "mdio_write(%02x,%02x,%04x)" NL, id, reg, val);

	/* Enable proper MDIO port */
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);

	/* Wait for management interface to be idle */
	n = 20;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait idle\n");
			goto bail;
		}
	}

	/* Issue write command */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_STACR_BASE(dev->opb_bus_freq);
	else
		r = EMAC_STACR_BASE(dev->opb_bus_freq);
	if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
		r |= EMAC_STACR_OC;
	if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
		r |= EMACX_STACR_STAC_WRITE;
	else
		r |= EMAC_STACR_STAC_WRITE;
	r |= (reg & EMAC_STACR_PRA_MASK) |
		((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT) |
		(val << EMAC_STACR_PHYD_SHIFT);
	out_be32(&p->stacr, r);

	/* Wait for write to complete */
	n = 200;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait complete\n");
			goto bail;
		}
	}
	err = 0;
 bail:
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
	mutex_unlock(&dev->mdio_lock);
}
static int emac_mdio_read(struct net_device *ndev, int id, int reg)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int res;

	res = __emac_mdio_read(dev->mdio_instance ? dev->mdio_instance : dev,
			       (u8) id, (u8) reg);
	return res;
}

static void emac_mdio_write(struct net_device *ndev, int id, int reg, int val)
{
	struct emac_instance *dev = netdev_priv(ndev);

	__emac_mdio_write(dev->mdio_instance ? dev->mdio_instance : dev,
			  (u8) id, (u8) reg, (u16) val);
}
/* Tx lock BH */
static void __emac_set_multicast_list(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 rmr = emac_iff2rmr(dev->ndev);

	DBG(dev, "__multicast %08x" NL, rmr);

	/* I decided to relax register access rules here to avoid
	 * a full EMAC reset.
	 *
	 * There is a real problem with the EMAC4 core if we use the MWSW_001
	 * bit in the MR1 register and do a full EMAC reset.
	 * One TX BD status update is delayed and, after the EMAC reset, it
	 * never happens, resulting in TX hanging (it'll be recovered by the
	 * TX timeout handler eventually, but this is just gross).
	 * So we either have to do a full TX reset or try to cheat here :)
	 *
	 * The only required change is to the RX mode register, so I *think*
	 * all we need is just to stop the RX channel. This seems to work on
	 * all tested SoCs.                                            --ebs
	 *
	 * If we need the full reset, we might just trigger the workqueue
	 * and do it async... a bit nasty but should work --BenH
	 */
	dev->mcast_pending = 0;
	emac_rx_disable(dev);
	if (rmr & EMAC_RMR_MAE)
		emac_hash_mc(dev);
	out_be32(&p->rmr, rmr);
	emac_rx_enable(dev);
}
/* Tx lock BH */
static void emac_set_multicast_list(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	DBG(dev, "multicast" NL);

	BUG_ON(!netif_running(dev->ndev));

	if (dev->no_mcast) {
		dev->mcast_pending = 1;
		return;
	}
	__emac_set_multicast_list(dev);
}
static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu)
{
	int rx_sync_size = emac_rx_sync_size(new_mtu);
	int rx_skb_size = emac_rx_skb_size(new_mtu);
	int i, ret = 0;

	mutex_lock(&dev->link_lock);
	emac_netif_stop(dev);
	emac_rx_disable(dev);
	mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);

	if (dev->rx_sg_skb) {
		++dev->estats.rx_dropped_resize;
		dev_kfree_skb(dev->rx_sg_skb);
		dev->rx_sg_skb = NULL;
	}

	/* Make a first pass over RX ring and mark BDs ready, dropping
	 * non-processed packets on the way. We need this as a separate pass
	 * to simplify error recovery in the case of allocation failure later.
	 */
	for (i = 0; i < NUM_RX_BUFF; ++i) {
		if (dev->rx_desc[i].ctrl & MAL_RX_CTRL_FIRST)
			++dev->estats.rx_dropped_resize;

		dev->rx_desc[i].data_len = 0;
		dev->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY |
		    (i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
	}

	/* Reallocate RX ring only if bigger skb buffers are required */
	if (rx_skb_size <= dev->rx_skb_size)
		goto skip;

	/* Second pass, allocate new skbs */
	for (i = 0; i < NUM_RX_BUFF; ++i) {
		struct sk_buff *skb = alloc_skb(rx_skb_size, GFP_ATOMIC);
		if (!skb) {
			ret = -ENOMEM;
			goto oom;
		}

		BUG_ON(!dev->rx_skb[i]);
		dev_kfree_skb(dev->rx_skb[i]);

		skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
		dev->rx_desc[i].data_ptr =
		    dma_map_single(&dev->ofdev->dev, skb->data - 2, rx_sync_size,
				   DMA_FROM_DEVICE) + 2;
		dev->rx_skb[i] = skb;
	}
 skip:
	/* Check if we need to change "Jumbo" bit in MR1 */
	if ((new_mtu > ETH_DATA_LEN) ^ (dev->ndev->mtu > ETH_DATA_LEN)) {
		/* This is to prevent starting RX channel in emac_rx_enable() */
		set_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);

		dev->ndev->mtu = new_mtu;
		emac_full_tx_reset(dev);
	}

	mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(new_mtu));
 oom:
	/* Restart RX */
	clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
	dev->rx_slot = 0;
	mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
	emac_rx_enable(dev);
	emac_netif_start(dev);
	mutex_unlock(&dev->link_lock);

	return ret;
}
/* Process ctx, rtnl_lock semaphore */
static int emac_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int ret = 0;

	if (new_mtu < EMAC_MIN_MTU || new_mtu > dev->max_mtu)
		return -EINVAL;

	DBG(dev, "change_mtu(%d)" NL, new_mtu);

	if (netif_running(ndev)) {
		/* Check if we really need to reinitialize RX ring */
		if (emac_rx_skb_size(ndev->mtu) != emac_rx_skb_size(new_mtu))
			ret = emac_resize_rx_ring(dev, new_mtu);
	}

	if (!ret) {
		ndev->mtu = new_mtu;
		dev->rx_skb_size = emac_rx_skb_size(new_mtu);
		dev->rx_sync_size = emac_rx_sync_size(new_mtu);
	}

	return ret;
}
static void emac_clean_tx_ring(struct emac_instance *dev)
{
	int i;

	for (i = 0; i < NUM_TX_BUFF; ++i) {
		if (dev->tx_skb[i]) {
			dev_kfree_skb(dev->tx_skb[i]);
			dev->tx_skb[i] = NULL;
			if (dev->tx_desc[i].ctrl & MAL_TX_CTRL_READY)
				++dev->estats.tx_dropped;
		}
		dev->tx_desc[i].ctrl = 0;
		dev->tx_desc[i].data_ptr = 0;
	}
}
static void emac_clean_rx_ring(struct emac_instance *dev)
{
	int i;

	for (i = 0; i < NUM_RX_BUFF; ++i)
		if (dev->rx_skb[i]) {
			dev->rx_desc[i].ctrl = 0;
			dev_kfree_skb(dev->rx_skb[i]);
			dev->rx_skb[i] = NULL;
			dev->rx_desc[i].data_ptr = 0;
		}

	if (dev->rx_sg_skb) {
		dev_kfree_skb(dev->rx_sg_skb);
		dev->rx_sg_skb = NULL;
	}
}
static inline int emac_alloc_rx_skb(struct emac_instance *dev, int slot,
				    gfp_t flags)
{
	struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags);
	if (unlikely(!skb))
		return -ENOMEM;

	dev->rx_skb[slot] = skb;
	dev->rx_desc[slot].data_len = 0;

	skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
	dev->rx_desc[slot].data_ptr =
	    dma_map_single(&dev->ofdev->dev, skb->data - 2, dev->rx_sync_size,
			   DMA_FROM_DEVICE) + 2;
	wmb();
	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);

	return 0;
}
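/* The "- 2" / "+ 2" pair above appears to be the usual IP-alignment trick:
 * skb_reserve() of EMAC_RX_SKB_HEADROOM + 2 offsets the buffer so the
 * 14-byte Ethernet header leaves the IP header word-aligned, while the DMA
 * mapping starts two bytes before skb->data so the synced region begins on
 * an aligned boundary; data_ptr still ends up pointing at skb->data itself.
 */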
static void emac_print_link_status(struct emac_instance *dev)
{
	if (netif_carrier_ok(dev->ndev))
		printk(KERN_INFO "%s: link is up, %d %s%s\n",
		       dev->ndev->name, dev->phy.speed,
		       dev->phy.duplex == DUPLEX_FULL ? "FDX" : "HDX",
		       dev->phy.pause ? ", pause enabled" :
		       dev->phy.asym_pause ? ", asymmetric pause enabled" : "");
	else
		printk(KERN_INFO "%s: link is down\n", dev->ndev->name);
}
/* Process ctx, rtnl_lock semaphore */
static int emac_open(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int err, i;

	DBG(dev, "open" NL);

	/* Setup error IRQ handler */
	err = request_irq(dev->emac_irq, emac_irq, 0, "EMAC", dev);
	if (err) {
		printk(KERN_ERR "%s: failed to request IRQ %d\n",
		       ndev->name, dev->emac_irq);
		return err;
	}

	/* Allocate RX ring */
	for (i = 0; i < NUM_RX_BUFF; ++i)
		if (emac_alloc_rx_skb(dev, i, GFP_KERNEL)) {
			printk(KERN_ERR "%s: failed to allocate RX ring\n",
			       ndev->name);
			goto oom;
		}

	dev->tx_cnt = dev->tx_slot = dev->ack_slot = dev->rx_slot = 0;
	clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
	dev->rx_sg_skb = NULL;

	mutex_lock(&dev->link_lock);
	dev->opened = 1;

	/* Start PHY polling now.
	 */
	if (dev->phy.address >= 0) {
		int link_poll_interval;
		if (dev->phy.def->ops->poll_link(&dev->phy)) {
			dev->phy.def->ops->read_link(&dev->phy);
			netif_carrier_on(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_ON;
		} else {
			netif_carrier_off(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_OFF;
		}
		dev->link_polling = 1;
		wmb();
		schedule_delayed_work(&dev->link_work, link_poll_interval);
		emac_print_link_status(dev);
	} else
		netif_carrier_on(dev->ndev);

	emac_configure(dev);
	mal_poll_add(dev->mal, &dev->commac);
	mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
	mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(ndev->mtu));
	mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
	emac_tx_enable(dev);
	emac_rx_enable(dev);
	emac_netif_start(dev);

	mutex_unlock(&dev->link_lock);

	return 0;
 oom:
	emac_clean_rx_ring(dev);
	free_irq(dev->emac_irq, dev);

	return -ENOMEM;
}
static int emac_link_differs(struct emac_instance *dev)
{
	u32 r = in_be32(&dev->emacp->mr1);

	int duplex = r & EMAC_MR1_FDE ? DUPLEX_FULL : DUPLEX_HALF;
	int speed, pause, asym_pause;

	if (r & EMAC_MR1_MF_1000)
		speed = SPEED_1000;
	else if (r & EMAC_MR1_MF_100)
		speed = SPEED_100;
	else
		speed = SPEED_10;

	switch (r & (EMAC_MR1_EIFC | EMAC_MR1_APP)) {
	case (EMAC_MR1_EIFC | EMAC_MR1_APP):
		pause = 1;
		asym_pause = 0;
		break;
	case EMAC_MR1_APP:
		pause = 0;
		asym_pause = 1;
		break;
	default:
		pause = asym_pause = 0;
	}
	return speed != dev->phy.speed || duplex != dev->phy.duplex ||
	    pause != dev->phy.pause || asym_pause != dev->phy.asym_pause;
}
static void emac_link_timer(struct work_struct *work)
{
	struct emac_instance *dev =
		container_of((struct delayed_work *)work,
			     struct emac_instance, link_work);
	int link_poll_interval;

	mutex_lock(&dev->link_lock);
	DBG2(dev, "link timer" NL);

	if (!dev->opened)
		goto bail;

	if (dev->phy.def->ops->poll_link(&dev->phy)) {
		if (!netif_carrier_ok(dev->ndev)) {
			/* Get new link parameters */
			dev->phy.def->ops->read_link(&dev->phy);

			netif_carrier_on(dev->ndev);
			emac_netif_stop(dev);
			emac_full_tx_reset(dev);
			emac_netif_start(dev);
			emac_print_link_status(dev);
		}
		link_poll_interval = PHY_POLL_LINK_ON;
	} else {
		if (netif_carrier_ok(dev->ndev)) {
			netif_carrier_off(dev->ndev);
			netif_tx_disable(dev->ndev);
			emac_reinitialize(dev);
			emac_print_link_status(dev);
		}
		link_poll_interval = PHY_POLL_LINK_OFF;
	}
	schedule_delayed_work(&dev->link_work, link_poll_interval);
 bail:
	mutex_unlock(&dev->link_lock);
}
static void emac_force_link_update(struct emac_instance *dev)
{
	netif_carrier_off(dev->ndev);
	smp_rmb();
	if (dev->link_polling) {
		cancel_rearming_delayed_work(&dev->link_work);
		if (dev->link_polling)
			schedule_delayed_work(&dev->link_work,  PHY_POLL_LINK_OFF);
	}
}
/* Process ctx, rtnl_lock semaphore */
static int emac_close(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	DBG(dev, "close" NL);

	if (dev->phy.address >= 0) {
		dev->link_polling = 0;
		cancel_rearming_delayed_work(&dev->link_work);
	}
	mutex_lock(&dev->link_lock);
	emac_netif_stop(dev);
	dev->opened = 0;
	mutex_unlock(&dev->link_lock);

	emac_rx_disable(dev);
	emac_tx_disable(dev);
	mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);
	mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
	mal_poll_del(dev->mal, &dev->commac);

	emac_clean_tx_ring(dev);
	emac_clean_rx_ring(dev);

	free_irq(dev->emac_irq, dev);

	return 0;
}
static inline u16 emac_tx_csum(struct emac_instance *dev,
			       struct sk_buff *skb)
{
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
	    skb->ip_summed == CHECKSUM_PARTIAL) {
		++dev->stats.tx_packets_csum;
		return EMAC_TX_CTRL_TAH_CSUM;
	}
	return 0;
}
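/* When a TAH checksum engine is present and the stack has only computed a
 * partial checksum (CHECKSUM_PARTIAL), the EMAC_TX_CTRL_TAH_CSUM flag
 * returned here is OR-ed into the buffer descriptor control word by the
 * transmit paths below so the TAH inserts the checksum on the way out;
 * otherwise the descriptor is submitted unmodified.
 */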
static inline int emac_xmit_finish(struct emac_instance *dev, int len)
{
	struct emac_regs __iomem *p = dev->emacp;
	struct net_device *ndev = dev->ndev;

	/* Send the packet out. If the if makes a significant perf
	 * difference, then we can store the TMR0 value in "dev"
	 * instead
	 */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		out_be32(&p->tmr0, EMAC4_TMR0_XMIT);
	else
		out_be32(&p->tmr0, EMAC_TMR0_XMIT);

	if (unlikely(++dev->tx_cnt == NUM_TX_BUFF)) {
		netif_stop_queue(ndev);
		DBG2(dev, "stopped TX queue" NL);
	}

	ndev->trans_start = jiffies;
	++dev->stats.tx_packets;
	dev->stats.tx_bytes += len;

	return 0;
}
/* Tx lock BH */
static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	unsigned int len = skb->len;
	int slot;

	u16 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
	    MAL_TX_CTRL_LAST | emac_tx_csum(dev, skb);

	slot = dev->tx_slot++;
	if (dev->tx_slot == NUM_TX_BUFF) {
		dev->tx_slot = 0;
		ctrl |= MAL_TX_CTRL_WRAP;
	}

	DBG2(dev, "xmit(%u) %d" NL, len, slot);

	dev->tx_skb[slot] = skb;
	dev->tx_desc[slot].data_ptr = dma_map_single(&dev->ofdev->dev,
						     skb->data, len,
						     DMA_TO_DEVICE);
	dev->tx_desc[slot].data_len = (u16) len;
	wmb();
	dev->tx_desc[slot].ctrl = ctrl;

	return emac_xmit_finish(dev, len);
}
#ifdef CONFIG_IBM_NEW_EMAC_TAH
static inline int emac_xmit_split(struct emac_instance *dev, int slot,
				  u32 pd, int len, int last, u16 base_ctrl)
{
	while (1) {
		u16 ctrl = base_ctrl;
		int chunk = min(len, MAL_MAX_TX_SIZE);
		len -= chunk;

		slot = (slot + 1) % NUM_TX_BUFF;

		if (last && !len)
			ctrl |= MAL_TX_CTRL_LAST;
		if (slot == NUM_TX_BUFF - 1)
			ctrl |= MAL_TX_CTRL_WRAP;

		dev->tx_skb[slot] = NULL;
		dev->tx_desc[slot].data_ptr = pd;
		dev->tx_desc[slot].data_len = (u16) chunk;
		dev->tx_desc[slot].ctrl = ctrl;
		++dev->tx_cnt;

		if (!len)
			break;

		pd += chunk;
	}
	return slot;
}
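/* emac_xmit_split() keeps consuming ring slots in MAL_MAX_TX_SIZE-sized
 * chunks until the remaining length reaches zero, and only the final chunk
 * of the final fragment gets MAL_TX_CTRL_LAST; this extra fragmentation is
 * why the free-slot estimate in emac_start_xmit_sg() below is only an
 * approximation.
 */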
/* Tx lock BH disabled (SG version for TAH equipped EMACs) */
static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int len = skb->len, chunk;
	int slot, i;
	u16 ctrl;
	u32 pd;

	/* This is common "fast" path */
	if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE))
		return emac_start_xmit(skb, ndev);

	len -= skb->data_len;

	/* Note, this is only an *estimation*, we can still run out of empty
	 * slots because of the additional fragmentation into
	 * MAL_MAX_TX_SIZE-sized chunks
	 */
	if (unlikely(dev->tx_cnt + nr_frags + mal_tx_chunks(len) > NUM_TX_BUFF))
		goto stop_queue;

	ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
	    emac_tx_csum(dev, skb);
	slot = dev->tx_slot;

	/* skb data */
	dev->tx_skb[slot] = NULL;
	chunk = min(len, MAL_MAX_TX_SIZE);
	dev->tx_desc[slot].data_ptr = pd =
	    dma_map_single(&dev->ofdev->dev, skb->data, len, DMA_TO_DEVICE);
	dev->tx_desc[slot].data_len = (u16) chunk;
	len -= chunk;
	if (unlikely(len))
		slot = emac_xmit_split(dev, slot, pd + chunk, len, !nr_frags,
				       ctrl);
	/* skb fragments */
	for (i = 0; i < nr_frags; ++i) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		len = frag->size;

		if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
			goto undo_frame;

		pd = dma_map_page(&dev->ofdev->dev, frag->page, frag->page_offset, len,
				  DMA_TO_DEVICE);

		slot = emac_xmit_split(dev, slot, pd, len, i == nr_frags - 1,
				       ctrl);
	}

	DBG2(dev, "xmit_sg(%u) %d - %d" NL, skb->len, dev->tx_slot, slot);

	/* Attach skb to the last slot so we don't release it too early */
	dev->tx_skb[slot] = skb;

	/* Send the packet out */
	if (dev->tx_slot == NUM_TX_BUFF - 1)
		ctrl |= MAL_TX_CTRL_WRAP;
	wmb();
	dev->tx_desc[dev->tx_slot].ctrl = ctrl;
	dev->tx_slot = (slot + 1) % NUM_TX_BUFF;

	return emac_xmit_finish(dev, skb->len);

 undo_frame:
	/* Well, too bad. Our previous estimation was overly optimistic.
	 * Undo everything.
	 */
	while (slot != dev->tx_slot) {
		dev->tx_desc[slot].ctrl = 0;
		--dev->tx_cnt;
		if (--slot < 0)
			slot = NUM_TX_BUFF - 1;
	}
	++dev->estats.tx_undo;

 stop_queue:
	netif_stop_queue(ndev);
	DBG2(dev, "stopped TX queue" NL);
	return 1;
}
#else
# define emac_start_xmit_sg	emac_start_xmit
#endif	/* !defined(CONFIG_IBM_NEW_EMAC_TAH) */
static void emac_parse_tx_error(struct emac_instance *dev, u16 ctrl)
{
	struct emac_error_stats *st = &dev->estats;

	DBG(dev, "BD TX error %04x" NL, ctrl);

	++st->tx_bd_errors;
	if (ctrl & EMAC_TX_ST_BFCS)
		++st->tx_bd_bad_fcs;
	if (ctrl & EMAC_TX_ST_LCS)
		++st->tx_bd_carrier_loss;
	if (ctrl & EMAC_TX_ST_ED)
		++st->tx_bd_excessive_deferral;
	if (ctrl & EMAC_TX_ST_EC)
		++st->tx_bd_excessive_collisions;
	if (ctrl & EMAC_TX_ST_LC)
		++st->tx_bd_late_collision;
	if (ctrl & EMAC_TX_ST_MC)
		++st->tx_bd_multple_collisions;
	if (ctrl & EMAC_TX_ST_SC)
		++st->tx_bd_single_collision;
	if (ctrl & EMAC_TX_ST_UR)
		++st->tx_bd_underrun;
	if (ctrl & EMAC_TX_ST_SQE)
		++st->tx_bd_sqe;
}
static void emac_poll_tx(void *param)
{
	struct emac_instance *dev = param;
	u32 bad_mask;

	DBG2(dev, "poll_tx, %d %d" NL, dev->tx_cnt, dev->ack_slot);

	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		bad_mask = EMAC_IS_BAD_TX_TAH;
	else
		bad_mask = EMAC_IS_BAD_TX;

	netif_tx_lock_bh(dev->ndev);
	if (dev->tx_cnt) {
		u16 ctrl;
		int slot = dev->ack_slot, n = 0;
	again:
		ctrl = dev->tx_desc[slot].ctrl;
		if (!(ctrl & MAL_TX_CTRL_READY)) {
			struct sk_buff *skb = dev->tx_skb[slot];
			++n;

			if (skb) {
				dev_kfree_skb(skb);
				dev->tx_skb[slot] = NULL;
			}
			slot = (slot + 1) % NUM_TX_BUFF;

			if (unlikely(ctrl & bad_mask))
				emac_parse_tx_error(dev, ctrl);

			if (--dev->tx_cnt)
				goto again;
		}
		if (n) {
			dev->ack_slot = slot;
			if (netif_queue_stopped(dev->ndev) &&
			    dev->tx_cnt < EMAC_TX_WAKEUP_THRESH)
				netif_wake_queue(dev->ndev);

			DBG2(dev, "tx %d pkts" NL, n);
		}
	}
	netif_tx_unlock_bh(dev->ndev);
}
static inline void emac_recycle_rx_skb(struct emac_instance *dev, int slot,
				       int len)
{
	struct sk_buff *skb = dev->rx_skb[slot];

	DBG2(dev, "recycle %d %d" NL, slot, len);

	if (len)
		dma_map_single(&dev->ofdev->dev, skb->data - 2,
			       EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE);

	dev->rx_desc[slot].data_len = 0;
	wmb();
	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
}
static void emac_parse_rx_error(struct emac_instance *dev, u16 ctrl)
{
	struct emac_error_stats *st = &dev->estats;

	DBG(dev, "BD RX error %04x" NL, ctrl);

	++st->rx_bd_errors;
	if (ctrl & EMAC_RX_ST_OE)
		++st->rx_bd_overrun;
	if (ctrl & EMAC_RX_ST_BP)
		++st->rx_bd_bad_packet;
	if (ctrl & EMAC_RX_ST_RP)
		++st->rx_bd_runt_packet;
	if (ctrl & EMAC_RX_ST_SE)
		++st->rx_bd_short_event;
	if (ctrl & EMAC_RX_ST_AE)
		++st->rx_bd_alignment_error;
	if (ctrl & EMAC_RX_ST_BFCS)
		++st->rx_bd_bad_fcs;
	if (ctrl & EMAC_RX_ST_PTL)
		++st->rx_bd_packet_too_long;
	if (ctrl & EMAC_RX_ST_ORE)
		++st->rx_bd_out_of_range;
	if (ctrl & EMAC_RX_ST_IRE)
		++st->rx_bd_in_range;
}
static inline void emac_rx_csum(struct emac_instance *dev,
				struct sk_buff *skb, u16 ctrl)
{
#ifdef CONFIG_IBM_NEW_EMAC_TAH
	if (!ctrl && dev->tah_dev) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		++dev->stats.rx_packets_csum;
	}
#endif
}
static inline int emac_rx_sg_append(struct emac_instance *dev, int slot)
{
	if (likely(dev->rx_sg_skb != NULL)) {
		int len = dev->rx_desc[slot].data_len;
		int tot_len = dev->rx_sg_skb->len + len;

		if (unlikely(tot_len + 2 > dev->rx_skb_size)) {
			++dev->estats.rx_dropped_mtu;
			dev_kfree_skb(dev->rx_sg_skb);
			dev->rx_sg_skb = NULL;
		} else {
			cacheable_memcpy(skb_tail_pointer(dev->rx_sg_skb),
					 dev->rx_skb[slot]->data, len);
			skb_put(dev->rx_sg_skb, len);
			emac_recycle_rx_skb(dev, slot, len);
			return 0;
		}
	}
	emac_recycle_rx_skb(dev, slot, 0);
	return -1;
}
/* NAPI poll context */
static int emac_poll_rx(void *param, int budget)
{
	struct emac_instance *dev = param;
	int slot = dev->rx_slot, received = 0;

	DBG2(dev, "poll_rx(%d)" NL, budget);

 again:
	while (budget > 0) {
		int len;
		struct sk_buff *skb;
		u16 ctrl = dev->rx_desc[slot].ctrl;

		if (ctrl & MAL_RX_CTRL_EMPTY)
			break;

		skb = dev->rx_skb[slot];
		mb();
		len = dev->rx_desc[slot].data_len;

		if (unlikely(!MAL_IS_SINGLE_RX(ctrl)))
			goto sg;

		ctrl &= EMAC_BAD_RX_MASK;
		if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
			emac_parse_rx_error(dev, ctrl);
			++dev->estats.rx_dropped_error;
			emac_recycle_rx_skb(dev, slot, 0);
			len = 0;
			goto next;
		}

		if (len && len < EMAC_RX_COPY_THRESH) {
			struct sk_buff *copy_skb =
			    alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2, GFP_ATOMIC);
			if (unlikely(!copy_skb))
				goto oom;

			skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2);
			cacheable_memcpy(copy_skb->data - 2, skb->data - 2,
					 len + 2);
			emac_recycle_rx_skb(dev, slot, len);
			skb = copy_skb;
		} else if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC)))
			goto oom;

		skb_put(skb, len);
	push_packet:
		skb->dev = dev->ndev;
		skb->protocol = eth_type_trans(skb, dev->ndev);
		emac_rx_csum(dev, skb, ctrl);

		if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
			++dev->estats.rx_dropped_stack;
	next:
		++dev->stats.rx_packets;
	skip:
		dev->stats.rx_bytes += len;
		slot = (slot + 1) % NUM_RX_BUFF;
		--budget;
		++received;
		continue;
	sg:
		if (ctrl & MAL_RX_CTRL_FIRST) {
			BUG_ON(dev->rx_sg_skb);
			if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) {
				DBG(dev, "rx OOM %d" NL, slot);
				++dev->estats.rx_dropped_oom;
				emac_recycle_rx_skb(dev, slot, 0);
			} else {
				dev->rx_sg_skb = skb;
				skb_put(skb, len);
			}
		} else if (!emac_rx_sg_append(dev, slot) &&
			   (ctrl & MAL_RX_CTRL_LAST)) {

			skb = dev->rx_sg_skb;
			dev->rx_sg_skb = NULL;

			ctrl &= EMAC_BAD_RX_MASK;
			if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
				emac_parse_rx_error(dev, ctrl);
				++dev->estats.rx_dropped_error;
				dev_kfree_skb(skb);
				len = 0;
			} else
				goto push_packet;
		}
		goto skip;
	oom:
		DBG(dev, "rx OOM %d" NL, slot);
		/* Drop the packet and recycle skb */
		++dev->estats.rx_dropped_oom;
		emac_recycle_rx_skb(dev, slot, 0);
		goto next;
	}

	if (received) {
		DBG2(dev, "rx %d BDs" NL, received);
		dev->rx_slot = slot;
	}

	if (unlikely(budget && test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags))) {
		mb();
		if (!(dev->rx_desc[slot].ctrl & MAL_RX_CTRL_EMPTY)) {
			DBG2(dev, "rx restart" NL);
			received = 0;
			goto again;
		}

		if (dev->rx_sg_skb) {
			DBG2(dev, "dropping partial rx packet" NL);
			++dev->estats.rx_dropped_error;
			dev_kfree_skb(dev->rx_sg_skb);
			dev->rx_sg_skb = NULL;
		}

		clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
		mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
		emac_rx_enable(dev);

		dev->rx_slot = 0;
	}
	return received;
}
/* NAPI poll context */
static int emac_peek_rx(void *param)
{
	struct emac_instance *dev = param;

	return !(dev->rx_desc[dev->rx_slot].ctrl & MAL_RX_CTRL_EMPTY);
}
/* NAPI poll context */
static int emac_peek_rx_sg(void *param)
{
	struct emac_instance *dev = param;

	int slot = dev->rx_slot;
	while (1) {
		u16 ctrl = dev->rx_desc[slot].ctrl;
		if (ctrl & MAL_RX_CTRL_EMPTY)
			return 0;
		else if (ctrl & MAL_RX_CTRL_LAST)
			return 1;

		slot = (slot + 1) % NUM_RX_BUFF;

		/* I'm just being paranoid here :) */
		if (unlikely(slot == dev->rx_slot))
			return 0;
	}
}
/* Hard IRQ */
static void emac_rxde(void *param)
{
	struct emac_instance *dev = param;

	++dev->estats.rx_stopped;
	emac_rx_disable_async(dev);
}
/* Hard IRQ */
static irqreturn_t emac_irq(int irq, void *dev_instance)
{
	struct emac_instance *dev = dev_instance;
	struct emac_regs __iomem *p = dev->emacp;
	struct emac_error_stats *st = &dev->estats;
	u32 isr;

	spin_lock(&dev->lock);

	isr = in_be32(&p->isr);
	out_be32(&p->isr, isr);

	DBG(dev, "isr = %08x" NL, isr);

	if (isr & EMAC4_ISR_TXPE)
		++st->tx_parity;
	if (isr & EMAC4_ISR_RXPE)
		++st->rx_parity;
	if (isr & EMAC4_ISR_TXUE)
		++st->tx_underrun;
	if (isr & EMAC4_ISR_RXOE)
		++st->rx_fifo_overrun;
	if (isr & EMAC_ISR_OVR)
		++st->rx_overrun;
	if (isr & EMAC_ISR_BP)
		++st->rx_bad_packet;
	if (isr & EMAC_ISR_RP)
		++st->rx_runt_packet;
	if (isr & EMAC_ISR_SE)
		++st->rx_short_event;
	if (isr & EMAC_ISR_ALE)
		++st->rx_alignment_error;
	if (isr & EMAC_ISR_BFCS)
		++st->rx_bad_fcs;
	if (isr & EMAC_ISR_PTLE)
		++st->rx_packet_too_long;
	if (isr & EMAC_ISR_ORE)
		++st->rx_out_of_range;
	if (isr & EMAC_ISR_IRE)
		++st->rx_in_range;
	if (isr & EMAC_ISR_SQE)
		++st->tx_sqe;
	if (isr & EMAC_ISR_TE)
		++st->tx_errors;

	spin_unlock(&dev->lock);

	return IRQ_HANDLED;
}
static struct net_device_stats *emac_stats(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	struct emac_stats *st = &dev->stats;
	struct emac_error_stats *est = &dev->estats;
	struct net_device_stats *nst = &dev->nstats;
	unsigned long flags;

	DBG2(dev, "stats" NL);

	/* Compute "legacy" statistics */
	spin_lock_irqsave(&dev->lock, flags);
	nst->rx_packets = (unsigned long)st->rx_packets;
	nst->rx_bytes = (unsigned long)st->rx_bytes;
	nst->tx_packets = (unsigned long)st->tx_packets;
	nst->tx_bytes = (unsigned long)st->tx_bytes;
	nst->rx_dropped = (unsigned long)(est->rx_dropped_oom +
					  est->rx_dropped_error +
					  est->rx_dropped_resize +
					  est->rx_dropped_mtu);
	nst->tx_dropped = (unsigned long)est->tx_dropped;

	nst->rx_errors = (unsigned long)est->rx_bd_errors;
	nst->rx_fifo_errors = (unsigned long)(est->rx_bd_overrun +
					      est->rx_fifo_overrun +
					      est->rx_overrun);
	nst->rx_frame_errors = (unsigned long)(est->rx_bd_alignment_error +
					       est->rx_alignment_error);
	nst->rx_crc_errors = (unsigned long)(est->rx_bd_bad_fcs +
					     est->rx_bad_fcs);
	nst->rx_length_errors = (unsigned long)(est->rx_bd_runt_packet +
						est->rx_bd_short_event +
						est->rx_bd_packet_too_long +
						est->rx_bd_out_of_range +
						est->rx_bd_in_range +
						est->rx_runt_packet +
						est->rx_short_event +
						est->rx_packet_too_long +
						est->rx_out_of_range +
						est->rx_in_range);

	nst->tx_errors = (unsigned long)(est->tx_bd_errors + est->tx_errors);
	nst->tx_fifo_errors = (unsigned long)(est->tx_bd_underrun +
					      est->tx_underrun);
	nst->tx_carrier_errors = (unsigned long)est->tx_bd_carrier_loss;
	nst->collisions = (unsigned long)(est->tx_bd_excessive_deferral +
					  est->tx_bd_excessive_collisions +
					  est->tx_bd_late_collision +
					  est->tx_bd_multple_collisions);
	spin_unlock_irqrestore(&dev->lock, flags);
	return nst;
}
static struct mal_commac_ops emac_commac_ops = {
	.poll_tx = &emac_poll_tx,
	.poll_rx = &emac_poll_rx,
	.peek_rx = &emac_peek_rx,
	.rxde = &emac_rxde,
};

static struct mal_commac_ops emac_commac_sg_ops = {
	.poll_tx = &emac_poll_tx,
	.poll_rx = &emac_poll_rx,
	.peek_rx = &emac_peek_rx_sg,
	.rxde = &emac_rxde,
};
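/* The two ops tables differ only in the RX peek hook: the scatter/gather
 * variant must walk the ring until it finds a descriptor flagged
 * MAL_RX_CTRL_LAST, because in that configuration a received packet may
 * span several buffer descriptors.
 */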
/* Ethtool support */
static int emac_ethtool_get_settings(struct net_device *ndev,
				     struct ethtool_cmd *cmd)
{
	struct emac_instance *dev = netdev_priv(ndev);

	cmd->supported = dev->phy.features;
	cmd->port = PORT_MII;
	cmd->phy_address = dev->phy.address;
	cmd->transceiver =
	    dev->phy.address >= 0 ? XCVR_EXTERNAL : XCVR_INTERNAL;

	mutex_lock(&dev->link_lock);
	cmd->advertising = dev->phy.advertising;
	cmd->autoneg = dev->phy.autoneg;
	cmd->speed = dev->phy.speed;
	cmd->duplex = dev->phy.duplex;
	mutex_unlock(&dev->link_lock);

	return 0;
}
static int emac_ethtool_set_settings(struct net_device *ndev,
				     struct ethtool_cmd *cmd)
{
	struct emac_instance *dev = netdev_priv(ndev);
	u32 f = dev->phy.features;

	DBG(dev, "set_settings(%d, %d, %d, 0x%08x)" NL,
	    cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising);

	/* Basic sanity checks */
	if (dev->phy.address < 0)
		return -EOPNOTSUPP;
	if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;
	if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
		return -EINVAL;
	if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_DISABLE) {
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_HALF
			    && !(f & SUPPORTED_10baseT_Half))
				return -EINVAL;
			if (cmd->duplex == DUPLEX_FULL
			    && !(f & SUPPORTED_10baseT_Full))
				return -EINVAL;
			break;
		case SPEED_100:
			if (cmd->duplex == DUPLEX_HALF
			    && !(f & SUPPORTED_100baseT_Half))
				return -EINVAL;
			if (cmd->duplex == DUPLEX_FULL
			    && !(f & SUPPORTED_100baseT_Full))
				return -EINVAL;
			break;
		case SPEED_1000:
			if (cmd->duplex == DUPLEX_HALF
			    && !(f & SUPPORTED_1000baseT_Half))
				return -EINVAL;
			if (cmd->duplex == DUPLEX_FULL
			    && !(f & SUPPORTED_1000baseT_Full))
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}

		mutex_lock(&dev->link_lock);
		dev->phy.def->ops->setup_forced(&dev->phy, cmd->speed,
						cmd->duplex);
		mutex_unlock(&dev->link_lock);

	} else {
		if (!(f & SUPPORTED_Autoneg))
			return -EINVAL;

		mutex_lock(&dev->link_lock);
		dev->phy.def->ops->setup_aneg(&dev->phy,
					      (cmd->advertising & f) |
					      (dev->phy.advertising &
					       (ADVERTISED_Pause |
						ADVERTISED_Asym_Pause)));
		mutex_unlock(&dev->link_lock);
	}
	emac_force_link_update(dev);

	return 0;
}
static void emac_ethtool_get_ringparam(struct net_device *ndev,
				       struct ethtool_ringparam *rp)
{
	rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF;
	rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF;
}
static void emac_ethtool_get_pauseparam(struct net_device *ndev,
					struct ethtool_pauseparam *pp)
{
	struct emac_instance *dev = netdev_priv(ndev);

	mutex_lock(&dev->link_lock);
	if ((dev->phy.features & SUPPORTED_Autoneg) &&
	    (dev->phy.advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause)))
		pp->autoneg = 1;

	if (dev->phy.duplex == DUPLEX_FULL) {
		if (dev->phy.pause)
			pp->rx_pause = pp->tx_pause = 1;
		else if (dev->phy.asym_pause)
			pp->tx_pause = 1;
	}
	mutex_unlock(&dev->link_lock);
}
static u32 emac_ethtool_get_rx_csum(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	return dev->tah_dev != NULL;
}
static int emac_get_regs_len(struct emac_instance *dev)
{
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		return sizeof(struct emac_ethtool_regs_subhdr) +
			EMAC4_ETHTOOL_REGS_SIZE;
	else
		return sizeof(struct emac_ethtool_regs_subhdr) +
			EMAC_ETHTOOL_REGS_SIZE;
}
static int emac_ethtool_get_regs_len(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int size;

	size = sizeof(struct emac_ethtool_regs_hdr) +
		emac_get_regs_len(dev) + mal_get_regs_len(dev->mal);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		size += zmii_get_regs_len(dev->zmii_dev);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		size += rgmii_get_regs_len(dev->rgmii_dev);
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		size += tah_get_regs_len(dev->tah_dev);

	return size;
}
static void *emac_dump_regs(struct emac_instance *dev, void *buf)
{
	struct emac_ethtool_regs_subhdr *hdr = buf;

	hdr->index = dev->cell_index;
	if (emac_has_feature(dev, EMAC_FTR_EMAC4)) {
		hdr->version = EMAC4_ETHTOOL_REGS_VER;
		memcpy_fromio(hdr + 1, dev->emacp, EMAC4_ETHTOOL_REGS_SIZE);
		return ((void *)(hdr + 1) + EMAC4_ETHTOOL_REGS_SIZE);
	} else {
		hdr->version = EMAC_ETHTOOL_REGS_VER;
		memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE);
		return ((void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE);
	}
}
static void emac_ethtool_get_regs(struct net_device *ndev,
				  struct ethtool_regs *regs, void *buf)
{
	struct emac_instance *dev = netdev_priv(ndev);
	struct emac_ethtool_regs_hdr *hdr = buf;

	hdr->components = 0;
	buf = hdr + 1;

	buf = mal_dump_regs(dev->mal, buf);
	buf = emac_dump_regs(dev, buf);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) {
		hdr->components |= EMAC_ETHTOOL_REGS_ZMII;
		buf = zmii_dump_regs(dev->zmii_dev, buf);
	}
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) {
		hdr->components |= EMAC_ETHTOOL_REGS_RGMII;
		buf = rgmii_dump_regs(dev->rgmii_dev, buf);
	}
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH)) {
		hdr->components |= EMAC_ETHTOOL_REGS_TAH;
		buf = tah_dump_regs(dev->tah_dev, buf);
	}
}
static int emac_ethtool_nway_reset(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int res = 0;

	DBG(dev, "nway_reset" NL);

	if (dev->phy.address < 0)
		return -EOPNOTSUPP;

	mutex_lock(&dev->link_lock);
	if (!dev->phy.autoneg) {
		res = -EINVAL;
		goto out;
	}

	dev->phy.def->ops->setup_aneg(&dev->phy, dev->phy.advertising);
 out:
	mutex_unlock(&dev->link_lock);
	emac_force_link_update(dev);
	return res;
}
static int emac_ethtool_get_stats_count(struct net_device *ndev)
{
	return EMAC_ETHTOOL_STATS_COUNT;
}
static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset,
				     u8 * buf)
{
	if (stringset == ETH_SS_STATS)
		memcpy(buf, &emac_stats_keys, sizeof(emac_stats_keys));
}
static void emac_ethtool_get_ethtool_stats(struct net_device *ndev,
					   struct ethtool_stats *estats,
					   u64 * tmp_stats)
{
	struct emac_instance *dev = netdev_priv(ndev);

	memcpy(tmp_stats, &dev->stats, sizeof(dev->stats));
	tmp_stats += sizeof(dev->stats) / sizeof(u64);
	memcpy(tmp_stats, &dev->estats, sizeof(dev->estats));
}
static void emac_ethtool_get_drvinfo(struct net_device *ndev,
				     struct ethtool_drvinfo *info)
{
	struct emac_instance *dev = netdev_priv(ndev);

	strcpy(info->driver, "ibm_emac");
	strcpy(info->version, DRV_VERSION);
	info->fw_version[0] = '\0';
	sprintf(info->bus_info, "PPC 4xx EMAC-%d %s",
		dev->cell_index, dev->ofdev->node->full_name);
	info->n_stats = emac_ethtool_get_stats_count(ndev);
	info->regdump_len = emac_ethtool_get_regs_len(ndev);
}
static const struct ethtool_ops emac_ethtool_ops = {
	.get_settings = emac_ethtool_get_settings,
	.set_settings = emac_ethtool_set_settings,
	.get_drvinfo = emac_ethtool_get_drvinfo,

	.get_regs_len = emac_ethtool_get_regs_len,
	.get_regs = emac_ethtool_get_regs,

	.nway_reset = emac_ethtool_nway_reset,

	.get_ringparam = emac_ethtool_get_ringparam,
	.get_pauseparam = emac_ethtool_get_pauseparam,

	.get_rx_csum = emac_ethtool_get_rx_csum,

	.get_strings = emac_ethtool_get_strings,
	.get_stats_count = emac_ethtool_get_stats_count,
	.get_ethtool_stats = emac_ethtool_get_ethtool_stats,

	.get_link = ethtool_op_get_link,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.get_sg = ethtool_op_get_sg,
};
static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
	struct emac_instance *dev = netdev_priv(ndev);
	uint16_t *data = (uint16_t *) & rq->ifr_ifru;

	DBG(dev, "ioctl %08x" NL, cmd);

	if (dev->phy.address < 0)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCDEVPRIVATE:
		data[0] = dev->phy.address;
		/* Fall through */
	case SIOCGMIIREG:
	case SIOCDEVPRIVATE + 1:
		data[3] = emac_mdio_read(ndev, dev->phy.address, data[1]);
		return 0;

	case SIOCSMIIREG:
	case SIOCDEVPRIVATE + 2:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		emac_mdio_write(ndev, dev->phy.address, data[1], data[2]);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
struct emac_depentry {
	u32 phandle;
	struct device_node *node;
	struct of_device *ofdev;
	void *drvdata;
};

#define	EMAC_DEP_MAL_IDX	0
#define	EMAC_DEP_ZMII_IDX	1
#define	EMAC_DEP_RGMII_IDX	2
#define	EMAC_DEP_TAH_IDX	3
#define	EMAC_DEP_MDIO_IDX	4
#define	EMAC_DEP_PREV_IDX	5
#define	EMAC_DEP_COUNT		6
static int __devinit emac_check_deps(struct emac_instance *dev,
				     struct emac_depentry *deps)
{
	int i, there = 0;
	struct device_node *np;

	for (i = 0; i < EMAC_DEP_COUNT; i++) {
		/* no dependency on that item, all right */
		if (deps[i].phandle == 0) {
			there++;
			continue;
		}
		/* special case for blist as the dependency might go away */
		if (i == EMAC_DEP_PREV_IDX) {
			np = *(dev->blist - 1);
			if (np == NULL) {
				deps[i].phandle = 0;
				there++;
				continue;
			}
			if (deps[i].node == NULL)
				deps[i].node = of_node_get(np);
		}
		if (deps[i].node == NULL)
			deps[i].node = of_find_node_by_phandle(deps[i].phandle);
		if (deps[i].node == NULL)
			continue;
		if (deps[i].ofdev == NULL)
			deps[i].ofdev = of_find_device_by_node(deps[i].node);
		if (deps[i].ofdev == NULL)
			continue;
		if (deps[i].drvdata == NULL)
			deps[i].drvdata = dev_get_drvdata(&deps[i].ofdev->dev);
		if (deps[i].drvdata != NULL)
			there++;
	}
	return (there == EMAC_DEP_COUNT);
}
static void emac_put_deps(struct emac_instance *dev)
{
	if (dev->mal_dev)
		of_dev_put(dev->mal_dev);
	if (dev->zmii_dev)
		of_dev_put(dev->zmii_dev);
	if (dev->rgmii_dev)
		of_dev_put(dev->rgmii_dev);
	if (dev->mdio_dev)
		of_dev_put(dev->mdio_dev);
	if (dev->tah_dev)
		of_dev_put(dev->tah_dev);
}
static int __devinit emac_of_bus_notify(struct notifier_block *nb,
					unsigned long action, void *data)
{
	/* We are only interested in device addition */
	if (action == BUS_NOTIFY_BOUND_DRIVER)
		wake_up_all(&emac_probe_wait);
	return 0;
}

static struct notifier_block emac_of_bus_notifier = {
	.notifier_call = emac_of_bus_notify
};
static int __devinit emac_wait_deps(struct emac_instance *dev)
{
	struct emac_depentry deps[EMAC_DEP_COUNT];
	int i, err;

	memset(&deps, 0, sizeof(deps));

	deps[EMAC_DEP_MAL_IDX].phandle = dev->mal_ph;
	deps[EMAC_DEP_ZMII_IDX].phandle = dev->zmii_ph;
	deps[EMAC_DEP_RGMII_IDX].phandle = dev->rgmii_ph;
	if (dev->tah_ph)
		deps[EMAC_DEP_TAH_IDX].phandle = dev->tah_ph;
	if (dev->mdio_ph)
		deps[EMAC_DEP_MDIO_IDX].phandle = dev->mdio_ph;
	if (dev->blist && dev->blist > emac_boot_list)
		deps[EMAC_DEP_PREV_IDX].phandle = 0xffffffffu;
	bus_register_notifier(&of_platform_bus_type, &emac_of_bus_notifier);
	wait_event_timeout(emac_probe_wait,
			   emac_check_deps(dev, deps),
			   EMAC_PROBE_DEP_TIMEOUT);
	bus_unregister_notifier(&of_platform_bus_type, &emac_of_bus_notifier);
	err = emac_check_deps(dev, deps) ? 0 : -ENODEV;
	for (i = 0; i < EMAC_DEP_COUNT; i++) {
		if (deps[i].node)
			of_node_put(deps[i].node);
		if (err && deps[i].ofdev)
			of_dev_put(deps[i].ofdev);
	}
	if (err == 0) {
		dev->mal_dev = deps[EMAC_DEP_MAL_IDX].ofdev;
		dev->zmii_dev = deps[EMAC_DEP_ZMII_IDX].ofdev;
		dev->rgmii_dev = deps[EMAC_DEP_RGMII_IDX].ofdev;
		dev->tah_dev = deps[EMAC_DEP_TAH_IDX].ofdev;
		dev->mdio_dev = deps[EMAC_DEP_MDIO_IDX].ofdev;
	}
	if (deps[EMAC_DEP_PREV_IDX].ofdev)
		of_dev_put(deps[EMAC_DEP_PREV_IDX].ofdev);
	return err;
}
static int __devinit emac_read_uint_prop(struct device_node *np, const char *name,
					 u32 *val, int fatal)
{
	int len;
	const u32 *prop = of_get_property(np, name, &len);
	if (prop == NULL || len < sizeof(u32)) {
		if (fatal)
			printk(KERN_ERR "%s: missing %s property\n",
			       np->full_name, name);
		return -ENODEV;
	}
	*val = *prop;
	return 0;
}
static int __devinit emac_init_phy(struct emac_instance *dev)
{
	struct device_node *np = dev->ofdev->node;
	struct net_device *ndev = dev->ndev;
	u32 phy_map, adv;
	int i;

	dev->phy.dev = ndev;
	dev->phy.mode = dev->phy_mode;

	/* PHY-less configuration.
	 * XXX I probably should move these settings to the dev tree
	 */
	if (dev->phy_address == 0xffffffff && dev->phy_map == 0xffffffff) {
		emac_reset(dev);

		dev->phy.address = -1;
		dev->phy.features = SUPPORTED_100baseT_Full | SUPPORTED_MII;
		dev->phy.pause = 1;

		return 0;
	}

	mutex_lock(&emac_phy_map_lock);
	phy_map = dev->phy_map | busy_phy_map;

	DBG(dev, "PHY maps %08x %08x" NL, dev->phy_map, busy_phy_map);

	dev->phy.mdio_read = emac_mdio_read;
	dev->phy.mdio_write = emac_mdio_write;

	/* Configure EMAC with defaults so we can at least use MDIO
	 * This is needed mostly for 440GX
	 */
	if (emac_phy_gpcs(dev->phy.mode)) {
		/* XXX
		 * Make GPCS PHY address equal to EMAC index.
		 * We probably should take into account busy_phy_map
		 * and/or phy_map here.
		 *
		 * Note that the busy_phy_map is currently global
		 * while it should probably be per-ASIC...
		 */
		dev->phy.address = dev->cell_index;
	}

	emac_configure(dev);

	if (dev->phy_address != 0xffffffff)
		phy_map = ~(1 << dev->phy_address);

	for (i = 0; i < 0x20; phy_map >>= 1, ++i)
		if (!(phy_map & 1)) {
			int r;
			busy_phy_map |= 1 << i;

			/* Quick check if there is a PHY at the address */
			r = emac_mdio_read(dev->ndev, i, MII_BMCR);
			if (r == 0xffff || r < 0)
				continue;
			if (!emac_mii_phy_probe(&dev->phy, i))
				break;
		}
	mutex_unlock(&emac_phy_map_lock);
	if (i == 0x20) {
		printk(KERN_WARNING "%s: can't find PHY!\n", np->full_name);
		return -ENXIO;
	}

	/* Init PHY */
	if (dev->phy.def->ops->init)
		dev->phy.def->ops->init(&dev->phy);

	/* Disable any PHY features not supported by the platform */
	dev->phy.def->features &= ~dev->phy_feat_exc;

	/* Setup initial link parameters */
	if (dev->phy.features & SUPPORTED_Autoneg) {
		adv = dev->phy.features;
		if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x))
			adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
		/* Restart autonegotiation */
		dev->phy.def->ops->setup_aneg(&dev->phy, adv);
	} else {
		u32 f = dev->phy.def->features;
		int speed = SPEED_10, fd = DUPLEX_HALF;

		/* Select highest supported speed/duplex */
		if (f & SUPPORTED_1000baseT_Full) {
			speed = SPEED_1000;
			fd = DUPLEX_FULL;
		} else if (f & SUPPORTED_1000baseT_Half)
			speed = SPEED_1000;
		else if (f & SUPPORTED_100baseT_Full) {
			speed = SPEED_100;
			fd = DUPLEX_FULL;
		} else if (f & SUPPORTED_100baseT_Half)
			speed = SPEED_100;
		else if (f & SUPPORTED_10baseT_Full)
			fd = DUPLEX_FULL;

		/* Force link parameters */
		dev->phy.def->ops->setup_forced(&dev->phy, speed, fd);
	}
	return 0;
}
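
/* All per-instance configuration comes from the device tree. A typical
 * EMAC node looks roughly like the sketch below; this is illustrative
 * only (addresses, values and the exact property set are board-specific,
 * see your platform's DTS):
 *
 *	EMAC0: ethernet@ef600800 {
 *		compatible = "ibm,emac4";
 *		reg = <0xef600800 0x70>;
 *		local-mac-address = [ 00 00 00 00 00 00 ];	// set by firmware
 *		mal-device = <&MAL0>;
 *		mal-tx-channel = <0>;
 *		mal-rx-channel = <0>;
 *		cell-index = <0>;
 *		max-frame-size = <1500>;
 *		rx-fifo-size = <4096>;
 *		tx-fifo-size = <2048>;
 *		phy-mode = "rgmii";
 *		rgmii-device = <&RGMII0>;
 *		rgmii-channel = <0>;
 *	};
 */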
static int __devinit emac_init_config(struct emac_instance *dev)
{
	struct device_node *np = dev->ofdev->node;
	const void *p;
	int plen;
	const char *pm, *phy_modes[] = {
		[PHY_MODE_NA] = "",
		[PHY_MODE_MII] = "mii",
		[PHY_MODE_RMII] = "rmii",
		[PHY_MODE_SMII] = "smii",
		[PHY_MODE_RGMII] = "rgmii",
		[PHY_MODE_TBI] = "tbi",
		[PHY_MODE_GMII] = "gmii",
		[PHY_MODE_RTBI] = "rtbi",
		[PHY_MODE_SGMII] = "sgmii",
	};

	/* Read config from device-tree */
	if (emac_read_uint_prop(np, "mal-device", &dev->mal_ph, 1))
		return -ENXIO;
	if (emac_read_uint_prop(np, "mal-tx-channel", &dev->mal_tx_chan, 1))
		return -ENXIO;
	if (emac_read_uint_prop(np, "mal-rx-channel", &dev->mal_rx_chan, 1))
		return -ENXIO;
	if (emac_read_uint_prop(np, "cell-index", &dev->cell_index, 1))
		return -ENXIO;
	if (emac_read_uint_prop(np, "max-frame-size", &dev->max_mtu, 0))
		dev->max_mtu = 1500;
	if (emac_read_uint_prop(np, "rx-fifo-size", &dev->rx_fifo_size, 0))
		dev->rx_fifo_size = 2048;
	if (emac_read_uint_prop(np, "tx-fifo-size", &dev->tx_fifo_size, 0))
		dev->tx_fifo_size = 2048;
	if (emac_read_uint_prop(np, "rx-fifo-size-gige", &dev->rx_fifo_size_gige, 0))
		dev->rx_fifo_size_gige = dev->rx_fifo_size;
	if (emac_read_uint_prop(np, "tx-fifo-size-gige", &dev->tx_fifo_size_gige, 0))
		dev->tx_fifo_size_gige = dev->tx_fifo_size;
	if (emac_read_uint_prop(np, "phy-address", &dev->phy_address, 0))
		dev->phy_address = 0xffffffff;
	if (emac_read_uint_prop(np, "phy-map", &dev->phy_map, 0))
		dev->phy_map = 0xffffffff;
	if (emac_read_uint_prop(np->parent, "clock-frequency", &dev->opb_bus_freq, 1))
		return -ENXIO;
	if (emac_read_uint_prop(np, "tah-device", &dev->tah_ph, 0))
		dev->tah_ph = 0;
	if (emac_read_uint_prop(np, "tah-channel", &dev->tah_port, 0))
		dev->tah_port = 0;
	if (emac_read_uint_prop(np, "mdio-device", &dev->mdio_ph, 0))
		dev->mdio_ph = 0;
	if (emac_read_uint_prop(np, "zmii-device", &dev->zmii_ph, 0))
		dev->zmii_ph = 0;
	if (emac_read_uint_prop(np, "zmii-channel", &dev->zmii_port, 0))
		dev->zmii_port = 0xffffffff;
	if (emac_read_uint_prop(np, "rgmii-device", &dev->rgmii_ph, 0))
		dev->rgmii_ph = 0;
	if (emac_read_uint_prop(np, "rgmii-channel", &dev->rgmii_port, 0))
		dev->rgmii_port = 0xffffffff;
	if (emac_read_uint_prop(np, "fifo-entry-size", &dev->fifo_entry_size, 0))
		dev->fifo_entry_size = 16;
	if (emac_read_uint_prop(np, "mal-burst-size", &dev->mal_burst_size, 0))
		dev->mal_burst_size = 256;

	/* PHY mode needs some decoding */
	dev->phy_mode = PHY_MODE_NA;
	pm = of_get_property(np, "phy-mode", &plen);
	if (pm != NULL) {
		int i;
		for (i = 0; i < ARRAY_SIZE(phy_modes); i++)
			if (!strcasecmp(pm, phy_modes[i])) {
				dev->phy_mode = i;
				break;
			}
	}

	/* Backward compat with non-final DT */
	if (dev->phy_mode == PHY_MODE_NA && pm != NULL && plen == 4) {
		u32 nmode = *(const u32 *)pm;
		if (nmode > PHY_MODE_NA && nmode <= PHY_MODE_SGMII)
			dev->phy_mode = nmode;
	}

	/* Check EMAC version */
	if (of_device_is_compatible(np, "ibm,emac4"))
		dev->features |= EMAC_FTR_EMAC4;

	/* Fixup some feature bits based on the device tree */
	if (of_get_property(np, "has-inverted-stacr-oc", NULL))
		dev->features |= EMAC_FTR_STACR_OC_INVERT;
	if (of_get_property(np, "has-new-stacr-staopc", NULL))
		dev->features |= EMAC_FTR_HAS_NEW_STACR;

	/* CAB lacks the appropriate properties */
	if (of_device_is_compatible(np, "ibm,emac-axon"))
		dev->features |= EMAC_FTR_HAS_NEW_STACR |
			EMAC_FTR_STACR_OC_INVERT;

	/* Enable TAH/ZMII/RGMII features as found */
	if (dev->tah_ph != 0) {
#ifdef CONFIG_IBM_NEW_EMAC_TAH
		dev->features |= EMAC_FTR_HAS_TAH;
#else
		printk(KERN_ERR "%s: TAH support not enabled !\n",
		       np->full_name);
		return -ENXIO;
#endif
	}
	if (dev->zmii_ph != 0) {
#ifdef CONFIG_IBM_NEW_EMAC_ZMII
		dev->features |= EMAC_FTR_HAS_ZMII;
#else
		printk(KERN_ERR "%s: ZMII support not enabled !\n",
		       np->full_name);
		return -ENXIO;
#endif
	}
	if (dev->rgmii_ph != 0) {
#ifdef CONFIG_IBM_NEW_EMAC_RGMII
		dev->features |= EMAC_FTR_HAS_RGMII;
#else
		printk(KERN_ERR "%s: RGMII support not enabled !\n",
		       np->full_name);
		return -ENXIO;
#endif
	}

	/* Read MAC-address */
	p = of_get_property(np, "local-mac-address", NULL);
	if (p == NULL) {
		printk(KERN_ERR "%s: Can't find local-mac-address property\n",
		       np->full_name);
		return -ENXIO;
	}
	memcpy(dev->ndev->dev_addr, p, 6);

	DBG(dev, "features     : 0x%08x / 0x%08x\n", dev->features, EMAC_FTRS_POSSIBLE);
	DBG(dev, "tx_fifo_size : %d (%d gige)\n", dev->tx_fifo_size, dev->tx_fifo_size_gige);
	DBG(dev, "rx_fifo_size : %d (%d gige)\n", dev->rx_fifo_size, dev->rx_fifo_size_gige);
	DBG(dev, "max_mtu      : %d\n", dev->max_mtu);
	DBG(dev, "OPB freq     : %d\n", dev->opb_bus_freq);

	return 0;
}
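
/* Probe order matters: we must not touch the hardware before
 * emac_wait_deps() has confirmed that the MAL and any bridge devices are
 * bound, and every step past that point has a matching teardown in the
 * error path at the bottom of this function.
 */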
static int __devinit emac_probe(struct of_device *ofdev,
				const struct of_device_id *match)
{
	struct net_device *ndev;
	struct emac_instance *dev;
	struct device_node *np = ofdev->node;
	struct device_node **blist = NULL;
	int err, i;

	/* Skip unused/unwired EMACS */
	if (of_get_property(np, "unused", NULL))
		return -ENODEV;

	/* Find ourselves in the bootlist if we are there */
	for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
		if (emac_boot_list[i] == np)
			blist = &emac_boot_list[i];

	/* Allocate our net_device structure */
	err = -ENOMEM;
	ndev = alloc_etherdev(sizeof(struct emac_instance));
	if (!ndev) {
		printk(KERN_ERR "%s: could not allocate ethernet device!\n",
		       np->full_name);
		goto err_gone;
	}
	dev = netdev_priv(ndev);
	dev->ndev = ndev;
	dev->ofdev = ofdev;
	dev->blist = blist;
	SET_NETDEV_DEV(ndev, &ofdev->dev);

	/* Initialize some embedded data structures */
	mutex_init(&dev->mdio_lock);
	mutex_init(&dev->link_lock);
	spin_lock_init(&dev->lock);
	INIT_WORK(&dev->reset_work, emac_reset_work);

	/* Init various config data based on device-tree */
	err = emac_init_config(dev);
	if (err != 0)
		goto err_free;

	/* Get interrupts. EMAC irq is mandatory, WOL irq is optional */
	dev->emac_irq = irq_of_parse_and_map(np, 0);
	dev->wol_irq = irq_of_parse_and_map(np, 1);
	if (dev->emac_irq == NO_IRQ) {
		printk(KERN_ERR "%s: Can't map main interrupt\n", np->full_name);
		err = -ENODEV;
		goto err_free;
	}
	ndev->irq = dev->emac_irq;

	/* Map EMAC regs */
	if (of_address_to_resource(np, 0, &dev->rsrc_regs)) {
		printk(KERN_ERR "%s: Can't get registers address\n",
		       np->full_name);
		goto err_irq_unmap;
	}
	// TODO : request_mem_region
	dev->emacp = ioremap(dev->rsrc_regs.start, sizeof(struct emac_regs));
	if (dev->emacp == NULL) {
		printk(KERN_ERR "%s: Can't map device registers!\n",
		       np->full_name);
		err = -ENOMEM;
		goto err_irq_unmap;
	}
	/* Wait for dependent devices */
	err = emac_wait_deps(dev);
	if (err) {
		printk(KERN_ERR
		       "%s: Timeout waiting for dependent devices\n",
		       np->full_name);
		/*  display more info about what's missing ? */
		goto err_reg_unmap;
	}
	dev->mal = dev_get_drvdata(&dev->mal_dev->dev);
	if (dev->mdio_dev != NULL)
		dev->mdio_instance = dev_get_drvdata(&dev->mdio_dev->dev);

	/* Register with MAL */
	dev->commac.ops = &emac_commac_ops;
	dev->commac.dev = dev;
	dev->commac.tx_chan_mask = MAL_CHAN_MASK(dev->mal_tx_chan);
	dev->commac.rx_chan_mask = MAL_CHAN_MASK(dev->mal_rx_chan);
	err = mal_register_commac(dev->mal, &dev->commac);
	if (err) {
		printk(KERN_ERR "%s: failed to register with mal %s!\n",
		       np->full_name, dev->mal_dev->node->full_name);
		goto err_rel_deps;
	}
	dev->rx_skb_size = emac_rx_skb_size(ndev->mtu);
	dev->rx_sync_size = emac_rx_sync_size(ndev->mtu);
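
	/* The buffer descriptor rings are not allocated here: they live in
	 * memory the MAL driver owns, and we only compute our TX/RX
	 * channels' offsets into it, then zero the rings before first use.
	 */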
	/* Get pointers to BD rings */
	dev->tx_desc =
	    dev->mal->bd_virt + mal_tx_bd_offset(dev->mal, dev->mal_tx_chan);
	dev->rx_desc =
	    dev->mal->bd_virt + mal_rx_bd_offset(dev->mal, dev->mal_rx_chan);

	DBG(dev, "tx_desc %p" NL, dev->tx_desc);
	DBG(dev, "rx_desc %p" NL, dev->rx_desc);

	/* Clean rings */
	memset(dev->tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor));
	memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor));
	/* Attach to ZMII, if needed */
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII) &&
	    (err = zmii_attach(dev->zmii_dev, dev->zmii_port, &dev->phy_mode)) != 0)
		goto err_unreg_commac;

	/* Attach to RGMII, if needed */
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII) &&
	    (err = rgmii_attach(dev->rgmii_dev, dev->rgmii_port, dev->phy_mode)) != 0)
		goto err_detach_zmii;

	/* Attach to TAH, if needed */
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
	    (err = tah_attach(dev->tah_dev, dev->tah_port)) != 0)
		goto err_detach_rgmii;

	/* Set some link defaults before we can find out real parameters */
	dev->phy.speed = SPEED_100;
	dev->phy.duplex = DUPLEX_FULL;
	dev->phy.autoneg = AUTONEG_DISABLE;
	dev->phy.pause = dev->phy.asym_pause = 0;
	dev->stop_timeout = STOP_TIMEOUT_100;
	INIT_DELAYED_WORK(&dev->link_work, emac_link_timer);

	/* Find PHY if any */
	err = emac_init_phy(dev);
	if (err != 0)
		goto err_detach_tah;
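
	/* Note: this driver predates net_device_ops, so the entry points
	 * are assigned straight into struct net_device. With a TAH present
	 * we advertise scatter/gather and checksum offload and switch to
	 * the SG-aware transmit path.
	 */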
	/* Fill in the driver function table */
	ndev->open = &emac_open;
#ifdef CONFIG_IBM_NEW_EMAC_TAH
	if (dev->tah_dev) {
		ndev->hard_start_xmit = &emac_start_xmit_sg;
		ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
	} else
#endif
		ndev->hard_start_xmit = &emac_start_xmit;
	ndev->tx_timeout = &emac_tx_timeout;
	ndev->watchdog_timeo = 5 * HZ;
	ndev->stop = &emac_close;
	ndev->get_stats = &emac_stats;
	ndev->set_multicast_list = &emac_set_multicast_list;
	ndev->do_ioctl = &emac_ioctl;
	if (emac_phy_supports_gige(dev->phy_mode)) {
		ndev->change_mtu = &emac_change_mtu;
		dev->commac.ops = &emac_commac_sg_ops;
	}
	SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);

	netif_carrier_off(ndev);
	netif_stop_queue(ndev);

	err = register_netdev(ndev);
	if (err) {
		printk(KERN_ERR "%s: failed to register net device (%d)!\n",
		       np->full_name, err);
		goto err_detach_tah;
	}
	/* Set our drvdata last as we don't want them visible until we are
	 * fully initialized
	 */
	wmb();
	dev_set_drvdata(&ofdev->dev, dev);

	/* There's a new kid in town ! Let's tell everybody */
	wake_up_all(&emac_probe_wait);

	printk(KERN_INFO
	       "%s: EMAC-%d %s, MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
	       ndev->name, dev->cell_index, np->full_name,
	       ndev->dev_addr[0], ndev->dev_addr[1], ndev->dev_addr[2],
	       ndev->dev_addr[3], ndev->dev_addr[4], ndev->dev_addr[5]);

	if (dev->phy.address >= 0)
		printk("%s: found %s PHY (0x%02x)\n", ndev->name,
		       dev->phy.def->name, dev->phy.address);

	emac_dbg_register(dev);

	return 0;
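
	/* Error unwind: the labels below run in reverse order of the setup
	 * steps above, so a failure at any point releases exactly what was
	 * acquired so far.
	 */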
	/* I have a bad feeling about this ... */

 err_detach_tah:
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		tah_detach(dev->tah_dev, dev->tah_port);
 err_detach_rgmii:
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
 err_detach_zmii:
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_detach(dev->zmii_dev, dev->zmii_port);
 err_unreg_commac:
	mal_unregister_commac(dev->mal, &dev->commac);
 err_rel_deps:
	emac_put_deps(dev);
 err_reg_unmap:
	iounmap(dev->emacp);
 err_irq_unmap:
	if (dev->wol_irq != NO_IRQ)
		irq_dispose_mapping(dev->wol_irq);
	if (dev->emac_irq != NO_IRQ)
		irq_dispose_mapping(dev->emac_irq);
 err_free:
	kfree(ndev);
 err_gone:
	/* if we were on the bootlist, remove us as we won't show up and
	 * wake up all waiters to notify them in case they were waiting
	 * on us
	 */
	if (blist) {
		*blist = NULL;
		wake_up_all(&emac_probe_wait);
	}
	return err;
}
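
/* Teardown mirrors emac_probe(): unpublish the device first, then peel
 * off TAH/RGMII/ZMII, drop the MAL registration and the dependency
 * references, and finally release the register mapping and IRQs.
 */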
static int __devexit emac_remove(struct of_device *ofdev)
{
	struct emac_instance *dev = dev_get_drvdata(&ofdev->dev);

	DBG(dev, "remove" NL);

	dev_set_drvdata(&ofdev->dev, NULL);

	unregister_netdev(dev->ndev);

	flush_scheduled_work();

	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		tah_detach(dev->tah_dev, dev->tah_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_detach(dev->zmii_dev, dev->zmii_port);

	mal_unregister_commac(dev->mal, &dev->commac);
	emac_put_deps(dev);

	emac_dbg_unregister(dev);
	iounmap(dev->emacp);

	if (dev->wol_irq != NO_IRQ)
		irq_dispose_mapping(dev->wol_irq);
	if (dev->emac_irq != NO_IRQ)
		irq_dispose_mapping(dev->emac_irq);

	kfree(dev->ndev);

	return 0;
}
/* XXX Features in here should be replaced by properties... */
static struct of_device_id emac_match[] =
{
	{
		.type		= "network",
		.compatible	= "ibm,emac",
	},
	{
		.type		= "network",
		.compatible	= "ibm,emac4",
	},
	{},
};

static struct of_platform_driver emac_driver = {
	.name = "emac",
	.match_table = emac_match,

	.probe = emac_probe,
	.remove = emac_remove,
};
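
/* The boot list orders EMACs by their "cell-index" property so that
 * interface numbering is stable and matches the hardware labeling
 * regardless of device-tree enumeration order. It also backs the
 * EMAC_DEP_PREV_IDX dependency: each EMAC waits for its predecessor
 * in this list to finish probing.
 */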
static void __init emac_make_bootlist(void)
{
	struct device_node *np = NULL;
	int j, max, i = 0, k;
	int cell_indices[EMAC_BOOT_LIST_SIZE];

	/* Collect EMACs */
	while ((np = of_find_all_nodes(np)) != NULL) {
		const u32 *idx;

		if (of_match_node(emac_match, np) == NULL)
			continue;
		if (of_get_property(np, "unused", NULL))
			continue;
		idx = of_get_property(np, "cell-index", NULL);
		if (idx == NULL)
			continue;
		cell_indices[i] = *idx;
		emac_boot_list[i++] = of_node_get(np);
		if (i >= EMAC_BOOT_LIST_SIZE) {
			of_node_put(np);
			break;
		}
	}
	max = i;

	/* Bubble sort them (doh, what a creative algorithm :-) */
	for (i = 0; max > 1 && (i < (max - 1)); i++)
		for (j = i; j < max; j++) {
			if (cell_indices[i] > cell_indices[j]) {
				np = emac_boot_list[i];
				emac_boot_list[i] = emac_boot_list[j];
				emac_boot_list[j] = np;
				k = cell_indices[i];
				cell_indices[i] = cell_indices[j];
				cell_indices[j] = k;
			}
		}
}
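
/* Submodules must come up before the platform driver registers: the
 * MAL and bridge drivers create the state the probe path above relies
 * on, and any init failure unwinds in reverse order.
 */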
static int __init emac_init(void)
{
	int rc;

	printk(KERN_INFO DRV_DESC ", version " DRV_VERSION "\n");

	/* Init debug stuff */
	emac_init_debug();

	/* Build EMAC boot list */
	emac_make_bootlist();

	/* Init submodules */
	rc = mal_init();
	if (rc)
		goto err;
	rc = zmii_init();
	if (rc)
		goto err_mal;
	rc = rgmii_init();
	if (rc)
		goto err_zmii;
	rc = tah_init();
	if (rc)
		goto err_rgmii;
	rc = of_register_platform_driver(&emac_driver);
	if (rc)
		goto err_tah;

	return 0;

 err_tah:
	tah_exit();
 err_rgmii:
	rgmii_exit();
 err_zmii:
	zmii_exit();
 err_mal:
	mal_exit();
 err:
	return rc;
}
static void __exit emac_exit(void)
{
	int i;

	of_unregister_platform_driver(&emac_driver);

	tah_exit();
	rgmii_exit();
	zmii_exit();
	mal_exit();
	emac_fini_debug();

	/* Destroy EMAC boot list */
	for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
		if (emac_boot_list[i])
			of_node_put(emac_boot_list[i]);
}
module_init(emac_init);
module_exit(emac_exit);