/*
 * drivers/net/ibm_emac/ibm_emac_core.c
 *
 * Driver for PowerPC 4xx on-chip ethernet controller.
 *
 * Copyright (c) 2004, 2005 Zultys Technologies.
 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
 *
 * Based on original work by
 *	Matt Porter <mporter@kernel.crashing.org>
 *	(c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
 *	Armin Kuster <akuster@mvista.com>
 *	Johnnie Peters <jpeters@mvista.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/bitops.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/uaccess.h>
#include <asm/ocp.h>

#include "ibm_emac_core.h"
#include "ibm_emac_debug.h"
/*
 * Lack of dma_unmap_???? calls is intentional.
 *
 * API-correct usage requires additional support state information to be
 * maintained for every RX and TX buffer descriptor (BD). Unfortunately, due to
 * EMAC design (e.g. TX buffer passed from network stack can be split into
 * several BDs, dma_map_single/dma_map_page can be used to map particular BD),
 * maintaining such information will add additional overhead.
 * Current DMA API implementation for 4xx processors only ensures cache coherency
 * and dma_unmap_???? routines are empty and are likely to stay this way.
 * I decided to omit dma_unmap_??? calls because I don't want to add additional
 * complexity just for the sake of following some abstract API, when it doesn't
 * add any real benefit to the driver. I understand that this decision may be
 * controversial, but I really tried to make the code API-correct and efficient
 * at the same time and didn't come up with code I liked :(. --ebs
 */
#define DRV_NAME        "emac"
#define DRV_VERSION     "3.54"
#define DRV_DESC        "PPC 4xx OCP EMAC driver"

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR
    ("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>");
MODULE_LICENSE("GPL");
/* Minimum number of free TX descriptors required to wake up TX process */
#define EMAC_TX_WAKEUP_THRESH		(NUM_TX_BUFF / 4)

/* If packet size is less than this number, we allocate a small skb and copy
 * the packet contents into it instead of just sending the original big skb up
 */
#define EMAC_RX_COPY_THRESH		CONFIG_IBM_EMAC_RX_COPY_THRESHOLD
/* Since multiple EMACs share MDIO lines in various ways, we need
 * to avoid re-using the same PHY ID in cases where the arch didn't
 * set up precise phy_map entries
 */
static u32 busy_phy_map;
#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX) && \
    (defined(CONFIG_405EP) || defined(CONFIG_440EP) || defined(CONFIG_440GR))
/* 405EP has an "EMAC to PHY Control Register" (CPC0_EPCTL) which can help us
 * with the PHY RX clock problem.
 * 440EP/440GR has a saner SDR0_MFR register implementation than 440GX, which
 * also allows controlling each EMAC clock individually.
 */
static inline void EMAC_RX_CLK_TX(int idx)
{
	unsigned long flags;
	local_irq_save(flags);

#if defined(CONFIG_405EP)
	mtdcr(0xf3, mfdcr(0xf3) | (1 << idx));
#else /* CONFIG_440EP || CONFIG_440GR */
	SDR_WRITE(DCRN_SDR_MFR, SDR_READ(DCRN_SDR_MFR) | (0x08000000 >> idx));
#endif

	local_irq_restore(flags);
}

static inline void EMAC_RX_CLK_DEFAULT(int idx)
{
	unsigned long flags;
	local_irq_save(flags);

#if defined(CONFIG_405EP)
	mtdcr(0xf3, mfdcr(0xf3) & ~(1 << idx));
#else /* CONFIG_440EP */
	SDR_WRITE(DCRN_SDR_MFR, SDR_READ(DCRN_SDR_MFR) & ~(0x08000000 >> idx));
#endif

	local_irq_restore(flags);
}
#else
#define EMAC_RX_CLK_TX(idx)		((void)0)
#define EMAC_RX_CLK_DEFAULT(idx)	((void)0)
#endif
#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX) && defined(CONFIG_440GX)
/* We can switch the Ethernet clock to the internal source through SDR0_MFR[ECS];
 * unfortunately this is less flexible than the 440EP case, because it's a global
 * setting for all EMACs, therefore we do this clock trick only during probe.
 */
#define EMAC_CLK_INTERNAL		SDR_WRITE(DCRN_SDR_MFR, \
					    SDR_READ(DCRN_SDR_MFR) | 0x08000000)
#define EMAC_CLK_EXTERNAL		SDR_WRITE(DCRN_SDR_MFR, \
					    SDR_READ(DCRN_SDR_MFR) & ~0x08000000)
#else
#define EMAC_CLK_INTERNAL		((void)0)
#define EMAC_CLK_EXTERNAL		((void)0)
#endif
/* I don't want to litter the system log with timeout errors
 * when we have a brain-damaged PHY.
 */
static inline void emac_report_timeout_error(struct ocp_enet_private *dev,
					     const char *error)
{
#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX)
	DBG("%d: %s" NL, dev->def->index, error);
#else
	if (net_ratelimit())
		printk(KERN_ERR "emac%d: %s\n", dev->def->index, error);
#endif
}
/* PHY polling intervals */
#define PHY_POLL_LINK_ON	HZ
#define PHY_POLL_LINK_OFF	(HZ / 5)

/* Graceful stop timeouts in us.
 * We should allow up to 1 frame time (full-duplex, ignoring collisions)
 */
#define STOP_TIMEOUT_10		1230
#define STOP_TIMEOUT_100	124
#define STOP_TIMEOUT_1000	13
#define STOP_TIMEOUT_1000_JUMBO	73
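/* A back-of-the-envelope check of the values above: one maximum-length wire
 * frame is 1518 bytes + 8 bytes preamble/SFD + 12 bytes inter-frame gap =
 * 1538 bytes = 12304 bits, so one frame time is 12304 bits / 10 Mbps ~= 1230us,
 * / 100 Mbps ~= 123us, / 1000 Mbps ~= 12.3us, which matches the constants
 * (rounded up). The jumbo value assumes a ~9000-byte MTU frame at gigabit
 * speed (~9038 bytes on the wire ~= 72.3us).
 */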
/* Please, keep in sync with struct ibm_emac_stats/ibm_emac_error_stats */
static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = {
	"rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum",
	"tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom",
	"rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu",
	"rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet",
	"rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error",
	"rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range",
	"rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun",
	"rx_bad_packet", "rx_runt_packet", "rx_short_event",
	"rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long",
	"rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors",
	"tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral",
	"tx_bd_excessive_collisions", "tx_bd_late_collision",
	"tx_bd_multple_collisions", "tx_bd_single_collision",
	"tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe",
	"tx_errors"
};
static irqreturn_t emac_irq(int irq, void *dev_instance);
static void emac_clean_tx_ring(struct ocp_enet_private *dev);
static inline int emac_phy_supports_gige(int phy_mode)
{
	return phy_mode == PHY_MODE_GMII ||
	    phy_mode == PHY_MODE_RGMII ||
	    phy_mode == PHY_MODE_TBI ||
	    phy_mode == PHY_MODE_RTBI;
}

static inline int emac_phy_gpcs(int phy_mode)
{
	return phy_mode == PHY_MODE_TBI ||
	    phy_mode == PHY_MODE_RTBI;
}
static inline void emac_tx_enable(struct ocp_enet_private *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	unsigned long flags;
	u32 r;

	local_irq_save(flags);

	DBG("%d: tx_enable" NL, dev->def->index);

	r = in_be32(&p->mr0);
	if (!(r & EMAC_MR0_TXE))
		out_be32(&p->mr0, r | EMAC_MR0_TXE);
	local_irq_restore(flags);
}
static void emac_tx_disable(struct ocp_enet_private *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	unsigned long flags;
	u32 r;

	local_irq_save(flags);

	DBG("%d: tx_disable" NL, dev->def->index);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_TXE) {
		int n = dev->stop_timeout;
		out_be32(&p->mr0, r & ~EMAC_MR0_TXE);
		while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n) {
			udelay(1);
			--n;
		}
		if (unlikely(!n))
			emac_report_timeout_error(dev, "TX disable timeout");
	}
	local_irq_restore(flags);
}
static void emac_rx_enable(struct ocp_enet_private *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	unsigned long flags;
	u32 r;

	local_irq_save(flags);
	if (unlikely(dev->commac.rx_stopped))
		goto out;

	DBG("%d: rx_enable" NL, dev->def->index);

	r = in_be32(&p->mr0);
	if (!(r & EMAC_MR0_RXE)) {
		if (unlikely(!(r & EMAC_MR0_RXI))) {
			/* Wait if previous async disable is still in progress */
			int n = dev->stop_timeout;
			while (!(r = in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
				udelay(1);
				--n;
			}
			if (unlikely(!n))
				emac_report_timeout_error(dev,
							  "RX disable timeout");
		}
		out_be32(&p->mr0, r | EMAC_MR0_RXE);
	}
      out:
	local_irq_restore(flags);
}
static void emac_rx_disable(struct ocp_enet_private *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	unsigned long flags;
	u32 r;

	local_irq_save(flags);

	DBG("%d: rx_disable" NL, dev->def->index);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_RXE) {
		int n = dev->stop_timeout;
		out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
		while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
			udelay(1);
			--n;
		}
		if (unlikely(!n))
			emac_report_timeout_error(dev, "RX disable timeout");
	}
	local_irq_restore(flags);
}
static inline void emac_rx_disable_async(struct ocp_enet_private *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	unsigned long flags;
	u32 r;

	local_irq_save(flags);

	DBG("%d: rx_disable_async" NL, dev->def->index);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_RXE)
		out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
	local_irq_restore(flags);
}
static int emac_reset(struct ocp_enet_private *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	unsigned long flags;
	int n = 20;

	DBG("%d: reset" NL, dev->def->index);

	local_irq_save(flags);

	if (!dev->reset_failed) {
		/* 40x erratum suggests stopping the RX channel before reset,
		 * we stop TX as well
		 */
		emac_rx_disable(dev);
		emac_tx_disable(dev);
	}

	out_be32(&p->mr0, EMAC_MR0_SRST);
	while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n)
		--n;
	local_irq_restore(flags);

	if (n) {
		dev->reset_failed = 0;
		return 0;
	} else {
		emac_report_timeout_error(dev, "reset timeout");
		dev->reset_failed = 1;
		return -ETIMEDOUT;
	}
}
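/* The EMAC filters multicast through a 64-bit hash table split across the
 * four 16-bit GAHT registers: the top 6 bits of the CRC-32 of the destination
 * MAC address select one of the 64 bits. emac_hash_mc() below recomputes the
 * whole table from the current multicast list.
 */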
static void emac_hash_mc(struct ocp_enet_private *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u16 gaht[4] = { 0 };
	struct dev_mc_list *dmi;

	DBG("%d: hash_mc %d" NL, dev->def->index, dev->ndev->mc_count);

	for (dmi = dev->ndev->mc_list; dmi; dmi = dmi->next) {
		int bit;
		DBG2("%d: mc %02x:%02x:%02x:%02x:%02x:%02x" NL,
		     dev->def->index,
		     dmi->dmi_addr[0], dmi->dmi_addr[1], dmi->dmi_addr[2],
		     dmi->dmi_addr[3], dmi->dmi_addr[4], dmi->dmi_addr[5]);

		bit = 63 - (ether_crc(ETH_ALEN, dmi->dmi_addr) >> 26);
		gaht[bit >> 4] |= 0x8000 >> (bit & 0x0f);
	}
	out_be32(&p->gaht1, gaht[0]);
	out_be32(&p->gaht2, gaht[1]);
	out_be32(&p->gaht3, gaht[2]);
	out_be32(&p->gaht4, gaht[3]);
}
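/* Translate netdevice flags into an RMR value: promiscuous mode maps to
 * "receive everything" (PME), "all multicast" or a large (>32 entries)
 * multicast list to "receive all multicast" (PMME), and a small multicast
 * list to the GAHT hash match (MAE).
 */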
static inline u32 emac_iff2rmr(struct net_device *ndev)
{
	u32 r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE |
	    EMAC_RMR_BASE;

	if (ndev->flags & IFF_PROMISC)
		r |= EMAC_RMR_PME;
	else if (ndev->flags & IFF_ALLMULTI || ndev->mc_count > 32)
		r |= EMAC_RMR_PMME;
	else if (ndev->mc_count > 0)
		r |= EMAC_RMR_MAE;

	return r;
}

/* OPB bus frequency rounded to the nearest MHz */
static inline int emac_opb_mhz(void)
{
	return (ocp_sys_info.opb_bus_freq + 500000) / 1000000;
}
/* BHs disabled */
static int emac_configure(struct ocp_enet_private *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	struct net_device *ndev = dev->ndev;
	int gige;
	u32 r;

	DBG("%d: configure" NL, dev->def->index);

	if (emac_reset(dev) < 0)
		return -ETIMEDOUT;

	tah_reset(dev->tah_dev);

	/* Mode register */
	r = EMAC_MR1_BASE(emac_opb_mhz()) | EMAC_MR1_VLE | EMAC_MR1_IST;
	if (dev->phy.duplex == DUPLEX_FULL)
		r |= EMAC_MR1_FDE | EMAC_MR1_MWSW_001;
	dev->stop_timeout = STOP_TIMEOUT_10;
	switch (dev->phy.speed) {
	case SPEED_1000:
		if (emac_phy_gpcs(dev->phy.mode)) {
			r |= EMAC_MR1_MF_1000GPCS |
			    EMAC_MR1_MF_IPPA(dev->phy.address);

			/* Put some arbitrary OUI, Manuf & Rev IDs so we can
			 * identify this GPCS PHY later.
			 */
			out_be32(&p->ipcr, 0xdeadbeef);
		} else
			r |= EMAC_MR1_MF_1000;
		r |= EMAC_MR1_RFS_16K;
		gige = 1;

		if (dev->ndev->mtu > ETH_DATA_LEN) {
			r |= EMAC_MR1_JPSM;
			dev->stop_timeout = STOP_TIMEOUT_1000_JUMBO;
		} else
			dev->stop_timeout = STOP_TIMEOUT_1000;
		break;
	case SPEED_100:
		r |= EMAC_MR1_MF_100;
		dev->stop_timeout = STOP_TIMEOUT_100;
		/* Fall through */
	default:
		r |= EMAC_MR1_RFS_4K;
		gige = 0;
		break;
	}

	if (dev->rgmii_dev)
		rgmii_set_speed(dev->rgmii_dev, dev->rgmii_input,
				dev->phy.speed);
	else
		zmii_set_speed(dev->zmii_dev, dev->zmii_input, dev->phy.speed);

#if !defined(CONFIG_40x)
	/* on 40x an erratum forces us to NOT use integrated flow control,
	 * let's hope it works on 44x ;)
	 */
	if (dev->phy.duplex == DUPLEX_FULL) {
		if (dev->phy.pause)
			r |= EMAC_MR1_EIFC | EMAC_MR1_APP;
		else if (dev->phy.asym_pause)
			r |= EMAC_MR1_APP;
	}
#endif
	out_be32(&p->mr1, r);

	/* Set individual MAC address */
	out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
	out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
		 (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
		 ndev->dev_addr[5]);

	/* VLAN Tag Protocol ID */
	out_be32(&p->vtpid, 0x8100);

	/* Receive mode register */
	r = emac_iff2rmr(ndev);
	if (r & EMAC_RMR_MAE)
		emac_hash_mc(dev);
	out_be32(&p->rmr, r);

	/* FIFOs thresholds */
	r = EMAC_TMR1((EMAC_MAL_BURST_SIZE / EMAC_FIFO_ENTRY_SIZE) + 1,
		      EMAC_TX_FIFO_SIZE / 2 / EMAC_FIFO_ENTRY_SIZE);
	out_be32(&p->tmr1, r);
	out_be32(&p->trtr, EMAC_TRTR(EMAC_TX_FIFO_SIZE / 2));

	/* A PAUSE frame is sent when the RX FIFO reaches its high-water mark;
	   there should still be enough space in the FIFO to allow our link
	   partner time to process this frame and also time to send the PAUSE
	   frame itself.

	   Here is the worst case scenario for the RX FIFO "headroom"
	   (from "The Switch Book") (100Mbps, without preamble, inter-frame gap):

	   1) One maximum-length frame on TX                    1522 bytes
	   2) One PAUSE frame time                                64 bytes
	   3) PAUSE frame decode time allowance                   64 bytes
	   4) One maximum-length frame on RX                    1522 bytes
	   5) Round-trip propagation delay of the link (100Mb)    15 bytes
	   ----------
	   3187 bytes

	   I chose to set the high-water mark to RX_FIFO_SIZE / 4 (1024 bytes)
	   and the low-water mark to RX_FIFO_SIZE / 8 (512 bytes)
	 */
	r = EMAC_RWMR(EMAC_RX_FIFO_SIZE(gige) / 8 / EMAC_FIFO_ENTRY_SIZE,
		      EMAC_RX_FIFO_SIZE(gige) / 4 / EMAC_FIFO_ENTRY_SIZE);
	out_be32(&p->rwmr, r);

	/* Set PAUSE timer to the maximum */
	out_be32(&p->ptr, 0xffff);

	/* IRQ sources */
	out_be32(&p->iser, EMAC_ISR_TXPE | EMAC_ISR_RXPE | /* EMAC_ISR_TXUE |
		 EMAC_ISR_RXOE | */ EMAC_ISR_OVR | EMAC_ISR_BP | EMAC_ISR_SE |
		 EMAC_ISR_ALE | EMAC_ISR_BFCS | EMAC_ISR_PTLE | EMAC_ISR_ORE |
		 EMAC_ISR_IRE | EMAC_ISR_TE);

	/* We need to take the GPCS PHY out of isolate mode after EMAC reset */
	if (emac_phy_gpcs(dev->phy.mode))
		mii_reset_phy(&dev->phy);

	return 0;
}
/* BHs disabled */
static void emac_reinitialize(struct ocp_enet_private *dev)
{
	DBG("%d: reinitialize" NL, dev->def->index);

	if (!emac_configure(dev)) {
		emac_tx_enable(dev);
		emac_rx_enable(dev);
	}
}
/* BHs disabled */
static void emac_full_tx_reset(struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	struct ocp_func_emac_data *emacdata = dev->def->additions;

	DBG("%d: full_tx_reset" NL, dev->def->index);

	emac_tx_disable(dev);
	mal_disable_tx_channel(dev->mal, emacdata->mal_tx_chan);
	emac_clean_tx_ring(dev);
	dev->tx_cnt = dev->tx_slot = dev->ack_slot = 0;

	emac_configure(dev);

	mal_enable_tx_channel(dev->mal, emacdata->mal_tx_chan);
	emac_tx_enable(dev);
	emac_rx_enable(dev);

	netif_wake_queue(ndev);
}
static int __emac_mdio_read(struct ocp_enet_private *dev, u8 id, u8 reg)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;
	int n;

	DBG2("%d: mdio_read(%02x,%02x)" NL, dev->def->index, id, reg);

	/* Enable proper MDIO port */
	zmii_enable_mdio(dev->zmii_dev, dev->zmii_input);

	/* Wait for management interface to become idle */
	n = 10;
	while (!emac_phy_done(in_be32(&p->stacr))) {
		udelay(1);
		if (!--n)
			goto timeout;
	}

	/* Issue read command */
	out_be32(&p->stacr,
		 EMAC_STACR_BASE(emac_opb_mhz()) | EMAC_STACR_STAC_READ |
		 (reg & EMAC_STACR_PRA_MASK)
		 | ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT)
		 | EMAC_STACR_START);

	/* Wait for read to complete */
	n = 100;
	while (!emac_phy_done(r = in_be32(&p->stacr))) {
		udelay(1);
		if (!--n)
			goto timeout;
	}

	if (unlikely(r & EMAC_STACR_PHYE)) {
		DBG("%d: mdio_read(%02x, %02x) failed" NL, dev->def->index,
		    id, reg);
		return -EREMOTEIO;
	}

	r = ((r >> EMAC_STACR_PHYD_SHIFT) & EMAC_STACR_PHYD_MASK);
	DBG2("%d: mdio_read -> %04x" NL, dev->def->index, r);
	return r;
      timeout:
	DBG("%d: MII management interface timeout (read)" NL, dev->def->index);
	return -ETIMEDOUT;
}
static void __emac_mdio_write(struct ocp_enet_private *dev, u8 id, u8 reg,
			      u16 val)
{
	struct emac_regs __iomem *p = dev->emacp;
	int n;

	DBG2("%d: mdio_write(%02x,%02x,%04x)" NL, dev->def->index, id, reg,
	     val);

	/* Enable proper MDIO port */
	zmii_enable_mdio(dev->zmii_dev, dev->zmii_input);

	/* Wait for management interface to be idle */
	n = 10;
	while (!emac_phy_done(in_be32(&p->stacr))) {
		udelay(1);
		if (!--n)
			goto timeout;
	}

	/* Issue write command */
	out_be32(&p->stacr,
		 EMAC_STACR_BASE(emac_opb_mhz()) | EMAC_STACR_STAC_WRITE |
		 (reg & EMAC_STACR_PRA_MASK) |
		 ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT) |
		 (val << EMAC_STACR_PHYD_SHIFT) | EMAC_STACR_START);

	/* Wait for write to complete */
	n = 100;
	while (!emac_phy_done(in_be32(&p->stacr))) {
		udelay(1);
		if (!--n)
			goto timeout;
	}
	return;
      timeout:
	DBG("%d: MII management interface timeout (write)" NL, dev->def->index);
}
static int emac_mdio_read(struct net_device *ndev, int id, int reg)
{
	struct ocp_enet_private *dev = ndev->priv;
	int res;

	local_bh_disable();
	res = __emac_mdio_read(dev->mdio_dev ? dev->mdio_dev : dev, (u8) id,
			       (u8) reg);
	local_bh_enable();
	return res;
}

static void emac_mdio_write(struct net_device *ndev, int id, int reg, int val)
{
	struct ocp_enet_private *dev = ndev->priv;

	local_bh_disable();
	__emac_mdio_write(dev->mdio_dev ? dev->mdio_dev : dev, (u8) id,
			  (u8) reg, (u16) val);
	local_bh_enable();
}
/* BHs disabled */
static void emac_set_multicast_list(struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	struct emac_regs __iomem *p = dev->emacp;
	u32 rmr = emac_iff2rmr(ndev);

	DBG("%d: multicast %08x" NL, dev->def->index, rmr);
	BUG_ON(!netif_running(dev->ndev));

	/* I decided to relax register access rules here to avoid
	 * a full EMAC reset.
	 *
	 * There is a real problem with the EMAC4 core if we use the MWSW_001
	 * bit in the MR1 register and do a full EMAC reset.
	 * One TX BD status update is delayed and, after EMAC reset, it
	 * never happens, resulting in a TX hang (it'll be recovered by the TX
	 * timeout handler eventually, but this is just gross).
	 * So we either have to do a full TX reset or try to cheat here :)
	 *
	 * The only required change is to the RX mode register, so I *think* all
	 * we need is just to stop the RX channel. This seems to work on all
	 * tested SoCs.                                                --ebs
	 */
	emac_rx_disable(dev);
	if (rmr & EMAC_RMR_MAE)
		emac_hash_mc(dev);
	out_be32(&p->rmr, rmr);
	emac_rx_enable(dev);
}
/* BHs disabled */
static int emac_resize_rx_ring(struct ocp_enet_private *dev, int new_mtu)
{
	struct ocp_func_emac_data *emacdata = dev->def->additions;
	int rx_sync_size = emac_rx_sync_size(new_mtu);
	int rx_skb_size = emac_rx_skb_size(new_mtu);
	int i, ret = 0;

	emac_rx_disable(dev);
	mal_disable_rx_channel(dev->mal, emacdata->mal_rx_chan);

	if (dev->rx_sg_skb) {
		++dev->estats.rx_dropped_resize;
		dev_kfree_skb(dev->rx_sg_skb);
		dev->rx_sg_skb = NULL;
	}

	/* Make a first pass over RX ring and mark BDs ready, dropping
	 * non-processed packets on the way. We need this as a separate pass
	 * to simplify error recovery in the case of allocation failure later.
	 */
	for (i = 0; i < NUM_RX_BUFF; ++i) {
		if (dev->rx_desc[i].ctrl & MAL_RX_CTRL_FIRST)
			++dev->estats.rx_dropped_resize;

		dev->rx_desc[i].data_len = 0;
		dev->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY |
		    (i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
	}

	/* Reallocate RX ring only if bigger skb buffers are required */
	if (rx_skb_size <= dev->rx_skb_size)
		goto skip;

	/* Second pass, allocate new skbs */
	for (i = 0; i < NUM_RX_BUFF; ++i) {
		struct sk_buff *skb = alloc_skb(rx_skb_size, GFP_ATOMIC);
		if (!skb) {
			ret = -ENOMEM;
			goto oom;
		}

		BUG_ON(!dev->rx_skb[i]);
		dev_kfree_skb(dev->rx_skb[i]);

		skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
		dev->rx_desc[i].data_ptr =
		    dma_map_single(dev->ldev, skb->data - 2, rx_sync_size,
				   DMA_FROM_DEVICE) + 2;
		dev->rx_skb[i] = skb;
	}
      skip:
	/* Check if we need to change the "Jumbo" bit in MR1 */
	if ((new_mtu > ETH_DATA_LEN) ^ (dev->ndev->mtu > ETH_DATA_LEN)) {
		/* This is to prevent starting RX channel in emac_rx_enable() */
		dev->commac.rx_stopped = 1;

		dev->ndev->mtu = new_mtu;
		emac_full_tx_reset(dev->ndev);
	}

	mal_set_rcbs(dev->mal, emacdata->mal_rx_chan, emac_rx_size(new_mtu));
      oom:
	/* Restart RX */
	dev->commac.rx_stopped = dev->rx_slot = 0;
	mal_enable_rx_channel(dev->mal, emacdata->mal_rx_chan);
	emac_rx_enable(dev);

	return ret;
}
/* Process ctx, rtnl_lock semaphore */
static int emac_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct ocp_enet_private *dev = ndev->priv;
	int ret = 0;

	if (new_mtu < EMAC_MIN_MTU || new_mtu > EMAC_MAX_MTU)
		return -EINVAL;

	DBG("%d: change_mtu(%d)" NL, dev->def->index, new_mtu);

	local_bh_disable();
	if (netif_running(ndev)) {
		/* Check if we really need to reinitialize the RX ring */
		if (emac_rx_skb_size(ndev->mtu) != emac_rx_skb_size(new_mtu))
			ret = emac_resize_rx_ring(dev, new_mtu);
	}

	if (!ret) {
		ndev->mtu = new_mtu;
		dev->rx_skb_size = emac_rx_skb_size(new_mtu);
		dev->rx_sync_size = emac_rx_sync_size(new_mtu);
	}
	local_bh_enable();

	return ret;
}
static void emac_clean_tx_ring(struct ocp_enet_private *dev)
{
	int i;
	for (i = 0; i < NUM_TX_BUFF; ++i) {
		if (dev->tx_skb[i]) {
			dev_kfree_skb(dev->tx_skb[i]);
			dev->tx_skb[i] = NULL;
			if (dev->tx_desc[i].ctrl & MAL_TX_CTRL_READY)
				++dev->estats.tx_dropped;
		}
		dev->tx_desc[i].ctrl = 0;
		dev->tx_desc[i].data_ptr = 0;
	}
}
static void emac_clean_rx_ring(struct ocp_enet_private *dev)
{
	int i;
	for (i = 0; i < NUM_RX_BUFF; ++i)
		if (dev->rx_skb[i]) {
			dev->rx_desc[i].ctrl = 0;
			dev_kfree_skb(dev->rx_skb[i]);
			dev->rx_skb[i] = NULL;
			dev->rx_desc[i].data_ptr = 0;
		}

	if (dev->rx_sg_skb) {
		dev_kfree_skb(dev->rx_sg_skb);
		dev->rx_sg_skb = NULL;
	}
}
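/* RX skbs are allocated with 2 bytes of extra headroom so that the 14-byte
 * Ethernet header leaves the IP header word-aligned; the DMA mapping below
 * starts 2 bytes before skb->data and the descriptor pointer compensates
 * with "+ 2".
 */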
static inline int emac_alloc_rx_skb(struct ocp_enet_private *dev, int slot,
				    gfp_t flags)
{
	struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags);
	if (unlikely(!skb))
		return -ENOMEM;

	dev->rx_skb[slot] = skb;
	dev->rx_desc[slot].data_len = 0;

	skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
	dev->rx_desc[slot].data_ptr =
	    dma_map_single(dev->ldev, skb->data - 2, dev->rx_sync_size,
			   DMA_FROM_DEVICE) + 2;
	barrier();
	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);

	return 0;
}
static void emac_print_link_status(struct ocp_enet_private *dev)
{
	if (netif_carrier_ok(dev->ndev))
		printk(KERN_INFO "%s: link is up, %d %s%s\n",
		       dev->ndev->name, dev->phy.speed,
		       dev->phy.duplex == DUPLEX_FULL ? "FDX" : "HDX",
		       dev->phy.pause ? ", pause enabled" :
		       dev->phy.asym_pause ? ", asymmetric pause enabled" : "");
	else
		printk(KERN_INFO "%s: link is down\n", dev->ndev->name);
}
/* Process ctx, rtnl_lock semaphore */
static int emac_open(struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	struct ocp_func_emac_data *emacdata = dev->def->additions;
	int err, i;

	DBG("%d: open" NL, dev->def->index);

	/* Setup error IRQ handler */
	err = request_irq(dev->def->irq, emac_irq, 0, "EMAC", dev);
	if (err) {
		printk(KERN_ERR "%s: failed to request IRQ %d\n",
		       ndev->name, dev->def->irq);
		return err;
	}

	/* Allocate RX ring */
	for (i = 0; i < NUM_RX_BUFF; ++i)
		if (emac_alloc_rx_skb(dev, i, GFP_KERNEL)) {
			printk(KERN_ERR "%s: failed to allocate RX ring\n",
			       ndev->name);
			goto oom;
		}

	local_bh_disable();
	dev->tx_cnt = dev->tx_slot = dev->ack_slot = dev->rx_slot =
	    dev->commac.rx_stopped = 0;
	dev->rx_sg_skb = NULL;

	if (dev->phy.address >= 0) {
		int link_poll_interval;
		if (dev->phy.def->ops->poll_link(&dev->phy)) {
			dev->phy.def->ops->read_link(&dev->phy);
			EMAC_RX_CLK_DEFAULT(dev->def->index);
			netif_carrier_on(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_ON;
		} else {
			EMAC_RX_CLK_TX(dev->def->index);
			netif_carrier_off(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_OFF;
		}
		mod_timer(&dev->link_timer, jiffies + link_poll_interval);
		emac_print_link_status(dev);
	} else
		netif_carrier_on(dev->ndev);

	emac_configure(dev);
	mal_poll_add(dev->mal, &dev->commac);
	mal_enable_tx_channel(dev->mal, emacdata->mal_tx_chan);
	mal_set_rcbs(dev->mal, emacdata->mal_rx_chan, emac_rx_size(ndev->mtu));
	mal_enable_rx_channel(dev->mal, emacdata->mal_rx_chan);
	emac_tx_enable(dev);
	emac_rx_enable(dev);
	netif_start_queue(ndev);
	local_bh_enable();

	return 0;
      oom:
	emac_clean_rx_ring(dev);
	free_irq(dev->def->irq, dev);
	return -ENOMEM;
}
/* BHs disabled */
static int emac_link_differs(struct ocp_enet_private *dev)
{
	u32 r = in_be32(&dev->emacp->mr1);

	int duplex = r & EMAC_MR1_FDE ? DUPLEX_FULL : DUPLEX_HALF;
	int speed, pause, asym_pause;

	if (r & (EMAC_MR1_MF_1000 | EMAC_MR1_MF_1000GPCS))
		speed = SPEED_1000;
	else if (r & EMAC_MR1_MF_100)
		speed = SPEED_100;
	else
		speed = SPEED_10;

	switch (r & (EMAC_MR1_EIFC | EMAC_MR1_APP)) {
	case (EMAC_MR1_EIFC | EMAC_MR1_APP):
		pause = 1;
		asym_pause = 0;
		break;
	case EMAC_MR1_APP:
		pause = 0;
		asym_pause = 1;
		break;
	default:
		pause = asym_pause = 0;
	}
	return speed != dev->phy.speed || duplex != dev->phy.duplex ||
	    pause != dev->phy.pause || asym_pause != dev->phy.asym_pause;
}
/* BHs disabled */
static void emac_link_timer(unsigned long data)
{
	struct ocp_enet_private *dev = (struct ocp_enet_private *)data;
	int link_poll_interval;

	DBG2("%d: link timer" NL, dev->def->index);

	if (dev->phy.def->ops->poll_link(&dev->phy)) {
		if (!netif_carrier_ok(dev->ndev)) {
			EMAC_RX_CLK_DEFAULT(dev->def->index);

			/* Get new link parameters */
			dev->phy.def->ops->read_link(&dev->phy);

			if (dev->tah_dev || emac_link_differs(dev))
				emac_full_tx_reset(dev->ndev);

			netif_carrier_on(dev->ndev);
			emac_print_link_status(dev);
		}
		link_poll_interval = PHY_POLL_LINK_ON;
	} else {
		if (netif_carrier_ok(dev->ndev)) {
			EMAC_RX_CLK_TX(dev->def->index);
#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX)
			emac_reinitialize(dev);
#endif
			netif_carrier_off(dev->ndev);
			emac_print_link_status(dev);
		}

		/* Retry reset if the previous attempt failed.
		 * This is needed mostly for the CONFIG_IBM_EMAC_PHY_RX_CLK_FIX
		 * case, but I left it here because it shouldn't trigger for
		 * sane firmware ;)
		 */
		if (unlikely(dev->reset_failed))
			emac_reinitialize(dev);

		link_poll_interval = PHY_POLL_LINK_OFF;
	}
	mod_timer(&dev->link_timer, jiffies + link_poll_interval);
}
/* BHs disabled */
static void emac_force_link_update(struct ocp_enet_private *dev)
{
	netif_carrier_off(dev->ndev);
	if (timer_pending(&dev->link_timer))
		mod_timer(&dev->link_timer, jiffies + PHY_POLL_LINK_OFF);
}
/* Process ctx, rtnl_lock semaphore */
static int emac_close(struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	struct ocp_func_emac_data *emacdata = dev->def->additions;

	DBG("%d: close" NL, dev->def->index);

	local_bh_disable();

	if (dev->phy.address >= 0)
		del_timer_sync(&dev->link_timer);

	netif_stop_queue(ndev);
	emac_rx_disable(dev);
	emac_tx_disable(dev);
	mal_disable_rx_channel(dev->mal, emacdata->mal_rx_chan);
	mal_disable_tx_channel(dev->mal, emacdata->mal_tx_chan);
	mal_poll_del(dev->mal, &dev->commac);
	local_bh_enable();

	emac_clean_tx_ring(dev);
	emac_clean_rx_ring(dev);
	free_irq(dev->def->irq, dev);

	return 0;
}
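/* When a TAH is attached it can compute TCP/UDP checksums in hardware;
 * for such packets we only need to flag the descriptor and count the
 * offloaded packet. Otherwise checksumming is left to the stack.
 */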
static inline u16 emac_tx_csum(struct ocp_enet_private *dev,
			       struct sk_buff *skb)
{
#if defined(CONFIG_IBM_EMAC_TAH)
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		++dev->stats.tx_packets_csum;
		return EMAC_TX_CTRL_TAH_CSUM;
	}
#endif
	return 0;
}
static inline int emac_xmit_finish(struct ocp_enet_private *dev, int len)
{
	struct emac_regs __iomem *p = dev->emacp;
	struct net_device *ndev = dev->ndev;

	/* Send the packet out */
	out_be32(&p->tmr0, EMAC_TMR0_XMIT);

	if (unlikely(++dev->tx_cnt == NUM_TX_BUFF)) {
		netif_stop_queue(ndev);
		DBG2("%d: stopped TX queue" NL, dev->def->index);
	}

	ndev->trans_start = jiffies;
	++dev->stats.tx_packets;
	dev->stats.tx_bytes += len;

	return 0;
}
/* BHs disabled */
static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	unsigned int len = skb->len;
	int slot;

	u16 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
	    MAL_TX_CTRL_LAST | emac_tx_csum(dev, skb);

	slot = dev->tx_slot++;
	if (dev->tx_slot == NUM_TX_BUFF) {
		dev->tx_slot = 0;
		ctrl |= MAL_TX_CTRL_WRAP;
	}

	DBG2("%d: xmit(%u) %d" NL, dev->def->index, len, slot);

	dev->tx_skb[slot] = skb;
	dev->tx_desc[slot].data_ptr = dma_map_single(dev->ldev, skb->data, len,
						     DMA_TO_DEVICE);
	dev->tx_desc[slot].data_len = (u16) len;
	barrier();
	dev->tx_desc[slot].ctrl = ctrl;

	return emac_xmit_finish(dev, len);
}
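/* MAL limits a single buffer descriptor to MAL_MAX_TX_SIZE bytes, so a
 * large (SG) transmit has to be chopped into several consecutive BDs.
 * emac_xmit_split() fills the BDs following "slot" with up-to-maximum-size
 * chunks of one DMA-mapped region and returns the last slot used.
 */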
#if defined(CONFIG_IBM_EMAC_TAH)
static inline int emac_xmit_split(struct ocp_enet_private *dev, int slot,
				  u32 pd, int len, int last, u16 base_ctrl)
{
	while (1) {
		u16 ctrl = base_ctrl;
		int chunk = min(len, MAL_MAX_TX_SIZE);
		len -= chunk;

		slot = (slot + 1) % NUM_TX_BUFF;

		if (last && !len)
			ctrl |= MAL_TX_CTRL_LAST;
		if (slot == NUM_TX_BUFF - 1)
			ctrl |= MAL_TX_CTRL_WRAP;

		dev->tx_skb[slot] = NULL;
		dev->tx_desc[slot].data_ptr = pd;
		dev->tx_desc[slot].data_len = (u16) chunk;
		dev->tx_desc[slot].ctrl = ctrl;
		++dev->tx_cnt;

		if (!len)
			break;

		pd += chunk;
	}
	return slot;
}
/* BHs disabled (SG version for TAH-equipped EMACs) */
static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int len = skb->len, chunk;
	int slot, i;
	u16 ctrl;
	u32 pd;

	/* This is common "fast" path */
	if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE))
		return emac_start_xmit(skb, ndev);

	len -= skb->data_len;

	/* Note, this is only an *estimation*, we can still run out of empty
	 * slots because of the additional fragmentation into
	 * MAL_MAX_TX_SIZE-sized chunks
	 */
	if (unlikely(dev->tx_cnt + nr_frags + mal_tx_chunks(len) > NUM_TX_BUFF))
		goto stop_queue;

	ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
	    emac_tx_csum(dev, skb);
	slot = dev->tx_slot;

	/* skb data */
	dev->tx_skb[slot] = NULL;
	chunk = min(len, MAL_MAX_TX_SIZE);
	dev->tx_desc[slot].data_ptr = pd =
	    dma_map_single(dev->ldev, skb->data, len, DMA_TO_DEVICE);
	dev->tx_desc[slot].data_len = (u16) chunk;
	len -= chunk;
	if (unlikely(len))
		slot = emac_xmit_split(dev, slot, pd + chunk, len, !nr_frags,
				       ctrl);
	/* skb fragments */
	for (i = 0; i < nr_frags; ++i) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		len = frag->size;

		if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
			goto undo_frame;

		pd = dma_map_page(dev->ldev, frag->page, frag->page_offset, len,
				  DMA_TO_DEVICE);

		slot = emac_xmit_split(dev, slot, pd, len, i == nr_frags - 1,
				       ctrl);
	}

	DBG2("%d: xmit_sg(%u) %d - %d" NL, dev->def->index, skb->len,
	     dev->tx_slot, slot);

	/* Attach skb to the last slot so we don't release it too early */
	dev->tx_skb[slot] = skb;

	/* Send the packet out */
	if (dev->tx_slot == NUM_TX_BUFF - 1)
		ctrl |= MAL_TX_CTRL_WRAP;
	barrier();
	dev->tx_desc[dev->tx_slot].ctrl = ctrl;
	dev->tx_slot = (slot + 1) % NUM_TX_BUFF;

	return emac_xmit_finish(dev, skb->len);

      undo_frame:
	/* Well, too bad. Our previous estimation was overly optimistic.
	 * Undo everything.
	 */
	while (slot != dev->tx_slot) {
		dev->tx_desc[slot].ctrl = 0;
		--dev->tx_cnt;
		if (--slot < 0)
			slot = NUM_TX_BUFF - 1;
	}
	++dev->estats.tx_undo;

      stop_queue:
	netif_stop_queue(ndev);
	DBG2("%d: stopped TX queue" NL, dev->def->index);
	return 1;
}
#else
# define emac_start_xmit_sg	emac_start_xmit
#endif	/* !defined(CONFIG_IBM_EMAC_TAH) */
/* BHs disabled */
static void emac_parse_tx_error(struct ocp_enet_private *dev, u16 ctrl)
{
	struct ibm_emac_error_stats *st = &dev->estats;
	DBG("%d: BD TX error %04x" NL, dev->def->index, ctrl);

	++st->tx_bd_errors;
	if (ctrl & EMAC_TX_ST_BFCS)
		++st->tx_bd_bad_fcs;
	if (ctrl & EMAC_TX_ST_LCS)
		++st->tx_bd_carrier_loss;
	if (ctrl & EMAC_TX_ST_ED)
		++st->tx_bd_excessive_deferral;
	if (ctrl & EMAC_TX_ST_EC)
		++st->tx_bd_excessive_collisions;
	if (ctrl & EMAC_TX_ST_LC)
		++st->tx_bd_late_collision;
	if (ctrl & EMAC_TX_ST_MC)
		++st->tx_bd_multple_collisions;
	if (ctrl & EMAC_TX_ST_SC)
		++st->tx_bd_single_collision;
	if (ctrl & EMAC_TX_ST_UR)
		++st->tx_bd_underrun;
	if (ctrl & EMAC_TX_ST_SQE)
		++st->tx_bd_sqe;
}
/* BHs disabled */
static void emac_poll_tx(void *param)
{
	struct ocp_enet_private *dev = param;
	DBG2("%d: poll_tx, %d %d" NL, dev->def->index, dev->tx_cnt,
	     dev->ack_slot);

	if (dev->tx_cnt) {
		u16 ctrl;
		int slot = dev->ack_slot, n = 0;
	      again:
		ctrl = dev->tx_desc[slot].ctrl;
		if (!(ctrl & MAL_TX_CTRL_READY)) {
			struct sk_buff *skb = dev->tx_skb[slot];
			++n;

			if (skb) {
				dev_kfree_skb(skb);
				dev->tx_skb[slot] = NULL;
			}
			slot = (slot + 1) % NUM_TX_BUFF;

			if (unlikely(EMAC_IS_BAD_TX(ctrl)))
				emac_parse_tx_error(dev, ctrl);

			if (--dev->tx_cnt)
				goto again;
		}
		if (n) {
			dev->ack_slot = slot;
			if (netif_queue_stopped(dev->ndev) &&
			    dev->tx_cnt < EMAC_TX_WAKEUP_THRESH)
				netif_wake_queue(dev->ndev);

			DBG2("%d: tx %d pkts" NL, dev->def->index, n);
		}
	}
}
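/* Re-arm an RX descriptor with its existing skb: flush the part of the
 * buffer that was written into (the dma_map_single() below is used purely
 * for its cache-coherency side effect, see the comment about dma_unmap at
 * the top of this file) and hand the BD back to the MAL by marking it empty.
 */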
static inline void emac_recycle_rx_skb(struct ocp_enet_private *dev, int slot,
				       int len)
{
	struct sk_buff *skb = dev->rx_skb[slot];
	DBG2("%d: recycle %d %d" NL, dev->def->index, slot, len);

	if (len)
		dma_map_single(dev->ldev, skb->data - 2,
			       EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE);

	dev->rx_desc[slot].data_len = 0;
	barrier();
	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
}
static void emac_parse_rx_error(struct ocp_enet_private *dev, u16 ctrl)
{
	struct ibm_emac_error_stats *st = &dev->estats;
	DBG("%d: BD RX error %04x" NL, dev->def->index, ctrl);

	++st->rx_bd_errors;
	if (ctrl & EMAC_RX_ST_OE)
		++st->rx_bd_overrun;
	if (ctrl & EMAC_RX_ST_BP)
		++st->rx_bd_bad_packet;
	if (ctrl & EMAC_RX_ST_RP)
		++st->rx_bd_runt_packet;
	if (ctrl & EMAC_RX_ST_SE)
		++st->rx_bd_short_event;
	if (ctrl & EMAC_RX_ST_AE)
		++st->rx_bd_alignment_error;
	if (ctrl & EMAC_RX_ST_BFCS)
		++st->rx_bd_bad_fcs;
	if (ctrl & EMAC_RX_ST_PTL)
		++st->rx_bd_packet_too_long;
	if (ctrl & EMAC_RX_ST_ORE)
		++st->rx_bd_out_of_range;
	if (ctrl & EMAC_RX_ST_IRE)
		++st->rx_bd_in_range;
}
static inline void emac_rx_csum(struct ocp_enet_private *dev,
				struct sk_buff *skb, u16 ctrl)
{
#if defined(CONFIG_IBM_EMAC_TAH)
	if (!ctrl && dev->tah_dev) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		++dev->stats.rx_packets_csum;
	}
#endif
}
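/* Append the next fragment of a multi-BD packet to the pending rx_sg_skb.
 * Returns 0 on success; if the assembled packet would exceed the skb buffer,
 * the partially assembled packet is dropped, the slot is recycled and -1 is
 * returned so the caller won't try to complete the packet.
 */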
static inline int emac_rx_sg_append(struct ocp_enet_private *dev, int slot)
{
	if (likely(dev->rx_sg_skb != NULL)) {
		int len = dev->rx_desc[slot].data_len;
		int tot_len = dev->rx_sg_skb->len + len;

		if (unlikely(tot_len + 2 > dev->rx_skb_size)) {
			++dev->estats.rx_dropped_mtu;
			dev_kfree_skb(dev->rx_sg_skb);
			dev->rx_sg_skb = NULL;
		} else {
			cacheable_memcpy(dev->rx_sg_skb->tail,
					 dev->rx_skb[slot]->data, len);
			skb_put(dev->rx_sg_skb, len);
			emac_recycle_rx_skb(dev, slot, len);
			return 0;
		}
	}
	emac_recycle_rx_skb(dev, slot, 0);
	return -1;
}
/* BHs disabled */
static int emac_poll_rx(void *param, int budget)
{
	struct ocp_enet_private *dev = param;
	int slot = dev->rx_slot, received = 0;

	DBG2("%d: poll_rx(%d)" NL, dev->def->index, budget);

      again:
	while (budget > 0) {
		int len;
		struct sk_buff *skb;
		u16 ctrl = dev->rx_desc[slot].ctrl;

		if (ctrl & MAL_RX_CTRL_EMPTY)
			break;

		skb = dev->rx_skb[slot];
		barrier();
		len = dev->rx_desc[slot].data_len;

		if (unlikely(!MAL_IS_SINGLE_RX(ctrl)))
			goto sg;

		ctrl &= EMAC_BAD_RX_MASK;
		if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
			emac_parse_rx_error(dev, ctrl);
			++dev->estats.rx_dropped_error;
			emac_recycle_rx_skb(dev, slot, 0);
			len = 0;
			goto next;
		}

		if (len && len < EMAC_RX_COPY_THRESH) {
			struct sk_buff *copy_skb =
			    alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2, GFP_ATOMIC);
			if (unlikely(!copy_skb))
				goto oom;

			skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2);
			cacheable_memcpy(copy_skb->data - 2, skb->data - 2,
					 len + 2);
			emac_recycle_rx_skb(dev, slot, len);
			skb = copy_skb;
		} else if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC)))
			goto oom;

		skb_put(skb, len);
	      push_packet:
		skb->dev = dev->ndev;
		skb->protocol = eth_type_trans(skb, dev->ndev);
		emac_rx_csum(dev, skb, ctrl);

		if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
			++dev->estats.rx_dropped_stack;
	      next:
		++dev->stats.rx_packets;
	      skip:
		dev->stats.rx_bytes += len;
		slot = (slot + 1) % NUM_RX_BUFF;
		--budget;
		++received;
		continue;
	      sg:
		if (ctrl & MAL_RX_CTRL_FIRST) {
			BUG_ON(dev->rx_sg_skb);
			if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) {
				DBG("%d: rx OOM %d" NL, dev->def->index, slot);
				++dev->estats.rx_dropped_oom;
				emac_recycle_rx_skb(dev, slot, 0);
			} else {
				dev->rx_sg_skb = skb;
				skb_put(skb, len);
			}
		} else if (!emac_rx_sg_append(dev, slot) &&
			   (ctrl & MAL_RX_CTRL_LAST)) {

			skb = dev->rx_sg_skb;
			dev->rx_sg_skb = NULL;

			ctrl &= EMAC_BAD_RX_MASK;
			if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
				emac_parse_rx_error(dev, ctrl);
				++dev->estats.rx_dropped_error;
				dev_kfree_skb(skb);
				len = 0;
			} else
				goto push_packet;
		}
		goto skip;
	      oom:
		DBG("%d: rx OOM %d" NL, dev->def->index, slot);
		/* Drop the packet and recycle skb */
		++dev->estats.rx_dropped_oom;
		emac_recycle_rx_skb(dev, slot, 0);
		goto next;
	}

	if (received) {
		DBG2("%d: rx %d BDs" NL, dev->def->index, received);
		dev->rx_slot = slot;
	}

	if (unlikely(budget && dev->commac.rx_stopped)) {
		struct ocp_func_emac_data *emacdata = dev->def->additions;

		barrier();
		if (!(dev->rx_desc[slot].ctrl & MAL_RX_CTRL_EMPTY)) {
			DBG2("%d: rx restart" NL, dev->def->index);
			received = 0;
			goto again;
		}

		if (dev->rx_sg_skb) {
			DBG2("%d: dropping partial rx packet" NL,
			     dev->def->index);
			++dev->estats.rx_dropped_error;
			dev_kfree_skb(dev->rx_sg_skb);
			dev->rx_sg_skb = NULL;
		}

		dev->commac.rx_stopped = 0;
		mal_enable_rx_channel(dev->mal, emacdata->mal_rx_chan);
		emac_rx_enable(dev);
		dev->rx_slot = 0;
	}
	return received;
}
/* BHs disabled */
static int emac_peek_rx(void *param)
{
	struct ocp_enet_private *dev = param;
	return !(dev->rx_desc[dev->rx_slot].ctrl & MAL_RX_CTRL_EMPTY);
}
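/* SG variant of emac_peek_rx(): a packet is only ready once its *last*
 * BD has been filled, so walk the ring from rx_slot until we see either
 * an empty BD (nothing complete yet) or a BD with the LAST bit set.
 */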
/* BHs disabled */
static int emac_peek_rx_sg(void *param)
{
	struct ocp_enet_private *dev = param;
	int slot = dev->rx_slot;
	while (1) {
		u16 ctrl = dev->rx_desc[slot].ctrl;
		if (ctrl & MAL_RX_CTRL_EMPTY)
			return 0;
		else if (ctrl & MAL_RX_CTRL_LAST)
			return 1;

		slot = (slot + 1) % NUM_RX_BUFF;

		/* I'm just being paranoid here :) */
		if (unlikely(slot == dev->rx_slot))
			return 0;
	}
}
/* Hard IRQ */
static void emac_rxde(void *param)
{
	struct ocp_enet_private *dev = param;
	++dev->estats.rx_stopped;
	emac_rx_disable_async(dev);
}
/* Hard IRQ */
static irqreturn_t emac_irq(int irq, void *dev_instance)
{
	struct ocp_enet_private *dev = dev_instance;
	struct emac_regs __iomem *p = dev->emacp;
	struct ibm_emac_error_stats *st = &dev->estats;

	u32 isr = in_be32(&p->isr);
	out_be32(&p->isr, isr);

	DBG("%d: isr = %08x" NL, dev->def->index, isr);

	if (isr & EMAC_ISR_TXPE)
		++st->tx_parity;
	if (isr & EMAC_ISR_RXPE)
		++st->rx_parity;
	if (isr & EMAC_ISR_TXUE)
		++st->tx_underrun;
	if (isr & EMAC_ISR_RXOE)
		++st->rx_fifo_overrun;
	if (isr & EMAC_ISR_OVR)
		++st->rx_overrun;
	if (isr & EMAC_ISR_BP)
		++st->rx_bad_packet;
	if (isr & EMAC_ISR_RP)
		++st->rx_runt_packet;
	if (isr & EMAC_ISR_SE)
		++st->rx_short_event;
	if (isr & EMAC_ISR_ALE)
		++st->rx_alignment_error;
	if (isr & EMAC_ISR_BFCS)
		++st->rx_bad_fcs;
	if (isr & EMAC_ISR_PTLE)
		++st->rx_packet_too_long;
	if (isr & EMAC_ISR_ORE)
		++st->rx_out_of_range;
	if (isr & EMAC_ISR_IRE)
		++st->rx_in_range;
	if (isr & EMAC_ISR_SQE)
		++st->tx_sqe;
	if (isr & EMAC_ISR_TE)
		++st->tx_errors;

	return IRQ_HANDLED;
}
static struct net_device_stats *emac_stats(struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	struct ibm_emac_stats *st = &dev->stats;
	struct ibm_emac_error_stats *est = &dev->estats;
	struct net_device_stats *nst = &dev->nstats;

	DBG2("%d: stats" NL, dev->def->index);

	/* Compute "legacy" statistics */
	local_irq_disable();
	nst->rx_packets = (unsigned long)st->rx_packets;
	nst->rx_bytes = (unsigned long)st->rx_bytes;
	nst->tx_packets = (unsigned long)st->tx_packets;
	nst->tx_bytes = (unsigned long)st->tx_bytes;
	nst->rx_dropped = (unsigned long)(est->rx_dropped_oom +
					  est->rx_dropped_error +
					  est->rx_dropped_resize +
					  est->rx_dropped_mtu);
	nst->tx_dropped = (unsigned long)est->tx_dropped;

	nst->rx_errors = (unsigned long)est->rx_bd_errors;
	nst->rx_fifo_errors = (unsigned long)(est->rx_bd_overrun +
					      est->rx_fifo_overrun +
					      est->rx_overrun);
	nst->rx_frame_errors = (unsigned long)(est->rx_bd_alignment_error +
					       est->rx_alignment_error);
	nst->rx_crc_errors = (unsigned long)(est->rx_bd_bad_fcs +
					     est->rx_bad_fcs);
	nst->rx_length_errors = (unsigned long)(est->rx_bd_runt_packet +
						est->rx_bd_short_event +
						est->rx_bd_packet_too_long +
						est->rx_bd_out_of_range +
						est->rx_bd_in_range +
						est->rx_runt_packet +
						est->rx_short_event +
						est->rx_packet_too_long +
						est->rx_out_of_range +
						est->rx_in_range);

	nst->tx_errors = (unsigned long)(est->tx_bd_errors + est->tx_errors);
	nst->tx_fifo_errors = (unsigned long)(est->tx_bd_underrun +
					      est->tx_underrun);
	nst->tx_carrier_errors = (unsigned long)est->tx_bd_carrier_loss;
	nst->collisions = (unsigned long)(est->tx_bd_excessive_deferral +
					  est->tx_bd_excessive_collisions +
					  est->tx_bd_late_collision +
					  est->tx_bd_multple_collisions);
	local_irq_enable();
	return nst;
}
static void emac_remove(struct ocp_device *ocpdev)
{
	struct ocp_enet_private *dev = ocp_get_drvdata(ocpdev);

	DBG("%d: remove" NL, dev->def->index);

	ocp_set_drvdata(ocpdev, NULL);
	unregister_netdev(dev->ndev);

	tah_fini(dev->tah_dev);
	rgmii_fini(dev->rgmii_dev, dev->rgmii_input);
	zmii_fini(dev->zmii_dev, dev->zmii_input);

	emac_dbg_register(dev->def->index, NULL);

	mal_unregister_commac(dev->mal, &dev->commac);
	iounmap(dev->emacp);
	kfree(dev->ndev);
}
static struct mal_commac_ops emac_commac_ops = {
	.poll_tx = &emac_poll_tx,
	.poll_rx = &emac_poll_rx,
	.peek_rx = &emac_peek_rx,
	.rxde = &emac_rxde,
};

static struct mal_commac_ops emac_commac_sg_ops = {
	.poll_tx = &emac_poll_tx,
	.poll_rx = &emac_poll_rx,
	.peek_rx = &emac_peek_rx_sg,
	.rxde = &emac_rxde,
};
/* Ethtool support */
static int emac_ethtool_get_settings(struct net_device *ndev,
				     struct ethtool_cmd *cmd)
{
	struct ocp_enet_private *dev = ndev->priv;

	cmd->supported = dev->phy.features;
	cmd->port = PORT_MII;
	cmd->phy_address = dev->phy.address;
	cmd->transceiver =
	    dev->phy.address >= 0 ? XCVR_EXTERNAL : XCVR_INTERNAL;

	local_bh_disable();
	cmd->advertising = dev->phy.advertising;
	cmd->autoneg = dev->phy.autoneg;
	cmd->speed = dev->phy.speed;
	cmd->duplex = dev->phy.duplex;
	local_bh_enable();

	return 0;
}
static int emac_ethtool_set_settings(struct net_device *ndev,
				     struct ethtool_cmd *cmd)
{
	struct ocp_enet_private *dev = ndev->priv;
	u32 f = dev->phy.features;

	DBG("%d: set_settings(%d, %d, %d, 0x%08x)" NL, dev->def->index,
	    cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising);

	/* Basic sanity checks */
	if (dev->phy.address < 0)
		return -EOPNOTSUPP;
	if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;
	if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
		return -EINVAL;
	if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_DISABLE) {
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_HALF
			    && !(f & SUPPORTED_10baseT_Half))
				return -EINVAL;
			if (cmd->duplex == DUPLEX_FULL
			    && !(f & SUPPORTED_10baseT_Full))
				return -EINVAL;
			break;
		case SPEED_100:
			if (cmd->duplex == DUPLEX_HALF
			    && !(f & SUPPORTED_100baseT_Half))
				return -EINVAL;
			if (cmd->duplex == DUPLEX_FULL
			    && !(f & SUPPORTED_100baseT_Full))
				return -EINVAL;
			break;
		case SPEED_1000:
			if (cmd->duplex == DUPLEX_HALF
			    && !(f & SUPPORTED_1000baseT_Half))
				return -EINVAL;
			if (cmd->duplex == DUPLEX_FULL
			    && !(f & SUPPORTED_1000baseT_Full))
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}

		local_bh_disable();
		dev->phy.def->ops->setup_forced(&dev->phy, cmd->speed,
						cmd->duplex);
	} else {
		if (!(f & SUPPORTED_Autoneg))
			return -EINVAL;

		local_bh_disable();
		dev->phy.def->ops->setup_aneg(&dev->phy,
					      (cmd->advertising & f) |
					      (dev->phy.advertising &
					       (ADVERTISED_Pause |
						ADVERTISED_Asym_Pause)));
	}
	emac_force_link_update(dev);
	local_bh_enable();

	return 0;
}
static void emac_ethtool_get_ringparam(struct net_device *ndev,
				       struct ethtool_ringparam *rp)
{
	rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF;
	rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF;
}
static void emac_ethtool_get_pauseparam(struct net_device *ndev,
					struct ethtool_pauseparam *pp)
{
	struct ocp_enet_private *dev = ndev->priv;

	local_bh_disable();
	if ((dev->phy.features & SUPPORTED_Autoneg) &&
	    (dev->phy.advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause)))
		pp->autoneg = 1;

	if (dev->phy.duplex == DUPLEX_FULL) {
		if (dev->phy.pause)
			pp->rx_pause = pp->tx_pause = 1;
		else if (dev->phy.asym_pause)
			pp->tx_pause = 1;
	}
	local_bh_enable();
}
static u32 emac_ethtool_get_rx_csum(struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	return dev->tah_dev != 0;
}
static int emac_get_regs_len(struct ocp_enet_private *dev)
{
	return sizeof(struct emac_ethtool_regs_subhdr) + EMAC_ETHTOOL_REGS_SIZE;
}

static int emac_ethtool_get_regs_len(struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	return sizeof(struct emac_ethtool_regs_hdr) +
	    emac_get_regs_len(dev) + mal_get_regs_len(dev->mal) +
	    zmii_get_regs_len(dev->zmii_dev) +
	    rgmii_get_regs_len(dev->rgmii_dev) +
	    tah_get_regs_len(dev->tah_dev);
}
static void *emac_dump_regs(struct ocp_enet_private *dev, void *buf)
{
	struct emac_ethtool_regs_subhdr *hdr = buf;

	hdr->version = EMAC_ETHTOOL_REGS_VER;
	hdr->index = dev->def->index;
	memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE);
	return ((void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE);
}
static void emac_ethtool_get_regs(struct net_device *ndev,
				  struct ethtool_regs *regs, void *buf)
{
	struct ocp_enet_private *dev = ndev->priv;
	struct emac_ethtool_regs_hdr *hdr = buf;

	hdr->components = 0;
	buf = hdr + 1;

	local_irq_disable();
	buf = mal_dump_regs(dev->mal, buf);
	buf = emac_dump_regs(dev, buf);
	if (dev->zmii_dev) {
		hdr->components |= EMAC_ETHTOOL_REGS_ZMII;
		buf = zmii_dump_regs(dev->zmii_dev, buf);
	}
	if (dev->rgmii_dev) {
		hdr->components |= EMAC_ETHTOOL_REGS_RGMII;
		buf = rgmii_dump_regs(dev->rgmii_dev, buf);
	}
	if (dev->tah_dev) {
		hdr->components |= EMAC_ETHTOOL_REGS_TAH;
		buf = tah_dump_regs(dev->tah_dev, buf);
	}
	local_irq_enable();
}
static int emac_ethtool_nway_reset(struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	int res = 0;

	DBG("%d: nway_reset" NL, dev->def->index);

	if (dev->phy.address < 0)
		return -EOPNOTSUPP;

	local_bh_disable();
	if (!dev->phy.autoneg) {
		res = -EINVAL;
		goto out;
	}

	dev->phy.def->ops->setup_aneg(&dev->phy, dev->phy.advertising);
	emac_force_link_update(dev);

      out:
	local_bh_enable();
	return res;
}
static int emac_ethtool_get_stats_count(struct net_device *ndev)
{
	return EMAC_ETHTOOL_STATS_COUNT;
}

static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset,
				     u8 * buf)
{
	if (stringset == ETH_SS_STATS)
		memcpy(buf, &emac_stats_keys, sizeof(emac_stats_keys));
}
static void emac_ethtool_get_ethtool_stats(struct net_device *ndev,
					   struct ethtool_stats *estats,
					   u64 * tmp_stats)
{
	struct ocp_enet_private *dev = ndev->priv;
	local_irq_disable();
	memcpy(tmp_stats, &dev->stats, sizeof(dev->stats));
	tmp_stats += sizeof(dev->stats) / sizeof(u64);
	memcpy(tmp_stats, &dev->estats, sizeof(dev->estats));
	local_irq_enable();
}
static void emac_ethtool_get_drvinfo(struct net_device *ndev,
				     struct ethtool_drvinfo *info)
{
	struct ocp_enet_private *dev = ndev->priv;

	strcpy(info->driver, "ibm_emac");
	strcpy(info->version, DRV_VERSION);
	info->fw_version[0] = '\0';
	sprintf(info->bus_info, "PPC 4xx EMAC %d", dev->def->index);
	info->n_stats = emac_ethtool_get_stats_count(ndev);
	info->regdump_len = emac_ethtool_get_regs_len(ndev);
}
static const struct ethtool_ops emac_ethtool_ops = {
	.get_settings = emac_ethtool_get_settings,
	.set_settings = emac_ethtool_set_settings,
	.get_drvinfo = emac_ethtool_get_drvinfo,

	.get_regs_len = emac_ethtool_get_regs_len,
	.get_regs = emac_ethtool_get_regs,

	.nway_reset = emac_ethtool_nway_reset,

	.get_ringparam = emac_ethtool_get_ringparam,
	.get_pauseparam = emac_ethtool_get_pauseparam,

	.get_rx_csum = emac_ethtool_get_rx_csum,

	.get_strings = emac_ethtool_get_strings,
	.get_stats_count = emac_ethtool_get_stats_count,
	.get_ethtool_stats = emac_ethtool_get_ethtool_stats,

	.get_link = ethtool_op_get_link,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.get_sg = ethtool_op_get_sg,
};
static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
	struct ocp_enet_private *dev = ndev->priv;
	uint16_t *data = (uint16_t *) & rq->ifr_ifru;

	DBG("%d: ioctl %08x" NL, dev->def->index, cmd);

	if (dev->phy.address < 0)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCDEVPRIVATE:
		data[0] = dev->phy.address;
		/* Fall through */
	case SIOCGMIIREG:
	case SIOCDEVPRIVATE + 1:
		data[3] = emac_mdio_read(ndev, dev->phy.address, data[1]);
		return 0;

	case SIOCSMIIREG:
	case SIOCDEVPRIVATE + 2:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		emac_mdio_write(ndev, dev->phy.address, data[1], data[2]);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
static int __init emac_probe(struct ocp_device *ocpdev)
{
	struct ocp_func_emac_data *emacdata = ocpdev->def->additions;
	struct net_device *ndev;
	struct ocp_device *maldev;
	struct ocp_enet_private *dev;
	int err, i;

	DBG("%d: probe" NL, ocpdev->def->index);

	if (!emacdata) {
		printk(KERN_ERR "emac%d: Missing additional data!\n",
		       ocpdev->def->index);
		return -ENODEV;
	}

	/* Allocate our net_device structure */
	ndev = alloc_etherdev(sizeof(struct ocp_enet_private));
	if (!ndev) {
		printk(KERN_ERR "emac%d: could not allocate ethernet device!\n",
		       ocpdev->def->index);
		return -ENOMEM;
	}
	dev = ndev->priv;
	dev->ndev = ndev;
	dev->ldev = &ocpdev->dev;
	dev->def = ocpdev->def;
	SET_MODULE_OWNER(ndev);

	/* Find MAL device we are connected to */
	maldev =
	    ocp_find_device(OCP_VENDOR_IBM, OCP_FUNC_MAL, emacdata->mal_idx);
	if (!maldev) {
		printk(KERN_ERR "emac%d: unknown mal%d device!\n",
		       dev->def->index, emacdata->mal_idx);
		err = -ENODEV;
		goto out;
	}
	dev->mal = ocp_get_drvdata(maldev);
	if (!dev->mal) {
		printk(KERN_ERR "emac%d: mal%d hasn't been initialized yet!\n",
		       dev->def->index, emacdata->mal_idx);
		err = -ENODEV;
		goto out;
	}

	/* Register with MAL */
	dev->commac.ops = &emac_commac_ops;
	dev->commac.dev = dev;
	dev->commac.tx_chan_mask = MAL_CHAN_MASK(emacdata->mal_tx_chan);
	dev->commac.rx_chan_mask = MAL_CHAN_MASK(emacdata->mal_rx_chan);
	err = mal_register_commac(dev->mal, &dev->commac);
	if (err) {
		printk(KERN_ERR "emac%d: failed to register with mal%d!\n",
		       dev->def->index, emacdata->mal_idx);
		goto out;
	}
	dev->rx_skb_size = emac_rx_skb_size(ndev->mtu);
	dev->rx_sync_size = emac_rx_sync_size(ndev->mtu);

	/* Get pointers to BD rings */
	dev->tx_desc =
	    dev->mal->bd_virt + mal_tx_bd_offset(dev->mal,
						 emacdata->mal_tx_chan);
	dev->rx_desc =
	    dev->mal->bd_virt + mal_rx_bd_offset(dev->mal,
						 emacdata->mal_rx_chan);

	DBG("%d: tx_desc %p" NL, ocpdev->def->index, dev->tx_desc);
	DBG("%d: rx_desc %p" NL, ocpdev->def->index, dev->rx_desc);

	/* Clean rings */
	memset(dev->tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor));
	memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor));

	/* If we depend on another EMAC for MDIO, check whether it was probed already */
	if (emacdata->mdio_idx >= 0 && emacdata->mdio_idx != ocpdev->def->index) {
		struct ocp_device *mdiodev =
		    ocp_find_device(OCP_VENDOR_IBM, OCP_FUNC_EMAC,
				    emacdata->mdio_idx);
		if (!mdiodev) {
			printk(KERN_ERR "emac%d: unknown emac%d device!\n",
			       dev->def->index, emacdata->mdio_idx);
			err = -ENODEV;
			goto out2;
		}
		dev->mdio_dev = ocp_get_drvdata(mdiodev);
		if (!dev->mdio_dev) {
			printk(KERN_ERR
			       "emac%d: emac%d hasn't been initialized yet!\n",
			       dev->def->index, emacdata->mdio_idx);
			err = -ENODEV;
			goto out2;
		}
	}

	/* Attach to ZMII, if needed */
	if ((err = zmii_attach(dev)) != 0)
		goto out2;

	/* Attach to RGMII, if needed */
	if ((err = rgmii_attach(dev)) != 0)
		goto out3;

	/* Attach to TAH, if needed */
	if ((err = tah_attach(dev)) != 0)
		goto out4;

	/* Map EMAC regs */
	dev->emacp = ioremap(dev->def->paddr, sizeof(struct emac_regs));
	if (!dev->emacp) {
		printk(KERN_ERR "emac%d: could not ioremap device registers!\n",
		       dev->def->index);
		err = -ENOMEM;
		goto out5;
	}

	/* Fill in MAC address */
	for (i = 0; i < 6; ++i)
		ndev->dev_addr[i] = emacdata->mac_addr[i];

	/* Set some link defaults before we can find out real parameters */
	dev->phy.speed = SPEED_100;
	dev->phy.duplex = DUPLEX_FULL;
	dev->phy.autoneg = AUTONEG_DISABLE;
	dev->phy.pause = dev->phy.asym_pause = 0;
	dev->stop_timeout = STOP_TIMEOUT_100;
	init_timer(&dev->link_timer);
	dev->link_timer.function = emac_link_timer;
	dev->link_timer.data = (unsigned long)dev;

	/* Find PHY if any */
	dev->phy.dev = ndev;
	dev->phy.mode = emacdata->phy_mode;
	if (emacdata->phy_map != 0xffffffff) {
		u32 phy_map = emacdata->phy_map | busy_phy_map;
		u32 adv;

		DBG("%d: PHY maps %08x %08x" NL, dev->def->index,
		    emacdata->phy_map, busy_phy_map);

		EMAC_RX_CLK_TX(dev->def->index);

		dev->phy.mdio_read = emac_mdio_read;
		dev->phy.mdio_write = emac_mdio_write;

		/* Configure EMAC with defaults so we can at least use MDIO
		 * This is needed mostly for 440GX
		 */
		if (emac_phy_gpcs(dev->phy.mode)) {
			/* XXX
			 * Make GPCS PHY address equal to EMAC index.
			 * We probably should take into account busy_phy_map
			 * and/or phy_map here.
			 */
			dev->phy.address = dev->def->index;
		}

		emac_configure(dev);
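		/* Scan all 32 MDIO addresses, skipping those marked used in
		 * phy_map/busy_phy_map; claim the first address that answers
		 * a BMCR read and probes successfully.
		 */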
		for (i = 0; i < 0x20; phy_map >>= 1, ++i)
			if (!(phy_map & 1)) {
				int r;
				busy_phy_map |= 1 << i;

				/* Quick check if there is a PHY at the address */
				r = emac_mdio_read(dev->ndev, i, MII_BMCR);
				if (r == 0xffff || r < 0)
					continue;
				if (!mii_phy_probe(&dev->phy, i))
					break;
			}
		if (i == 0x20) {
			printk(KERN_WARNING "emac%d: can't find PHY!\n",
			       dev->def->index);
			goto out6;
		}

		/* Init PHY */
		if (dev->phy.def->ops->init)
			dev->phy.def->ops->init(&dev->phy);

		/* Disable any PHY features not supported by the platform */
		dev->phy.def->features &= ~emacdata->phy_feat_exc;

		/* Setup initial link parameters */
		if (dev->phy.features & SUPPORTED_Autoneg) {
			adv = dev->phy.features;
#if !defined(CONFIG_40x)
			adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
#endif
			/* Restart autonegotiation */
			dev->phy.def->ops->setup_aneg(&dev->phy, adv);
		} else {
			u32 f = dev->phy.def->features;
			int speed = SPEED_10, fd = DUPLEX_HALF;

			/* Select highest supported speed/duplex */
			if (f & SUPPORTED_1000baseT_Full) {
				speed = SPEED_1000;
				fd = DUPLEX_FULL;
			} else if (f & SUPPORTED_1000baseT_Half)
				speed = SPEED_1000;
			else if (f & SUPPORTED_100baseT_Full) {
				speed = SPEED_100;
				fd = DUPLEX_FULL;
			} else if (f & SUPPORTED_100baseT_Half)
				speed = SPEED_100;
			else if (f & SUPPORTED_10baseT_Full)
				fd = DUPLEX_FULL;

			/* Force link parameters */
			dev->phy.def->ops->setup_forced(&dev->phy, speed, fd);
		}
	} else {
		emac_reset(dev);

		/* PHY-less configuration.
		 * XXX I probably should move these settings to emacdata
		 */
		dev->phy.address = -1;
		dev->phy.features = SUPPORTED_100baseT_Full | SUPPORTED_MII;
		dev->phy.pause = 1;
	}

	/* Fill in the driver function table */
	ndev->open = &emac_open;
	if (dev->tah_dev) {
		ndev->hard_start_xmit = &emac_start_xmit_sg;
		ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
	} else
		ndev->hard_start_xmit = &emac_start_xmit;
	ndev->tx_timeout = &emac_full_tx_reset;
	ndev->watchdog_timeo = 5 * HZ;
	ndev->stop = &emac_close;
	ndev->get_stats = &emac_stats;
	ndev->set_multicast_list = &emac_set_multicast_list;
	ndev->do_ioctl = &emac_ioctl;
	if (emac_phy_supports_gige(emacdata->phy_mode)) {
		ndev->change_mtu = &emac_change_mtu;
		dev->commac.ops = &emac_commac_sg_ops;
	}
	SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);

	netif_carrier_off(ndev);
	netif_stop_queue(ndev);

	err = register_netdev(ndev);
	if (err) {
		printk(KERN_ERR "emac%d: failed to register net device (%d)!\n",
		       dev->def->index, err);
		goto out6;
	}

	ocp_set_drvdata(ocpdev, dev);

	printk("%s: emac%d, MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
	       ndev->name, dev->def->index,
	       ndev->dev_addr[0], ndev->dev_addr[1], ndev->dev_addr[2],
	       ndev->dev_addr[3], ndev->dev_addr[4], ndev->dev_addr[5]);

	if (dev->phy.address >= 0)
		printk("%s: found %s PHY (0x%02x)\n", ndev->name,
		       dev->phy.def->name, dev->phy.address);

	emac_dbg_register(dev->def->index, dev);

	return 0;
      out6:
	iounmap(dev->emacp);
      out5:
	tah_fini(dev->tah_dev);
      out4:
	rgmii_fini(dev->rgmii_dev, dev->rgmii_input);
      out3:
	zmii_fini(dev->zmii_dev, dev->zmii_input);
      out2:
	mal_unregister_commac(dev->mal, &dev->commac);
      out:
	kfree(ndev);
	return err;
}
static struct ocp_device_id emac_ids[] = {
	{ .vendor = OCP_VENDOR_IBM, .function = OCP_FUNC_EMAC },
	{ .vendor = OCP_VENDOR_INVALID}
};

static struct ocp_driver emac_driver = {
	.name = "emac",
	.id_table = emac_ids,
	.probe = emac_probe,
	.remove = emac_remove,
};
static int __init emac_init(void)
{
	printk(KERN_INFO DRV_DESC ", version " DRV_VERSION "\n");

	DBG(": init" NL);

	if (mal_init())
		return -ENODEV;

	EMAC_CLK_INTERNAL;
	if (ocp_register_driver(&emac_driver)) {
		EMAC_CLK_EXTERNAL;
		ocp_unregister_driver(&emac_driver);
		mal_exit();
		return -ENODEV;
	}
	EMAC_CLK_EXTERNAL;

	return 0;
}

static void __exit emac_exit(void)
{
	DBG(": exit" NL);

	ocp_unregister_driver(&emac_driver);
	mal_exit();
}

module_init(emac_init);
module_exit(emac_exit);