/*
 * drivers/net/ibm_emac/ibm_emac_core.c
 *
 * Driver for PowerPC 4xx on-chip ethernet controller.
 *
 * Copyright (c) 2004, 2005 Zultys Technologies.
 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
 *
 * Based on original work by
 *      Matt Porter <mporter@kernel.crashing.org>
 *      (c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
 *      Armin Kuster <akuster@mvista.com>
 *      Johnnie Peters <jpeters@mvista.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/bitops.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/uaccess.h>
#include <asm/ocp.h>

#include "ibm_emac_core.h"
#include "ibm_emac_debug.h"
/*
 * Lack of dma_unmap_???? calls is intentional.
 *
 * API-correct usage requires additional support state information to be
 * maintained for every RX and TX buffer descriptor (BD). Unfortunately, due to
 * the EMAC design (e.g. a TX buffer passed from the network stack can be split
 * into several BDs, and dma_map_single/dma_map_page can be used to map a
 * particular BD), maintaining such information will add additional overhead.
 * The current DMA API implementation for 4xx processors only ensures cache
 * coherency, and the dma_unmap_???? routines are empty and are likely to stay
 * this way. I decided to omit dma_unmap_??? calls because I don't want to add
 * additional complexity just for the sake of following some abstract API,
 * when it doesn't add any real benefit to the driver. I understand that this
 * decision may be controversial, but I really tried to make the code
 * API-correct and efficient at the same time and didn't come up with code I
 * liked :(. --ebs
 */
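
/* For illustration only: API-correct unmapping would need bookkeeping roughly
 * like the hypothetical sketch below (the names are made up, nothing like
 * this exists in the driver), i.e. one extra record per BD:
 *
 *      struct emac_bd_unmap_state {
 *              dma_addr_t      dma_addr;       // handle from dma_map_single/page()
 *              u16             dma_len;        // length passed to the map call
 *              u8              mapped_page;    // 1 if dma_map_page() was used
 *      };
 *
 * plus matching dma_unmap_single()/dma_unmap_page() calls in the TX and RX
 * completion paths. That per-BD state is exactly the overhead being avoided.
 */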
#define DRV_NAME        "emac"
#define DRV_VERSION     "3.53"
#define DRV_DESC        "PPC 4xx OCP EMAC driver"

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR
    ("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>");
MODULE_LICENSE("GPL");
/* minimum number of free TX descriptors required to wake up TX process */
#define EMAC_TX_WAKEUP_THRESH           (NUM_TX_BUFF / 4)

/* If packet size is less than this number, we allocate a small skb and copy
 * the packet contents into it instead of just sending the original big skb up.
 */
#define EMAC_RX_COPY_THRESH             CONFIG_IBM_EMAC_RX_COPY_THRESHOLD
/* Since multiple EMACs share MDIO lines in various ways, we need
 * to avoid re-using the same PHY ID in cases where the arch didn't
 * set up precise phy_map entries.
 */
static u32 busy_phy_map;
#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX) && \
    (defined(CONFIG_405EP) || defined(CONFIG_440EP) || defined(CONFIG_440GR))
/* 405EP has an "EMAC to PHY Control Register" (CPC0_EPCTL) which can help us
 * with the PHY RX clock problem.
 * 440EP/440GR has a saner SDR0_MFR register implementation than 440GX, which
 * also allows controlling each EMAC clock individually.
 */
static inline void EMAC_RX_CLK_TX(int idx)
{
        unsigned long flags;
        local_irq_save(flags);

#if defined(CONFIG_405EP)
        mtdcr(0xf3, mfdcr(0xf3) | (1 << idx));
#else /* CONFIG_440EP || CONFIG_440GR */
        SDR_WRITE(DCRN_SDR_MFR, SDR_READ(DCRN_SDR_MFR) | (0x08000000 >> idx));
#endif

        local_irq_restore(flags);
}

static inline void EMAC_RX_CLK_DEFAULT(int idx)
{
        unsigned long flags;
        local_irq_save(flags);

#if defined(CONFIG_405EP)
        mtdcr(0xf3, mfdcr(0xf3) & ~(1 << idx));
#else /* CONFIG_440EP || CONFIG_440GR */
        SDR_WRITE(DCRN_SDR_MFR, SDR_READ(DCRN_SDR_MFR) & ~(0x08000000 >> idx));
#endif

        local_irq_restore(flags);
}
#else
#define EMAC_RX_CLK_TX(idx)             ((void)0)
#define EMAC_RX_CLK_DEFAULT(idx)        ((void)0)
#endif
#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX) && defined(CONFIG_440GX)
/* We can switch the Ethernet clock to the internal source through SDR0_MFR[ECS];
 * unfortunately this is less flexible than the 440EP case, because it's a global
 * setting for all EMACs, therefore we do this clock trick only during probe.
 */
#define EMAC_CLK_INTERNAL               SDR_WRITE(DCRN_SDR_MFR, \
                                            SDR_READ(DCRN_SDR_MFR) | 0x08000000)
#define EMAC_CLK_EXTERNAL               SDR_WRITE(DCRN_SDR_MFR, \
                                            SDR_READ(DCRN_SDR_MFR) & ~0x08000000)
#else
#define EMAC_CLK_INTERNAL               ((void)0)
#define EMAC_CLK_EXTERNAL               ((void)0)
#endif
/* I don't want to litter the system log with timeout errors
 * when we have a brain-damaged PHY.
 */
static inline void emac_report_timeout_error(struct ocp_enet_private *dev,
                                             const char *error)
{
#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX)
        DBG("%d: %s" NL, dev->def->index, error);
#else
        if (net_ratelimit())
                printk(KERN_ERR "emac%d: %s\n", dev->def->index, error);
#endif
}
/* PHY polling intervals */
#define PHY_POLL_LINK_ON        HZ
#define PHY_POLL_LINK_OFF       (HZ / 5)
/* Please, keep in sync with struct ibm_emac_stats/ibm_emac_error_stats */
static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = {
        "rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum",
        "tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom",
        "rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu",
        "rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet",
        "rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error",
        "rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range",
        "rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun",
        "rx_bad_packet", "rx_runt_packet", "rx_short_event",
        "rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long",
        "rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors",
        "tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral",
        "tx_bd_excessive_collisions", "tx_bd_late_collision",
        "tx_bd_multple_collisions", "tx_bd_single_collision",
        "tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe",
        "tx_errors"
};
static irqreturn_t emac_irq(int irq, void *dev_instance, struct pt_regs *regs);
static void emac_clean_tx_ring(struct ocp_enet_private *dev);
static inline int emac_phy_supports_gige(int phy_mode)
{
        return phy_mode == PHY_MODE_GMII ||
            phy_mode == PHY_MODE_RGMII ||
            phy_mode == PHY_MODE_TBI ||
            phy_mode == PHY_MODE_RTBI;
}

static inline int emac_phy_gpcs(int phy_mode)
{
        return phy_mode == PHY_MODE_TBI ||
            phy_mode == PHY_MODE_RTBI;
}
static inline void emac_tx_enable(struct ocp_enet_private *dev)
{
        struct emac_regs *p = dev->emacp;
        unsigned long flags;
        u32 r;

        local_irq_save(flags);

        DBG("%d: tx_enable" NL, dev->def->index);

        r = in_be32(&p->mr0);
        if (!(r & EMAC_MR0_TXE))
                out_be32(&p->mr0, r | EMAC_MR0_TXE);
        local_irq_restore(flags);
}
static void emac_tx_disable(struct ocp_enet_private *dev)
{
        struct emac_regs *p = dev->emacp;
        unsigned long flags;
        u32 r;

        local_irq_save(flags);

        DBG("%d: tx_disable" NL, dev->def->index);

        r = in_be32(&p->mr0);
        if (r & EMAC_MR0_TXE) {
                int n = 300;    /* poll limit for the TX-idle bit */
                out_be32(&p->mr0, r & ~EMAC_MR0_TXE);
                while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n)
                        --n;
                if (unlikely(!n))
                        emac_report_timeout_error(dev, "TX disable timeout");
        }
        local_irq_restore(flags);
}
static void emac_rx_enable(struct ocp_enet_private *dev)
{
        struct emac_regs *p = dev->emacp;
        unsigned long flags;
        u32 r;

        local_irq_save(flags);
        if (unlikely(dev->commac.rx_stopped))
                goto out;

        DBG("%d: rx_enable" NL, dev->def->index);

        r = in_be32(&p->mr0);
        if (!(r & EMAC_MR0_RXE)) {
                if (unlikely(!(r & EMAC_MR0_RXI))) {
                        /* Wait if previous async disable is still in progress */
                        int n = 100;
                        while (!(r = in_be32(&p->mr0) & EMAC_MR0_RXI) && n)
                                --n;
                        if (unlikely(!n))
                                emac_report_timeout_error(dev,
                                                          "RX disable timeout");
                }
                out_be32(&p->mr0, r | EMAC_MR0_RXE);
        }
      out:
        local_irq_restore(flags);
}
static void emac_rx_disable(struct ocp_enet_private *dev)
{
        struct emac_regs *p = dev->emacp;
        unsigned long flags;
        u32 r;

        local_irq_save(flags);

        DBG("%d: rx_disable" NL, dev->def->index);

        r = in_be32(&p->mr0);
        if (r & EMAC_MR0_RXE) {
                int n = 300;    /* poll limit for the RX-idle bit */
                out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
                while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n)
                        --n;
                if (unlikely(!n))
                        emac_report_timeout_error(dev, "RX disable timeout");
        }
        local_irq_restore(flags);
}
static inline void emac_rx_disable_async(struct ocp_enet_private *dev)
{
        struct emac_regs *p = dev->emacp;
        unsigned long flags;
        u32 r;

        local_irq_save(flags);

        DBG("%d: rx_disable_async" NL, dev->def->index);

        r = in_be32(&p->mr0);
        if (r & EMAC_MR0_RXE)
                out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
        local_irq_restore(flags);
}
static int emac_reset(struct ocp_enet_private *dev)
{
        struct emac_regs *p = dev->emacp;
        unsigned long flags;
        int n = 20;

        DBG("%d: reset" NL, dev->def->index);

        local_irq_save(flags);

        if (!dev->reset_failed) {
                /* 40x erratum suggests stopping RX channel before reset,
                 * we stop TX as well
                 */
                emac_rx_disable(dev);
                emac_tx_disable(dev);
        }

        out_be32(&p->mr0, EMAC_MR0_SRST);
        while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n)
                --n;
        local_irq_restore(flags);

        if (n) {
                dev->reset_failed = 0;
                return 0;
        } else {
                emac_report_timeout_error(dev, "reset timeout");
                dev->reset_failed = 1;
                return -ETIMEDOUT;
        }
}
static void emac_hash_mc(struct ocp_enet_private *dev)
{
        struct emac_regs *p = dev->emacp;
        u16 gaht[4] = { 0 };
        struct dev_mc_list *dmi;

        DBG("%d: hash_mc %d" NL, dev->def->index, dev->ndev->mc_count);

        for (dmi = dev->ndev->mc_list; dmi; dmi = dmi->next) {
                int bit;
                DBG2("%d: mc %02x:%02x:%02x:%02x:%02x:%02x" NL,
                     dev->def->index,
                     dmi->dmi_addr[0], dmi->dmi_addr[1], dmi->dmi_addr[2],
                     dmi->dmi_addr[3], dmi->dmi_addr[4], dmi->dmi_addr[5]);

                bit = 63 - (ether_crc(ETH_ALEN, dmi->dmi_addr) >> 26);
                gaht[bit >> 4] |= 0x8000 >> (bit & 0x0f);
        }
        out_be32(&p->gaht1, gaht[0]);
        out_be32(&p->gaht2, gaht[1]);
        out_be32(&p->gaht3, gaht[2]);
        out_be32(&p->gaht4, gaht[3]);
}
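
/* A rough worked example of the hash above (illustrative values, not a
 * computed CRC): ether_crc() returns a 32-bit CRC of the 6-byte address and
 * its top 6 bits select one of 64 hash bits. E.g. a CRC of 0xd4000000 has
 * top 6 bits 0x35 (53), so bit = 63 - 53 = 10, which lands in
 * gaht[10 >> 4] = gaht[0] with mask 0x8000 >> (10 & 0xf) = 0x0020. A
 * multicast frame whose hash bit is set in GAHT1..4 is accepted by the EMAC
 * whenever EMAC_RMR_MAE is enabled.
 */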
static inline u32 emac_iff2rmr(struct net_device *ndev)
{
        u32 r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE |
            EMAC_RMR_BASE;

        if (ndev->flags & IFF_PROMISC)
                r |= EMAC_RMR_PME;
        else if (ndev->flags & IFF_ALLMULTI || ndev->mc_count > 32)
                r |= EMAC_RMR_PMME;
        else if (ndev->mc_count > 0)
                r |= EMAC_RMR_MAE;

        return r;
}
static inline int emac_opb_mhz(void)
{
        return (ocp_sys_info.opb_bus_freq + 500000) / 1000000;
}
/* BHs disabled */
static int emac_configure(struct ocp_enet_private *dev)
{
        struct emac_regs *p = dev->emacp;
        struct net_device *ndev = dev->ndev;
        int gige;
        u32 r;

        DBG("%d: configure" NL, dev->def->index);

        if (emac_reset(dev) < 0)
                return -ETIMEDOUT;

        tah_reset(dev->tah_dev);

        /* Mode register */
        r = EMAC_MR1_BASE(emac_opb_mhz()) | EMAC_MR1_VLE | EMAC_MR1_IST;
        if (dev->phy.duplex == DUPLEX_FULL)
                r |= EMAC_MR1_FDE;
        switch (dev->phy.speed) {
        case SPEED_1000:
                if (emac_phy_gpcs(dev->phy.mode)) {
                        r |= EMAC_MR1_MF_1000GPCS |
                            EMAC_MR1_MF_IPPA(dev->phy.address);

                        /* Put some arbitrary OUI, Manuf & Rev IDs so we can
                         * identify this GPCS PHY later.
                         */
                        out_be32(&p->ipcr, 0xdeadbeef);
                } else
                        r |= EMAC_MR1_MF_1000;
                r |= EMAC_MR1_RFS_16K;
                gige = 1;

                if (dev->ndev->mtu > ETH_DATA_LEN)
                        r |= EMAC_MR1_JPSM;
                break;
        case SPEED_100:
                r |= EMAC_MR1_MF_100;
                /* Fall through */
        default:
                r |= EMAC_MR1_RFS_4K;
                gige = 0;
                break;
        }

        if (dev->rgmii_dev)
                rgmii_set_speed(dev->rgmii_dev, dev->rgmii_input,
                                dev->phy.speed);
        else
                zmii_set_speed(dev->zmii_dev, dev->zmii_input, dev->phy.speed);

#if !defined(CONFIG_40x)
        /* A 40x erratum forces us NOT to use the integrated flow control;
         * let's hope it works on 44x ;)
         */
        if (dev->phy.duplex == DUPLEX_FULL) {
                if (dev->phy.pause)
                        r |= EMAC_MR1_EIFC | EMAC_MR1_APP;
                else if (dev->phy.asym_pause)
                        r |= EMAC_MR1_APP;
        }
#endif
        out_be32(&p->mr1, r);

        /* Set individual MAC address */
        out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
        out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
                 (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
                 ndev->dev_addr[5]);

        /* VLAN Tag Protocol ID */
        out_be32(&p->vtpid, 0x8100);

        /* Receive mode register */
        r = emac_iff2rmr(ndev);
        if (r & EMAC_RMR_MAE)
                emac_hash_mc(dev);
        out_be32(&p->rmr, r);

        /* FIFO thresholds */
        r = EMAC_TMR1((EMAC_MAL_BURST_SIZE / EMAC_FIFO_ENTRY_SIZE) + 1,
                      EMAC_TX_FIFO_SIZE / 2 / EMAC_FIFO_ENTRY_SIZE);
        out_be32(&p->tmr1, r);
        out_be32(&p->trtr, EMAC_TRTR(EMAC_TX_FIFO_SIZE / 2));

        /* A PAUSE frame is sent when the RX FIFO reaches its high-water mark;
           there should still be enough space in the FIFO to allow our link
           partner time to process this frame and also time to send a PAUSE
           frame itself.

           Here is the worst case scenario for the RX FIFO "headroom"
           (from "The Switch Book") (100Mbps, without preamble, inter-frame gap):

           1) One maximum-length frame on TX                    1522 bytes
           2) One PAUSE frame time                                64 bytes
           3) PAUSE frame decode time allowance                   64 bytes
           4) One maximum-length frame on RX                    1522 bytes
           5) Round-trip propagation delay of the link (100Mb)    15 bytes
           ----------
                                                                3187 bytes

           I chose to set the high-water mark to RX_FIFO_SIZE / 4 (1024 bytes)
           and the low-water mark to RX_FIFO_SIZE / 8 (512 bytes).
         */
        r = EMAC_RWMR(EMAC_RX_FIFO_SIZE(gige) / 8 / EMAC_FIFO_ENTRY_SIZE,
                      EMAC_RX_FIFO_SIZE(gige) / 4 / EMAC_FIFO_ENTRY_SIZE);
        out_be32(&p->rwmr, r);
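
        /* Sanity check of the watermarks above, assuming EMAC_FIFO_ENTRY_SIZE
         * is 16 bytes and a 4k RX FIFO (the non-gige case): high-water mark is
         * 4096 / 4 = 1024 bytes = 64 FIFO entries, low-water mark is
         * 4096 / 8 = 512 bytes = 32 entries, leaving 4096 - 1024 = 3072 bytes
         * of headroom, close to (though admittedly under) the 3187-byte worst
         * case computed above; the 16k FIFO of the gige case clears it easily.
         */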
        /* Set PAUSE timer to the maximum */
        out_be32(&p->ptr, 0xffff);

        /* Set interrupt mask */
        out_be32(&p->iser, EMAC_ISR_TXPE | EMAC_ISR_RXPE | /* EMAC_ISR_TXUE |
                 EMAC_ISR_RXOE | */ EMAC_ISR_OVR | EMAC_ISR_BP | EMAC_ISR_SE |
                 EMAC_ISR_ALE | EMAC_ISR_BFCS | EMAC_ISR_PTLE | EMAC_ISR_ORE |
                 EMAC_ISR_IRE | EMAC_ISR_TE);

        /* We need to take GPCS PHY out of isolate mode after EMAC reset */
        if (emac_phy_gpcs(dev->phy.mode))
                mii_reset_phy(&dev->phy);

        return 0;
}
/* BHs disabled */
static void emac_reinitialize(struct ocp_enet_private *dev)
{
        DBG("%d: reinitialize" NL, dev->def->index);

        if (!emac_configure(dev)) {
                emac_tx_enable(dev);
                emac_rx_enable(dev);
        }
}
/* BHs disabled */
static void emac_full_tx_reset(struct net_device *ndev)
{
        struct ocp_enet_private *dev = ndev->priv;
        struct ocp_func_emac_data *emacdata = dev->def->additions;

        DBG("%d: full_tx_reset" NL, dev->def->index);

        emac_tx_disable(dev);
        mal_disable_tx_channel(dev->mal, emacdata->mal_tx_chan);
        emac_clean_tx_ring(dev);
        dev->tx_cnt = dev->tx_slot = dev->ack_slot = 0;

        emac_configure(dev);

        mal_enable_tx_channel(dev->mal, emacdata->mal_tx_chan);
        emac_tx_enable(dev);
        emac_rx_enable(dev);

        netif_wake_queue(ndev);
}
/* BHs disabled */
static int __emac_mdio_read(struct ocp_enet_private *dev, u8 id, u8 reg)
{
        struct emac_regs *p = dev->emacp;
        u32 r;
        int n;

        DBG2("%d: mdio_read(%02x,%02x)" NL, dev->def->index, id, reg);

        /* Enable proper MDIO port */
        zmii_enable_mdio(dev->zmii_dev, dev->zmii_input);

        /* Wait for management interface to become idle */
        n = 10;
        while (!emac_phy_done(in_be32(&p->stacr))) {
                udelay(1);
                if (!--n)
                        goto timeout;
        }

        /* Issue read command */
        out_be32(&p->stacr,
                 EMAC_STACR_BASE(emac_opb_mhz()) | EMAC_STACR_STAC_READ |
                 (reg & EMAC_STACR_PRA_MASK)
                 | ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT)
                 | EMAC_STACR_START);

        /* Wait for read to complete */
        n = 100;
        while (!emac_phy_done(r = in_be32(&p->stacr))) {
                udelay(1);
                if (!--n)
                        goto timeout;
        }

        if (unlikely(r & EMAC_STACR_PHYE)) {
                DBG("%d: mdio_read(%02x, %02x) failed" NL, dev->def->index,
                    id, reg);
                return -EREMOTEIO;
        }

        r = ((r >> EMAC_STACR_PHYD_SHIFT) & EMAC_STACR_PHYD_MASK);
        DBG2("%d: mdio_read -> %04x" NL, dev->def->index, r);
        return r;
      timeout:
        DBG("%d: MII management interface timeout (read)" NL, dev->def->index);
        return -ETIMEDOUT;
}
/* BHs disabled */
static void __emac_mdio_write(struct ocp_enet_private *dev, u8 id, u8 reg,
                              u16 val)
{
        struct emac_regs *p = dev->emacp;
        int n;

        DBG2("%d: mdio_write(%02x,%02x,%04x)" NL, dev->def->index, id, reg,
             val);

        /* Enable proper MDIO port */
        zmii_enable_mdio(dev->zmii_dev, dev->zmii_input);

        /* Wait for management interface to be idle */
        n = 10;
        while (!emac_phy_done(in_be32(&p->stacr))) {
                udelay(1);
                if (!--n)
                        goto timeout;
        }

        /* Issue write command */
        out_be32(&p->stacr,
                 EMAC_STACR_BASE(emac_opb_mhz()) | EMAC_STACR_STAC_WRITE |
                 (reg & EMAC_STACR_PRA_MASK) |
                 ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT) |
                 (val << EMAC_STACR_PHYD_SHIFT) | EMAC_STACR_START);

        /* Wait for write to complete */
        n = 100;
        while (!emac_phy_done(in_be32(&p->stacr))) {
                udelay(1);
                if (!--n)
                        goto timeout;
        }
        return;
      timeout:
        DBG("%d: MII management interface timeout (write)" NL, dev->def->index);
}
static int emac_mdio_read(struct net_device *ndev, int id, int reg)
{
        struct ocp_enet_private *dev = ndev->priv;
        int res;

        local_bh_disable();
        res = __emac_mdio_read(dev->mdio_dev ? dev->mdio_dev : dev, (u8) id,
                               (u8) reg);
        local_bh_enable();
        return res;
}

static void emac_mdio_write(struct net_device *ndev, int id, int reg, int val)
{
        struct ocp_enet_private *dev = ndev->priv;

        local_bh_disable();
        __emac_mdio_write(dev->mdio_dev ? dev->mdio_dev : dev, (u8) id,
                          (u8) reg, (u16) val);
        local_bh_enable();
}
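
/* Typical use of these accessors (a sketch, not code from this driver): the
 * generic PHY code reaches them through dev->phy.mdio_read/mdio_write, e.g.
 * to poll link status via the standard MII BMSR register:
 *
 *      int bmsr = emac_mdio_read(ndev, dev->phy.address, MII_BMSR);
 *      if (bmsr >= 0 && (bmsr & BMSR_LSTATUS))
 *              ;       // link is up
 *
 * A negative value is -ETIMEDOUT or -EREMOTEIO from __emac_mdio_read().
 */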
/* BHs disabled */
static void emac_set_multicast_list(struct net_device *ndev)
{
        struct ocp_enet_private *dev = ndev->priv;
        struct emac_regs *p = dev->emacp;
        u32 rmr = emac_iff2rmr(ndev);

        DBG("%d: multicast %08x" NL, dev->def->index, rmr);
        BUG_ON(!netif_running(dev->ndev));

        /* I decided to relax register access rules here to avoid
         * a full EMAC reset.
         *
         * There is a real problem with the EMAC4 core if we use the MWSW_001
         * bit in the MR1 register and do a full EMAC reset.
         * One TX BD status update is delayed and, after EMAC reset, it
         * never happens, resulting in a TX hang (it'll be recovered by the TX
         * timeout handler eventually, but this is just gross).
         * So we either have to do a full TX reset or try to cheat here :)
         *
         * The only required change is to the RX mode register, so I *think*
         * all we need is just to stop the RX channel. This seems to work on
         * all tested SoCs.                                            --ebs
         */
        emac_rx_disable(dev);
        if (rmr & EMAC_RMR_MAE)
                emac_hash_mc(dev);
        out_be32(&p->rmr, rmr);
        emac_rx_enable(dev);
}
/* BHs disabled */
static int emac_resize_rx_ring(struct ocp_enet_private *dev, int new_mtu)
{
        struct ocp_func_emac_data *emacdata = dev->def->additions;
        int rx_sync_size = emac_rx_sync_size(new_mtu);
        int rx_skb_size = emac_rx_skb_size(new_mtu);
        int i, ret = 0;

        emac_rx_disable(dev);
        mal_disable_rx_channel(dev->mal, emacdata->mal_rx_chan);

        if (dev->rx_sg_skb) {
                ++dev->estats.rx_dropped_resize;
                dev_kfree_skb(dev->rx_sg_skb);
                dev->rx_sg_skb = NULL;
        }

        /* Make a first pass over RX ring and mark BDs ready, dropping
         * non-processed packets on the way. We need this as a separate pass
         * to simplify error recovery in the case of allocation failure later.
         */
        for (i = 0; i < NUM_RX_BUFF; ++i) {
                if (dev->rx_desc[i].ctrl & MAL_RX_CTRL_FIRST)
                        ++dev->estats.rx_dropped_resize;

                dev->rx_desc[i].data_len = 0;
                dev->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY |
                    (i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
        }

        /* Reallocate RX ring only if bigger skb buffers are required */
        if (rx_skb_size <= dev->rx_skb_size)
                goto skip;

        /* Second pass, allocate new skbs */
        for (i = 0; i < NUM_RX_BUFF; ++i) {
                struct sk_buff *skb = alloc_skb(rx_skb_size, GFP_ATOMIC);
                if (!skb) {
                        ret = -ENOMEM;
                        goto oom;
                }

                BUG_ON(!dev->rx_skb[i]);
                dev_kfree_skb(dev->rx_skb[i]);

                skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
                dev->rx_desc[i].data_ptr =
                    dma_map_single(dev->ldev, skb->data - 2, rx_sync_size,
                                   DMA_FROM_DEVICE) + 2;
                dev->rx_skb[i] = skb;
        }
      skip:
        /* Check if we need to change the "Jumbo" bit in MR1 */
        if ((new_mtu > ETH_DATA_LEN) ^ (dev->ndev->mtu > ETH_DATA_LEN)) {
                /* This is to prevent starting RX channel in emac_rx_enable() */
                dev->commac.rx_stopped = 1;

                dev->ndev->mtu = new_mtu;
                emac_full_tx_reset(dev->ndev);
        }

        mal_set_rcbs(dev->mal, emacdata->mal_rx_chan, emac_rx_size(new_mtu));
      oom:
        /* Restart RX */
        dev->commac.rx_stopped = dev->rx_slot = 0;
        mal_enable_rx_channel(dev->mal, emacdata->mal_rx_chan);
        emac_rx_enable(dev);

        return ret;
}
/* Process ctx, rtnl_lock semaphore */
static int emac_change_mtu(struct net_device *ndev, int new_mtu)
{
        struct ocp_enet_private *dev = ndev->priv;
        int ret = 0;

        if (new_mtu < EMAC_MIN_MTU || new_mtu > EMAC_MAX_MTU)
                return -EINVAL;

        DBG("%d: change_mtu(%d)" NL, dev->def->index, new_mtu);

        local_bh_disable();
        if (netif_running(ndev)) {
                /* Check if we really need to reinitialize the RX ring */
                if (emac_rx_skb_size(ndev->mtu) != emac_rx_skb_size(new_mtu))
                        ret = emac_resize_rx_ring(dev, new_mtu);
        }

        if (!ret) {
                ndev->mtu = new_mtu;
                dev->rx_skb_size = emac_rx_skb_size(new_mtu);
                dev->rx_sync_size = emac_rx_sync_size(new_mtu);
        }
        local_bh_enable();

        return ret;
}
static void emac_clean_tx_ring(struct ocp_enet_private *dev)
{
        int i;
        for (i = 0; i < NUM_TX_BUFF; ++i) {
                if (dev->tx_skb[i]) {
                        dev_kfree_skb(dev->tx_skb[i]);
                        dev->tx_skb[i] = NULL;
                        if (dev->tx_desc[i].ctrl & MAL_TX_CTRL_READY)
                                ++dev->estats.tx_dropped;
                }
                dev->tx_desc[i].ctrl = 0;
                dev->tx_desc[i].data_ptr = 0;
        }
}
static void emac_clean_rx_ring(struct ocp_enet_private *dev)
{
        int i;
        for (i = 0; i < NUM_RX_BUFF; ++i)
                if (dev->rx_skb[i]) {
                        dev->rx_desc[i].ctrl = 0;
                        dev_kfree_skb(dev->rx_skb[i]);
                        dev->rx_skb[i] = NULL;
                        dev->rx_desc[i].data_ptr = 0;
                }

        if (dev->rx_sg_skb) {
                dev_kfree_skb(dev->rx_sg_skb);
                dev->rx_sg_skb = NULL;
        }
}
static inline int emac_alloc_rx_skb(struct ocp_enet_private *dev, int slot,
                                    gfp_t flags)
{
        struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags);
        if (unlikely(!skb))
                return -ENOMEM;

        dev->rx_skb[slot] = skb;
        dev->rx_desc[slot].data_len = 0;

        skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
        dev->rx_desc[slot].data_ptr =
            dma_map_single(dev->ldev, skb->data - 2, dev->rx_sync_size,
                           DMA_FROM_DEVICE) + 2;

        dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
            (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);

        return 0;
}
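
/* A note on the "+ 2" arithmetic above (derived from the code itself):
 * skb_reserve() offsets skb->data by two bytes so that the 14-byte Ethernet
 * header leaves the IP header word-aligned for the stack. The DMA mapping is
 * done from skb->data - 2 (a word-aligned address, convenient for cache-line
 * sync over the whole receive area), and + 2 is added back so the
 * descriptor's data_ptr still points at skb->data, where the frame lands.
 */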
static void emac_print_link_status(struct ocp_enet_private *dev)
{
        if (netif_carrier_ok(dev->ndev))
                printk(KERN_INFO "%s: link is up, %d %s%s\n",
                       dev->ndev->name, dev->phy.speed,
                       dev->phy.duplex == DUPLEX_FULL ? "FDX" : "HDX",
                       dev->phy.pause ? ", pause enabled" :
                       dev->phy.asym_pause ? ", asymmetric pause enabled" : "");
        else
                printk(KERN_INFO "%s: link is down\n", dev->ndev->name);
}
/* Process ctx, rtnl_lock semaphore */
static int emac_open(struct net_device *ndev)
{
        struct ocp_enet_private *dev = ndev->priv;
        struct ocp_func_emac_data *emacdata = dev->def->additions;
        int err, i;

        DBG("%d: open" NL, dev->def->index);

        /* Setup error IRQ handler */
        err = request_irq(dev->def->irq, emac_irq, 0, "EMAC", dev);
        if (err) {
                printk(KERN_ERR "%s: failed to request IRQ %d\n",
                       ndev->name, dev->def->irq);
                return err;
        }

        /* Allocate RX ring */
        for (i = 0; i < NUM_RX_BUFF; ++i)
                if (emac_alloc_rx_skb(dev, i, GFP_KERNEL)) {
                        printk(KERN_ERR "%s: failed to allocate RX ring\n",
                               ndev->name);
                        goto oom;
                }

        local_bh_disable();
        dev->tx_cnt = dev->tx_slot = dev->ack_slot = dev->rx_slot =
            dev->commac.rx_stopped = 0;
        dev->rx_sg_skb = NULL;

        if (dev->phy.address >= 0) {
                int link_poll_interval;
                if (dev->phy.def->ops->poll_link(&dev->phy)) {
                        dev->phy.def->ops->read_link(&dev->phy);
                        EMAC_RX_CLK_DEFAULT(dev->def->index);
                        netif_carrier_on(dev->ndev);
                        link_poll_interval = PHY_POLL_LINK_ON;
                } else {
                        EMAC_RX_CLK_TX(dev->def->index);
                        netif_carrier_off(dev->ndev);
                        link_poll_interval = PHY_POLL_LINK_OFF;
                }
                mod_timer(&dev->link_timer, jiffies + link_poll_interval);
                emac_print_link_status(dev);
        } else
                netif_carrier_on(dev->ndev);

        emac_configure(dev);
        mal_poll_add(dev->mal, &dev->commac);
        mal_enable_tx_channel(dev->mal, emacdata->mal_tx_chan);
        mal_set_rcbs(dev->mal, emacdata->mal_rx_chan, emac_rx_size(ndev->mtu));
        mal_enable_rx_channel(dev->mal, emacdata->mal_rx_chan);
        emac_tx_enable(dev);
        emac_rx_enable(dev);
        netif_start_queue(ndev);
        local_bh_enable();

        return 0;
      oom:
        emac_clean_rx_ring(dev);
        free_irq(dev->def->irq, dev);
        return -ENOMEM;
}
/* BHs disabled */
static int emac_link_differs(struct ocp_enet_private *dev)
{
        u32 r = in_be32(&dev->emacp->mr1);

        int duplex = r & EMAC_MR1_FDE ? DUPLEX_FULL : DUPLEX_HALF;
        int speed, pause, asym_pause;

        if (r & (EMAC_MR1_MF_1000 | EMAC_MR1_MF_1000GPCS))
                speed = SPEED_1000;
        else if (r & EMAC_MR1_MF_100)
                speed = SPEED_100;
        else
                speed = SPEED_10;

        switch (r & (EMAC_MR1_EIFC | EMAC_MR1_APP)) {
        case (EMAC_MR1_EIFC | EMAC_MR1_APP):
                pause = 1;
                asym_pause = 0;
                break;
        case EMAC_MR1_APP:
                pause = 0;
                asym_pause = 1;
                break;
        default:
                pause = asym_pause = 0;
        }
        return speed != dev->phy.speed || duplex != dev->phy.duplex ||
            pause != dev->phy.pause || asym_pause != dev->phy.asym_pause;
}
/* BHs disabled */
static void emac_link_timer(unsigned long data)
{
        struct ocp_enet_private *dev = (struct ocp_enet_private *)data;
        int link_poll_interval;

        DBG2("%d: link timer" NL, dev->def->index);

        if (dev->phy.def->ops->poll_link(&dev->phy)) {
                if (!netif_carrier_ok(dev->ndev)) {
                        EMAC_RX_CLK_DEFAULT(dev->def->index);

                        /* Get new link parameters */
                        dev->phy.def->ops->read_link(&dev->phy);

                        if (dev->tah_dev || emac_link_differs(dev))
                                emac_full_tx_reset(dev->ndev);

                        netif_carrier_on(dev->ndev);
                        emac_print_link_status(dev);
                }
                link_poll_interval = PHY_POLL_LINK_ON;
        } else {
                if (netif_carrier_ok(dev->ndev)) {
                        EMAC_RX_CLK_TX(dev->def->index);
#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX)
                        emac_reinitialize(dev);
#endif
                        netif_carrier_off(dev->ndev);
                        emac_print_link_status(dev);
                }

                /* Retry reset if the previous attempt failed.
                 * This is needed mostly for the CONFIG_IBM_EMAC_PHY_RX_CLK_FIX
                 * case, but I left it here because it shouldn't trigger for
                 * sane PHYs anyway.
                 */
                if (unlikely(dev->reset_failed))
                        emac_reinitialize(dev);

                link_poll_interval = PHY_POLL_LINK_OFF;
        }
        mod_timer(&dev->link_timer, jiffies + link_poll_interval);
}
/* BHs disabled */
static void emac_force_link_update(struct ocp_enet_private *dev)
{
        netif_carrier_off(dev->ndev);
        if (timer_pending(&dev->link_timer))
                mod_timer(&dev->link_timer, jiffies + PHY_POLL_LINK_OFF);
}
/* Process ctx, rtnl_lock semaphore */
static int emac_close(struct net_device *ndev)
{
        struct ocp_enet_private *dev = ndev->priv;
        struct ocp_func_emac_data *emacdata = dev->def->additions;

        DBG("%d: close" NL, dev->def->index);

        local_bh_disable();

        if (dev->phy.address >= 0)
                del_timer_sync(&dev->link_timer);

        netif_stop_queue(ndev);
        emac_rx_disable(dev);
        emac_tx_disable(dev);
        mal_disable_rx_channel(dev->mal, emacdata->mal_rx_chan);
        mal_disable_tx_channel(dev->mal, emacdata->mal_tx_chan);
        mal_poll_del(dev->mal, &dev->commac);
        local_bh_enable();

        emac_clean_tx_ring(dev);
        emac_clean_rx_ring(dev);
        free_irq(dev->def->irq, dev);

        return 0;
}
static inline u16 emac_tx_csum(struct ocp_enet_private *dev,
                               struct sk_buff *skb)
{
#if defined(CONFIG_IBM_EMAC_TAH)
        if (skb->ip_summed == CHECKSUM_HW) {
                ++dev->stats.tx_packets_csum;
                return EMAC_TX_CTRL_TAH_CSUM;
        }
#endif
        return 0;
}
static inline int emac_xmit_finish(struct ocp_enet_private *dev, int len)
{
        struct emac_regs *p = dev->emacp;
        struct net_device *ndev = dev->ndev;

        /* Send the packet out */
        out_be32(&p->tmr0, EMAC_TMR0_XMIT);

        if (unlikely(++dev->tx_cnt == NUM_TX_BUFF)) {
                netif_stop_queue(ndev);
                DBG2("%d: stopped TX queue" NL, dev->def->index);
        }

        ndev->trans_start = jiffies;
        ++dev->stats.tx_packets;
        dev->stats.tx_bytes += len;

        return 0;
}
/* BHs disabled */
static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
        struct ocp_enet_private *dev = ndev->priv;
        unsigned int len = skb->len;
        int slot;

        u16 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
            MAL_TX_CTRL_LAST | emac_tx_csum(dev, skb);

        slot = dev->tx_slot++;
        if (dev->tx_slot == NUM_TX_BUFF) {
                dev->tx_slot = 0;
                ctrl |= MAL_TX_CTRL_WRAP;
        }

        DBG2("%d: xmit(%u) %d" NL, dev->def->index, len, slot);

        dev->tx_skb[slot] = skb;
        dev->tx_desc[slot].data_ptr = dma_map_single(dev->ldev, skb->data, len,
                                                     DMA_TO_DEVICE);
        dev->tx_desc[slot].data_len = (u16) len;
        barrier();
        dev->tx_desc[slot].ctrl = ctrl;

        return emac_xmit_finish(dev, len);
}
#if defined(CONFIG_IBM_EMAC_TAH)
static inline int emac_xmit_split(struct ocp_enet_private *dev, int slot,
                                  u32 pd, int len, int last, u16 base_ctrl)
{
        while (1) {
                u16 ctrl = base_ctrl;
                int chunk = min(len, MAL_MAX_TX_SIZE);
                len -= chunk;

                slot = (slot + 1) % NUM_TX_BUFF;

                if (last && !len)
                        ctrl |= MAL_TX_CTRL_LAST;
                if (slot == NUM_TX_BUFF - 1)
                        ctrl |= MAL_TX_CTRL_WRAP;

                dev->tx_skb[slot] = NULL;
                dev->tx_desc[slot].data_ptr = pd;
                dev->tx_desc[slot].data_len = (u16) chunk;
                dev->tx_desc[slot].ctrl = ctrl;
                ++dev->tx_cnt;

                if (!len)
                        break;

                pd += chunk;
        }
        return slot;
}
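
/* Worked example (illustrative; the real MAL_MAX_TX_SIZE comes from the MAL
 * headers): with MAL_MAX_TX_SIZE == 4080, a 5000-byte piece passed in here is
 * emitted as two descriptors of 4080 + 920 bytes, with MAL_TX_CTRL_LAST set
 * only on the 920-byte one (when 'last' is true) and MAL_TX_CTRL_WRAP set on
 * whichever descriptor occupies the final ring slot.
 */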
/* BHs disabled (SG version for TAH equipped EMACs) */
static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
{
        struct ocp_enet_private *dev = ndev->priv;
        int nr_frags = skb_shinfo(skb)->nr_frags;
        int len = skb->len, chunk;
        int slot, i;
        u16 ctrl;
        u32 pd;

        /* This is common "fast" path */
        if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE))
                return emac_start_xmit(skb, ndev);

        len -= skb->data_len;

        /* Note, this is only an *estimation*, we can still run out of empty
         * slots because of the additional fragmentation into
         * MAL_MAX_TX_SIZE-sized chunks
         */
        if (unlikely(dev->tx_cnt + nr_frags + mal_tx_chunks(len) > NUM_TX_BUFF))
                goto stop_queue;

        ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
            emac_tx_csum(dev, skb);
        slot = dev->tx_slot;

        /* skb data */
        dev->tx_skb[slot] = NULL;
        chunk = min(len, MAL_MAX_TX_SIZE);
        dev->tx_desc[slot].data_ptr = pd =
            dma_map_single(dev->ldev, skb->data, len, DMA_TO_DEVICE);
        dev->tx_desc[slot].data_len = (u16) chunk;
        len -= chunk;
        if (unlikely(len))
                slot = emac_xmit_split(dev, slot, pd + chunk, len, !nr_frags,
                                       ctrl);
        /* skb fragments */
        for (i = 0; i < nr_frags; ++i) {
                struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
                len = frag->size;

                if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
                        goto undo_frame;

                pd = dma_map_page(dev->ldev, frag->page, frag->page_offset, len,
                                  DMA_TO_DEVICE);

                slot = emac_xmit_split(dev, slot, pd, len, i == nr_frags - 1,
                                       ctrl);
        }

        DBG2("%d: xmit_sg(%u) %d - %d" NL, dev->def->index, skb->len,
             dev->tx_slot, slot);

        /* Attach skb to the last slot so we don't release it too early */
        dev->tx_skb[slot] = skb;

        /* Send the packet out */
        if (dev->tx_slot == NUM_TX_BUFF - 1)
                ctrl |= MAL_TX_CTRL_WRAP;
        barrier();
        dev->tx_desc[dev->tx_slot].ctrl = ctrl;
        dev->tx_slot = (slot + 1) % NUM_TX_BUFF;

        return emac_xmit_finish(dev, skb->len);

      undo_frame:
        /* Well, too bad. Our previous estimation was overly optimistic.
         * Undo everything.
         */
        while (slot != dev->tx_slot) {
                dev->tx_desc[slot].ctrl = 0;
                --dev->tx_cnt;
                if (--slot < 0)
                        slot = NUM_TX_BUFF - 1;
        }
        ++dev->estats.tx_undo;

      stop_queue:
        netif_stop_queue(ndev);
        DBG2("%d: stopped TX queue" NL, dev->def->index);
        return 1;
}
#else
# define emac_start_xmit_sg     emac_start_xmit
#endif  /* !defined(CONFIG_IBM_EMAC_TAH) */
static void emac_parse_tx_error(struct ocp_enet_private *dev, u16 ctrl)
{
        struct ibm_emac_error_stats *st = &dev->estats;
        DBG("%d: BD TX error %04x" NL, dev->def->index, ctrl);

        ++st->tx_bd_errors;
        if (ctrl & EMAC_TX_ST_BFCS)
                ++st->tx_bd_bad_fcs;
        if (ctrl & EMAC_TX_ST_LCS)
                ++st->tx_bd_carrier_loss;
        if (ctrl & EMAC_TX_ST_ED)
                ++st->tx_bd_excessive_deferral;
        if (ctrl & EMAC_TX_ST_EC)
                ++st->tx_bd_excessive_collisions;
        if (ctrl & EMAC_TX_ST_LC)
                ++st->tx_bd_late_collision;
        if (ctrl & EMAC_TX_ST_MC)
                ++st->tx_bd_multple_collisions;
        if (ctrl & EMAC_TX_ST_SC)
                ++st->tx_bd_single_collision;
        if (ctrl & EMAC_TX_ST_UR)
                ++st->tx_bd_underrun;
        if (ctrl & EMAC_TX_ST_SQE)
                ++st->tx_bd_sqe;
}
static void emac_poll_tx(void *param)
{
        struct ocp_enet_private *dev = param;
        DBG2("%d: poll_tx, %d %d" NL, dev->def->index, dev->tx_cnt,
             dev->ack_slot);

        if (dev->tx_cnt) {
                u16 ctrl;
                int slot = dev->ack_slot, n = 0;
        again:
                ctrl = dev->tx_desc[slot].ctrl;
                if (!(ctrl & MAL_TX_CTRL_READY)) {
                        struct sk_buff *skb = dev->tx_skb[slot];
                        ++n;

                        if (skb) {
                                dev_kfree_skb(skb);
                                dev->tx_skb[slot] = NULL;
                        }
                        slot = (slot + 1) % NUM_TX_BUFF;

                        if (unlikely(EMAC_IS_BAD_TX(ctrl)))
                                emac_parse_tx_error(dev, ctrl);

                        if (--dev->tx_cnt)
                                goto again;
                }
                if (n) {
                        dev->ack_slot = slot;
                        if (netif_queue_stopped(dev->ndev) &&
                            dev->tx_cnt < EMAC_TX_WAKEUP_THRESH)
                                netif_wake_queue(dev->ndev);

                        DBG2("%d: tx %d pkts" NL, dev->def->index, n);
                }
        }
}
static inline void emac_recycle_rx_skb(struct ocp_enet_private *dev, int slot,
                                       int len)
{
        struct sk_buff *skb = dev->rx_skb[slot];
        DBG2("%d: recycle %d %d" NL, dev->def->index, slot, len);

        if (len)
                dma_map_single(dev->ldev, skb->data - 2,
                               EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE);

        dev->rx_desc[slot].data_len = 0;
        barrier();
        dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
            (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
}
static void emac_parse_rx_error(struct ocp_enet_private *dev, u16 ctrl)
{
        struct ibm_emac_error_stats *st = &dev->estats;
        DBG("%d: BD RX error %04x" NL, dev->def->index, ctrl);

        ++st->rx_bd_errors;
        if (ctrl & EMAC_RX_ST_OE)
                ++st->rx_bd_overrun;
        if (ctrl & EMAC_RX_ST_BP)
                ++st->rx_bd_bad_packet;
        if (ctrl & EMAC_RX_ST_RP)
                ++st->rx_bd_runt_packet;
        if (ctrl & EMAC_RX_ST_SE)
                ++st->rx_bd_short_event;
        if (ctrl & EMAC_RX_ST_AE)
                ++st->rx_bd_alignment_error;
        if (ctrl & EMAC_RX_ST_BFCS)
                ++st->rx_bd_bad_fcs;
        if (ctrl & EMAC_RX_ST_PTL)
                ++st->rx_bd_packet_too_long;
        if (ctrl & EMAC_RX_ST_ORE)
                ++st->rx_bd_out_of_range;
        if (ctrl & EMAC_RX_ST_IRE)
                ++st->rx_bd_in_range;
}
static inline void emac_rx_csum(struct ocp_enet_private *dev,
                                struct sk_buff *skb, u16 ctrl)
{
#if defined(CONFIG_IBM_EMAC_TAH)
        if (!ctrl && dev->tah_dev) {
                skb->ip_summed = CHECKSUM_UNNECESSARY;
                ++dev->stats.rx_packets_csum;
        }
#endif
}
static inline int emac_rx_sg_append(struct ocp_enet_private *dev, int slot)
{
        if (likely(dev->rx_sg_skb != NULL)) {
                int len = dev->rx_desc[slot].data_len;
                int tot_len = dev->rx_sg_skb->len + len;

                if (unlikely(tot_len + 2 > dev->rx_skb_size)) {
                        ++dev->estats.rx_dropped_mtu;
                        dev_kfree_skb(dev->rx_sg_skb);
                        dev->rx_sg_skb = NULL;
                } else {
                        cacheable_memcpy(dev->rx_sg_skb->tail,
                                         dev->rx_skb[slot]->data, len);
                        skb_put(dev->rx_sg_skb, len);
                        emac_recycle_rx_skb(dev, slot, len);
                        return 0;
                }
        }
        emac_recycle_rx_skb(dev, slot, 0);
        return -1;
}
/* BHs disabled */
static int emac_poll_rx(void *param, int budget)
{
        struct ocp_enet_private *dev = param;
        int slot = dev->rx_slot, received = 0;

        DBG2("%d: poll_rx(%d)" NL, dev->def->index, budget);

      again:
        while (budget > 0) {
                int len;
                struct sk_buff *skb;
                u16 ctrl = dev->rx_desc[slot].ctrl;

                if (ctrl & MAL_RX_CTRL_EMPTY)
                        break;

                skb = dev->rx_skb[slot];
                barrier();
                len = dev->rx_desc[slot].data_len;

                if (unlikely(!MAL_IS_SINGLE_RX(ctrl)))
                        goto sg;

                ctrl &= EMAC_BAD_RX_MASK;
                if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
                        emac_parse_rx_error(dev, ctrl);
                        ++dev->estats.rx_dropped_error;
                        emac_recycle_rx_skb(dev, slot, 0);
                        len = 0;
                        goto next;
                }

                if (len && len < EMAC_RX_COPY_THRESH) {
                        struct sk_buff *copy_skb =
                            alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2, GFP_ATOMIC);
                        if (unlikely(!copy_skb))
                                goto oom;

                        skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2);
                        cacheable_memcpy(copy_skb->data - 2, skb->data - 2,
                                         len + 2);
                        emac_recycle_rx_skb(dev, slot, len);
                        skb = copy_skb;
                } else if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC)))
                        goto oom;

                skb_put(skb, len);
              push_packet:
                skb->dev = dev->ndev;
                skb->protocol = eth_type_trans(skb, dev->ndev);
                emac_rx_csum(dev, skb, ctrl);

                if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
                        ++dev->estats.rx_dropped_stack;
              next:
                ++dev->stats.rx_packets;
              skip:
                dev->stats.rx_bytes += len;
                slot = (slot + 1) % NUM_RX_BUFF;
                --budget;
                ++received;
                continue;
              sg:
                if (ctrl & MAL_RX_CTRL_FIRST) {
                        BUG_ON(dev->rx_sg_skb);
                        if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) {
                                DBG("%d: rx OOM %d" NL, dev->def->index, slot);
                                ++dev->estats.rx_dropped_oom;
                                emac_recycle_rx_skb(dev, slot, 0);
                        } else {
                                dev->rx_sg_skb = skb;
                                skb_put(skb, len);
                        }
                } else if (!emac_rx_sg_append(dev, slot) &&
                           (ctrl & MAL_RX_CTRL_LAST)) {

                        skb = dev->rx_sg_skb;
                        dev->rx_sg_skb = NULL;

                        ctrl &= EMAC_BAD_RX_MASK;
                        if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
                                emac_parse_rx_error(dev, ctrl);
                                ++dev->estats.rx_dropped_error;
                                dev_kfree_skb(skb);
                                len = 0;
                        } else
                                goto push_packet;
                }
                goto skip;
              oom:
                DBG("%d: rx OOM %d" NL, dev->def->index, slot);
                /* Drop the packet and recycle skb */
                ++dev->estats.rx_dropped_oom;
                emac_recycle_rx_skb(dev, slot, 0);
                goto next;
        }

        if (received) {
                DBG2("%d: rx %d BDs" NL, dev->def->index, received);
                dev->rx_slot = slot;
        }

        if (unlikely(budget && dev->commac.rx_stopped)) {
                struct ocp_func_emac_data *emacdata = dev->def->additions;

                barrier();
                if (!(dev->rx_desc[slot].ctrl & MAL_RX_CTRL_EMPTY)) {
                        DBG2("%d: rx restart" NL, dev->def->index);
                        received = 0;
                        goto again;
                }

                if (dev->rx_sg_skb) {
                        DBG2("%d: dropping partial rx packet" NL,
                             dev->def->index);
                        ++dev->estats.rx_dropped_error;
                        dev_kfree_skb(dev->rx_sg_skb);
                        dev->rx_sg_skb = NULL;
                }

                dev->commac.rx_stopped = 0;
                mal_enable_rx_channel(dev->mal, emacdata->mal_rx_chan);
                emac_rx_enable(dev);
                dev->rx_slot = slot;
        }
        return received;
}
static int emac_peek_rx(void *param)
{
        struct ocp_enet_private *dev = param;
        return !(dev->rx_desc[dev->rx_slot].ctrl & MAL_RX_CTRL_EMPTY);
}

static int emac_peek_rx_sg(void *param)
{
        struct ocp_enet_private *dev = param;
        int slot = dev->rx_slot;
        while (1) {
                u16 ctrl = dev->rx_desc[slot].ctrl;
                if (ctrl & MAL_RX_CTRL_EMPTY)
                        return 0;
                else if (ctrl & MAL_RX_CTRL_LAST)
                        return 1;

                slot = (slot + 1) % NUM_RX_BUFF;

                /* I'm just being paranoid here :) */
                if (unlikely(slot == dev->rx_slot))
                        return 0;
        }
}
/* Hard IRQ */
static void emac_rxde(void *param)
{
        struct ocp_enet_private *dev = param;
        ++dev->estats.rx_stopped;
        emac_rx_disable_async(dev);
}
/* Hard IRQ */
static irqreturn_t emac_irq(int irq, void *dev_instance, struct pt_regs *regs)
{
        struct ocp_enet_private *dev = dev_instance;
        struct emac_regs *p = dev->emacp;
        struct ibm_emac_error_stats *st = &dev->estats;

        u32 isr = in_be32(&p->isr);
        out_be32(&p->isr, isr);

        DBG("%d: isr = %08x" NL, dev->def->index, isr);

        if (isr & EMAC_ISR_TXPE)
                ++st->tx_parity;
        if (isr & EMAC_ISR_RXPE)
                ++st->rx_parity;
        if (isr & EMAC_ISR_TXUE)
                ++st->tx_underrun;
        if (isr & EMAC_ISR_RXOE)
                ++st->rx_fifo_overrun;
        if (isr & EMAC_ISR_OVR)
                ++st->rx_overrun;
        if (isr & EMAC_ISR_BP)
                ++st->rx_bad_packet;
        if (isr & EMAC_ISR_RP)
                ++st->rx_runt_packet;
        if (isr & EMAC_ISR_SE)
                ++st->rx_short_event;
        if (isr & EMAC_ISR_ALE)
                ++st->rx_alignment_error;
        if (isr & EMAC_ISR_BFCS)
                ++st->rx_bad_fcs;
        if (isr & EMAC_ISR_PTLE)
                ++st->rx_packet_too_long;
        if (isr & EMAC_ISR_ORE)
                ++st->rx_out_of_range;
        if (isr & EMAC_ISR_IRE)
                ++st->rx_in_range;
        if (isr & EMAC_ISR_SQE)
                ++st->tx_sqe;
        if (isr & EMAC_ISR_TE)
                ++st->tx_errors;

        return IRQ_HANDLED;
}
static struct net_device_stats *emac_stats(struct net_device *ndev)
{
        struct ocp_enet_private *dev = ndev->priv;
        struct ibm_emac_stats *st = &dev->stats;
        struct ibm_emac_error_stats *est = &dev->estats;
        struct net_device_stats *nst = &dev->nstats;

        DBG2("%d: stats" NL, dev->def->index);

        /* Compute "legacy" statistics */
        local_irq_disable();
        nst->rx_packets = (unsigned long)st->rx_packets;
        nst->rx_bytes = (unsigned long)st->rx_bytes;
        nst->tx_packets = (unsigned long)st->tx_packets;
        nst->tx_bytes = (unsigned long)st->tx_bytes;
        nst->rx_dropped = (unsigned long)(est->rx_dropped_oom +
                                          est->rx_dropped_error +
                                          est->rx_dropped_resize +
                                          est->rx_dropped_mtu);
        nst->tx_dropped = (unsigned long)est->tx_dropped;

        nst->rx_errors = (unsigned long)est->rx_bd_errors;
        nst->rx_fifo_errors = (unsigned long)(est->rx_bd_overrun +
                                              est->rx_fifo_overrun +
                                              est->rx_overrun);
        nst->rx_frame_errors = (unsigned long)(est->rx_bd_alignment_error +
                                               est->rx_alignment_error);
        nst->rx_crc_errors = (unsigned long)(est->rx_bd_bad_fcs +
                                             est->rx_bad_fcs);
        nst->rx_length_errors = (unsigned long)(est->rx_bd_runt_packet +
                                                est->rx_bd_short_event +
                                                est->rx_bd_packet_too_long +
                                                est->rx_bd_out_of_range +
                                                est->rx_bd_in_range +
                                                est->rx_runt_packet +
                                                est->rx_short_event +
                                                est->rx_packet_too_long +
                                                est->rx_out_of_range +
                                                est->rx_in_range);

        nst->tx_errors = (unsigned long)(est->tx_bd_errors + est->tx_errors);
        nst->tx_fifo_errors = (unsigned long)(est->tx_bd_underrun +
                                              est->tx_underrun);
        nst->tx_carrier_errors = (unsigned long)est->tx_bd_carrier_loss;
        nst->collisions = (unsigned long)(est->tx_bd_excessive_deferral +
                                          est->tx_bd_excessive_collisions +
                                          est->tx_bd_late_collision +
                                          est->tx_bd_multple_collisions);
        local_irq_enable();
        return nst;
}
static void emac_remove(struct ocp_device *ocpdev)
{
        struct ocp_enet_private *dev = ocp_get_drvdata(ocpdev);

        DBG("%d: remove" NL, dev->def->index);

        ocp_set_drvdata(ocpdev, NULL);
        unregister_netdev(dev->ndev);

        tah_fini(dev->tah_dev);
        rgmii_fini(dev->rgmii_dev, dev->rgmii_input);
        zmii_fini(dev->zmii_dev, dev->zmii_input);

        emac_dbg_register(dev->def->index, NULL);

        mal_unregister_commac(dev->mal, &dev->commac);
        iounmap((void *)dev->emacp);
        kfree(dev->ndev);
}
static struct mal_commac_ops emac_commac_ops = {
        .poll_tx = &emac_poll_tx,
        .poll_rx = &emac_poll_rx,
        .peek_rx = &emac_peek_rx,
        .rxde = &emac_rxde,
};

static struct mal_commac_ops emac_commac_sg_ops = {
        .poll_tx = &emac_poll_tx,
        .poll_rx = &emac_poll_rx,
        .peek_rx = &emac_peek_rx_sg,
        .rxde = &emac_rxde,
};
/* Ethtool support */
static int emac_ethtool_get_settings(struct net_device *ndev,
                                     struct ethtool_cmd *cmd)
{
        struct ocp_enet_private *dev = ndev->priv;

        cmd->supported = dev->phy.features;
        cmd->port = PORT_MII;
        cmd->phy_address = dev->phy.address;
        cmd->transceiver =
            dev->phy.address >= 0 ? XCVR_EXTERNAL : XCVR_INTERNAL;

        local_bh_disable();
        cmd->advertising = dev->phy.advertising;
        cmd->autoneg = dev->phy.autoneg;
        cmd->speed = dev->phy.speed;
        cmd->duplex = dev->phy.duplex;
        local_bh_enable();

        return 0;
}
static int emac_ethtool_set_settings(struct net_device *ndev,
                                     struct ethtool_cmd *cmd)
{
        struct ocp_enet_private *dev = ndev->priv;
        u32 f = dev->phy.features;

        DBG("%d: set_settings(%d, %d, %d, 0x%08x)" NL, dev->def->index,
            cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising);

        /* Basic sanity checks */
        if (dev->phy.address < 0)
                return -EOPNOTSUPP;
        if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
                return -EINVAL;
        if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
                return -EINVAL;
        if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL)
                return -EINVAL;

        if (cmd->autoneg == AUTONEG_DISABLE) {
                switch (cmd->speed) {
                case SPEED_10:
                        if (cmd->duplex == DUPLEX_HALF
                            && !(f & SUPPORTED_10baseT_Half))
                                return -EINVAL;
                        if (cmd->duplex == DUPLEX_FULL
                            && !(f & SUPPORTED_10baseT_Full))
                                return -EINVAL;
                        break;
                case SPEED_100:
                        if (cmd->duplex == DUPLEX_HALF
                            && !(f & SUPPORTED_100baseT_Half))
                                return -EINVAL;
                        if (cmd->duplex == DUPLEX_FULL
                            && !(f & SUPPORTED_100baseT_Full))
                                return -EINVAL;
                        break;
                case SPEED_1000:
                        if (cmd->duplex == DUPLEX_HALF
                            && !(f & SUPPORTED_1000baseT_Half))
                                return -EINVAL;
                        if (cmd->duplex == DUPLEX_FULL
                            && !(f & SUPPORTED_1000baseT_Full))
                                return -EINVAL;
                        break;
                default:
                        return -EINVAL;
                }

                local_bh_disable();
                dev->phy.def->ops->setup_forced(&dev->phy, cmd->speed,
                                                cmd->duplex);
        } else {
                if (!(f & SUPPORTED_Autoneg))
                        return -EINVAL;

                local_bh_disable();
                dev->phy.def->ops->setup_aneg(&dev->phy,
                                              (cmd->advertising & f) |
                                              (dev->phy.advertising &
                                               (ADVERTISED_Pause |
                                                ADVERTISED_Asym_Pause)));
        }
        emac_force_link_update(dev);
        local_bh_enable();

        return 0;
}
static void emac_ethtool_get_ringparam(struct net_device *ndev,
                                       struct ethtool_ringparam *rp)
{
        rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF;
        rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF;
}
static void emac_ethtool_get_pauseparam(struct net_device *ndev,
                                        struct ethtool_pauseparam *pp)
{
        struct ocp_enet_private *dev = ndev->priv;

        local_bh_disable();
        if ((dev->phy.features & SUPPORTED_Autoneg) &&
            (dev->phy.advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause)))
                pp->autoneg = 1;

        if (dev->phy.duplex == DUPLEX_FULL) {
                if (dev->phy.pause)
                        pp->rx_pause = pp->tx_pause = 1;
                else if (dev->phy.asym_pause)
                        pp->tx_pause = 1;
        }
        local_bh_enable();
}
static u32 emac_ethtool_get_rx_csum(struct net_device *ndev)
{
        struct ocp_enet_private *dev = ndev->priv;
        return dev->tah_dev != NULL;
}

static int emac_get_regs_len(struct ocp_enet_private *dev)
{
        return sizeof(struct emac_ethtool_regs_subhdr) + EMAC_ETHTOOL_REGS_SIZE;
}

static int emac_ethtool_get_regs_len(struct net_device *ndev)
{
        struct ocp_enet_private *dev = ndev->priv;
        return sizeof(struct emac_ethtool_regs_hdr) +
            emac_get_regs_len(dev) + mal_get_regs_len(dev->mal) +
            zmii_get_regs_len(dev->zmii_dev) +
            rgmii_get_regs_len(dev->rgmii_dev) +
            tah_get_regs_len(dev->tah_dev);
}
static void *emac_dump_regs(struct ocp_enet_private *dev, void *buf)
{
        struct emac_ethtool_regs_subhdr *hdr = buf;

        hdr->version = EMAC_ETHTOOL_REGS_VER;
        hdr->index = dev->def->index;
        memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE);
        return ((void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE);
}
static void emac_ethtool_get_regs(struct net_device *ndev,
                                  struct ethtool_regs *regs, void *buf)
{
        struct ocp_enet_private *dev = ndev->priv;
        struct emac_ethtool_regs_hdr *hdr = buf;

        hdr->components = 0;
        buf = hdr + 1;

        local_irq_disable();
        buf = mal_dump_regs(dev->mal, buf);
        buf = emac_dump_regs(dev, buf);
        if (dev->zmii_dev) {
                hdr->components |= EMAC_ETHTOOL_REGS_ZMII;
                buf = zmii_dump_regs(dev->zmii_dev, buf);
        }
        if (dev->rgmii_dev) {
                hdr->components |= EMAC_ETHTOOL_REGS_RGMII;
                buf = rgmii_dump_regs(dev->rgmii_dev, buf);
        }
        if (dev->tah_dev) {
                hdr->components |= EMAC_ETHTOOL_REGS_TAH;
                buf = tah_dump_regs(dev->tah_dev, buf);
        }
        local_irq_enable();
}
static int emac_ethtool_nway_reset(struct net_device *ndev)
{
        struct ocp_enet_private *dev = ndev->priv;
        int res = 0;

        DBG("%d: nway_reset" NL, dev->def->index);

        if (dev->phy.address < 0)
                return -EOPNOTSUPP;

        local_bh_disable();
        if (!dev->phy.autoneg) {
                res = -EINVAL;
                goto out;
        }

        dev->phy.def->ops->setup_aneg(&dev->phy, dev->phy.advertising);
        emac_force_link_update(dev);

      out:
        local_bh_enable();
        return res;
}
static int emac_ethtool_get_stats_count(struct net_device *ndev)
{
        return EMAC_ETHTOOL_STATS_COUNT;
}

static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset,
                                     u8 *buf)
{
        if (stringset == ETH_SS_STATS)
                memcpy(buf, &emac_stats_keys, sizeof(emac_stats_keys));
}

static void emac_ethtool_get_ethtool_stats(struct net_device *ndev,
                                           struct ethtool_stats *estats,
                                           u64 *tmp_stats)
{
        struct ocp_enet_private *dev = ndev->priv;
        local_irq_disable();
        memcpy(tmp_stats, &dev->stats, sizeof(dev->stats));
        tmp_stats += sizeof(dev->stats) / sizeof(u64);
        memcpy(tmp_stats, &dev->estats, sizeof(dev->estats));
        local_irq_enable();
}
static void emac_ethtool_get_drvinfo(struct net_device *ndev,
                                     struct ethtool_drvinfo *info)
{
        struct ocp_enet_private *dev = ndev->priv;

        strcpy(info->driver, "ibm_emac");
        strcpy(info->version, DRV_VERSION);
        info->fw_version[0] = '\0';
        sprintf(info->bus_info, "PPC 4xx EMAC %d", dev->def->index);
        info->n_stats = emac_ethtool_get_stats_count(ndev);
        info->regdump_len = emac_ethtool_get_regs_len(ndev);
}
static struct ethtool_ops emac_ethtool_ops = {
        .get_settings = emac_ethtool_get_settings,
        .set_settings = emac_ethtool_set_settings,
        .get_drvinfo = emac_ethtool_get_drvinfo,

        .get_regs_len = emac_ethtool_get_regs_len,
        .get_regs = emac_ethtool_get_regs,

        .nway_reset = emac_ethtool_nway_reset,

        .get_ringparam = emac_ethtool_get_ringparam,
        .get_pauseparam = emac_ethtool_get_pauseparam,

        .get_rx_csum = emac_ethtool_get_rx_csum,

        .get_strings = emac_ethtool_get_strings,
        .get_stats_count = emac_ethtool_get_stats_count,
        .get_ethtool_stats = emac_ethtool_get_ethtool_stats,

        .get_link = ethtool_op_get_link,
        .get_tx_csum = ethtool_op_get_tx_csum,
        .get_sg = ethtool_op_get_sg,
};
static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
        struct ocp_enet_private *dev = ndev->priv;
        uint16_t *data = (uint16_t *)&rq->ifr_ifru;

        DBG("%d: ioctl %08x" NL, dev->def->index, cmd);

        if (dev->phy.address < 0)
                return -EOPNOTSUPP;

        switch (cmd) {
        case SIOCGMIIPHY:
        case SIOCDEVPRIVATE:
                data[0] = dev->phy.address;
                /* Fall through */
        case SIOCGMIIREG:
        case SIOCDEVPRIVATE + 1:
                data[3] = emac_mdio_read(ndev, dev->phy.address, data[1]);
                return 0;

        case SIOCSMIIREG:
        case SIOCDEVPRIVATE + 2:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                emac_mdio_write(ndev, dev->phy.address, data[1], data[2]);
                return 0;
        default:
                return -EOPNOTSUPP;
        }
}
static int __init emac_probe(struct ocp_device *ocpdev)
{
        struct ocp_func_emac_data *emacdata = ocpdev->def->additions;
        struct net_device *ndev;
        struct ocp_device *maldev;
        struct ocp_enet_private *dev;
        int err, i;

        DBG("%d: probe" NL, ocpdev->def->index);

        if (!emacdata) {
                printk(KERN_ERR "emac%d: Missing additional data!\n",
                       ocpdev->def->index);
                return -ENODEV;
        }

        /* Allocate our net_device structure */
        ndev = alloc_etherdev(sizeof(struct ocp_enet_private));
        if (!ndev) {
                printk(KERN_ERR "emac%d: could not allocate ethernet device!\n",
                       ocpdev->def->index);
                return -ENOMEM;
        }
        dev = ndev->priv;
        dev->ndev = ndev;
        dev->ldev = &ocpdev->dev;
        dev->def = ocpdev->def;
        SET_MODULE_OWNER(ndev);

        /* Find the MAL device we are connected to */
        maldev =
            ocp_find_device(OCP_VENDOR_IBM, OCP_FUNC_MAL, emacdata->mal_idx);
        if (!maldev) {
                printk(KERN_ERR "emac%d: unknown mal%d device!\n",
                       dev->def->index, emacdata->mal_idx);
                err = -ENODEV;
                goto out;
        }
        dev->mal = ocp_get_drvdata(maldev);
        if (!dev->mal) {
                printk(KERN_ERR "emac%d: mal%d hasn't been initialized yet!\n",
                       dev->def->index, emacdata->mal_idx);
                err = -ENODEV;
                goto out;
        }

        /* Register with MAL */
        dev->commac.ops = &emac_commac_ops;
        dev->commac.dev = dev;
        dev->commac.tx_chan_mask = MAL_CHAN_MASK(emacdata->mal_tx_chan);
        dev->commac.rx_chan_mask = MAL_CHAN_MASK(emacdata->mal_rx_chan);
        err = mal_register_commac(dev->mal, &dev->commac);
        if (err) {
                printk(KERN_ERR "emac%d: failed to register with mal%d!\n",
                       dev->def->index, emacdata->mal_idx);
                goto out;
        }
        dev->rx_skb_size = emac_rx_skb_size(ndev->mtu);
        dev->rx_sync_size = emac_rx_sync_size(ndev->mtu);
        /* Get pointers to BD rings */
        dev->tx_desc =
            dev->mal->bd_virt + mal_tx_bd_offset(dev->mal,
                                                 emacdata->mal_tx_chan);
        dev->rx_desc =
            dev->mal->bd_virt + mal_rx_bd_offset(dev->mal,
                                                 emacdata->mal_rx_chan);

        DBG("%d: tx_desc %p" NL, ocpdev->def->index, dev->tx_desc);
        DBG("%d: rx_desc %p" NL, ocpdev->def->index, dev->rx_desc);

        /* Clean rings */
        memset(dev->tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor));
        memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor));

        /* If we depend on another EMAC for MDIO, check whether it was probed already */
        if (emacdata->mdio_idx >= 0 && emacdata->mdio_idx != ocpdev->def->index) {
                struct ocp_device *mdiodev =
                    ocp_find_device(OCP_VENDOR_IBM, OCP_FUNC_EMAC,
                                    emacdata->mdio_idx);
                if (!mdiodev) {
                        printk(KERN_ERR "emac%d: unknown emac%d device!\n",
                               dev->def->index, emacdata->mdio_idx);
                        err = -ENODEV;
                        goto out2;
                }
                dev->mdio_dev = ocp_get_drvdata(mdiodev);
                if (!dev->mdio_dev) {
                        printk(KERN_ERR
                               "emac%d: emac%d hasn't been initialized yet!\n",
                               dev->def->index, emacdata->mdio_idx);
                        err = -ENODEV;
                        goto out2;
                }
        }
        /* Attach to ZMII, if needed */
        if ((err = zmii_attach(dev)) != 0)
                goto out2;

        /* Attach to RGMII, if needed */
        if ((err = rgmii_attach(dev)) != 0)
                goto out3;

        /* Attach to TAH, if needed */
        if ((err = tah_attach(dev)) != 0)
                goto out4;

        /* Map EMAC regs */
        dev->emacp =
            (struct emac_regs *)ioremap(dev->def->paddr,
                                        sizeof(struct emac_regs));
        if (!dev->emacp) {
                printk(KERN_ERR "emac%d: could not ioremap device registers!\n",
                       dev->def->index);
                err = -ENOMEM;
                goto out5;
        }

        /* Fill in MAC address */
        for (i = 0; i < 6; ++i)
                ndev->dev_addr[i] = emacdata->mac_addr[i];

        /* Set some link defaults before we can find out the real parameters */
        dev->phy.speed = SPEED_100;
        dev->phy.duplex = DUPLEX_FULL;
        dev->phy.autoneg = AUTONEG_DISABLE;
        dev->phy.pause = dev->phy.asym_pause = 0;
        init_timer(&dev->link_timer);
        dev->link_timer.function = emac_link_timer;
        dev->link_timer.data = (unsigned long)dev;
        /* Find PHY if any */
        dev->phy.dev = ndev;
        dev->phy.mode = emacdata->phy_mode;
        if (emacdata->phy_map != 0xffffffff) {
                u32 phy_map = emacdata->phy_map | busy_phy_map;
                u32 adv;

                DBG("%d: PHY maps %08x %08x" NL, dev->def->index,
                    emacdata->phy_map, busy_phy_map);

                EMAC_RX_CLK_TX(dev->def->index);

                dev->phy.mdio_read = emac_mdio_read;
                dev->phy.mdio_write = emac_mdio_write;

                /* Configure EMAC with defaults so we can at least use MDIO
                 * This is needed mostly for 440GX
                 */
                if (emac_phy_gpcs(dev->phy.mode)) {
                        /* XXX
                         * Make GPCS PHY address equal to EMAC index.
                         * We probably should take into account busy_phy_map
                         * and/or phy_map here.
                         */
                        dev->phy.address = dev->def->index;
                }
                emac_configure(dev);

                for (i = 0; i < 0x20; phy_map >>= 1, ++i)
                        if (!(phy_map & 1)) {
                                int r;
                                busy_phy_map |= 1 << i;

                                /* Quick check if there is a PHY at the address */
                                r = emac_mdio_read(dev->ndev, i, MII_BMCR);
                                if (r == 0xffff || r < 0)
                                        continue;
                                if (!mii_phy_probe(&dev->phy, i))
                                        break;
                        }
                if (i == 0x20) {
                        printk(KERN_WARNING "emac%d: can't find PHY!\n",
                               dev->def->index);
                        err = -ENXIO;
                        goto out6;
                }

                /* Init PHY */
                if (dev->phy.def->ops->init)
                        dev->phy.def->ops->init(&dev->phy);

                /* Disable any PHY features not supported by the platform */
                dev->phy.def->features &= ~emacdata->phy_feat_exc;

                /* Setup initial link parameters */
                if (dev->phy.features & SUPPORTED_Autoneg) {
                        adv = dev->phy.features;
#if !defined(CONFIG_40x)
                        adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
#endif
                        /* Restart autonegotiation */
                        dev->phy.def->ops->setup_aneg(&dev->phy, adv);
                } else {
                        u32 f = dev->phy.def->features;
                        int speed = SPEED_10, fd = DUPLEX_HALF;

                        /* Select highest supported speed/duplex */
                        if (f & SUPPORTED_1000baseT_Full) {
                                speed = SPEED_1000;
                                fd = DUPLEX_FULL;
                        } else if (f & SUPPORTED_1000baseT_Half)
                                speed = SPEED_1000;
                        else if (f & SUPPORTED_100baseT_Full) {
                                speed = SPEED_100;
                                fd = DUPLEX_FULL;
                        } else if (f & SUPPORTED_100baseT_Half)
                                speed = SPEED_100;
                        else if (f & SUPPORTED_10baseT_Full)
                                fd = DUPLEX_FULL;

                        /* Force link parameters */
                        dev->phy.def->ops->setup_forced(&dev->phy, speed, fd);
                }
        } else {
                /* PHY-less configuration.
                 * XXX I probably should move these settings to emacdata
                 */
                dev->phy.address = -1;
                dev->phy.features = SUPPORTED_100baseT_Full | SUPPORTED_MII;
                dev->phy.pause = 1;
        }
        /* Fill in the driver function table */
        ndev->open = &emac_open;
        if (dev->tah_dev) {
                ndev->hard_start_xmit = &emac_start_xmit_sg;
                ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
        } else
                ndev->hard_start_xmit = &emac_start_xmit;
        ndev->tx_timeout = &emac_full_tx_reset;
        ndev->watchdog_timeo = 5 * HZ;
        ndev->stop = &emac_close;
        ndev->get_stats = &emac_stats;
        ndev->set_multicast_list = &emac_set_multicast_list;
        ndev->do_ioctl = &emac_ioctl;
        if (emac_phy_supports_gige(emacdata->phy_mode)) {
                ndev->change_mtu = &emac_change_mtu;
                dev->commac.ops = &emac_commac_sg_ops;
        }
        SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);

        netif_carrier_off(ndev);
        netif_stop_queue(ndev);

        err = register_netdev(ndev);
        if (err) {
                printk(KERN_ERR "emac%d: failed to register net device (%d)!\n",
                       dev->def->index, err);
                goto out6;
        }

        ocp_set_drvdata(ocpdev, dev);

        printk("%s: emac%d, MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
               ndev->name, dev->def->index,
               ndev->dev_addr[0], ndev->dev_addr[1], ndev->dev_addr[2],
               ndev->dev_addr[3], ndev->dev_addr[4], ndev->dev_addr[5]);

        if (dev->phy.address >= 0)
                printk("%s: found %s PHY (0x%02x)\n", ndev->name,
                       dev->phy.def->name, dev->phy.address);

        emac_dbg_register(dev->def->index, dev);

        return 0;
      out6:
        iounmap((void *)dev->emacp);
      out5:
        tah_fini(dev->tah_dev);
      out4:
        rgmii_fini(dev->rgmii_dev, dev->rgmii_input);
      out3:
        zmii_fini(dev->zmii_dev, dev->zmii_input);
      out2:
        mal_unregister_commac(dev->mal, &dev->commac);
      out:
        kfree(ndev);
        return err;
}
static struct ocp_device_id emac_ids[] = {
        { .vendor = OCP_VENDOR_IBM, .function = OCP_FUNC_EMAC },
        { .vendor = OCP_VENDOR_INVALID }
};

static struct ocp_driver emac_driver = {
        .name = DRV_NAME,
        .id_table = emac_ids,
        .probe = emac_probe,
        .remove = emac_remove,
};
static int __init emac_init(void)
{
        printk(KERN_INFO DRV_DESC ", version " DRV_VERSION "\n");

        DBG(": init" NL);

        if (mal_init())
                return -ENODEV;

        EMAC_CLK_INTERNAL;
        if (ocp_register_driver(&emac_driver)) {
                EMAC_CLK_EXTERNAL;
                ocp_unregister_driver(&emac_driver);
                mal_exit();
                return -ENODEV;
        }
        EMAC_CLK_EXTERNAL;

        return 0;
}

static void __exit emac_exit(void)
{
        DBG(": exit" NL);
        ocp_unregister_driver(&emac_driver);
        mal_exit();
}

module_init(emac_init);
module_exit(emac_exit);