/*
 * drivers/net/ibm_emac/ibm_emac_core.c
 *
 * Driver for PowerPC 4xx on-chip ethernet controller.
 *
 * Copyright (c) 2004, 2005 Zultys Technologies.
 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
 *
 * Based on original work by
 *	Matt Porter <mporter@kernel.crashing.org>
 *	(c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
 *	Armin Kuster <akuster@mvista.com>
 *	Johnnie Peters <jpeters@mvista.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/bitops.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/uaccess.h>
#include <asm/ocp.h>

#include "ibm_emac_core.h"
#include "ibm_emac_debug.h"
/*
 * Lack of dma_unmap_xxx calls is intentional.
 *
 * API-correct usage requires additional support state information to be
 * maintained for every RX and TX buffer descriptor (BD). Unfortunately, due to
 * EMAC design (e.g. TX buffer passed from network stack can be split into
 * several BDs, dma_map_single/dma_map_page can be used to map particular BD),
 * maintaining such information will add additional overhead.
 * Current DMA API implementation for 4xx processors only ensures cache coherency
 * and dma_unmap_xxx routines are empty and are likely to stay this way.
 * I decided to omit dma_unmap_xxx calls because I don't want to add additional
 * complexity just for the sake of following some abstract API, when it doesn't
 * add any real benefit to the driver. I understand that this decision may be
 * controversial, but I really tried to make code API-correct and efficient
 * at the same time and didn't come up with code I liked :(.                --ebs
 */
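
/* For illustration only (not compiled): following the DMA API to the letter
 * would mean tracking, per BD, roughly the state sketched below (these names
 * are hypothetical, not part of this driver), plus a matching
 * dma_unmap_single()/dma_unmap_page() call when each BD completes in the
 * TX/RX poll handlers.
 */
#if 0
struct emac_bd_unmap_info {
	dma_addr_t addr;	/* as returned by dma_map_single/dma_map_page */
	u16 len;		/* mapped length for this BD */
	u8 mapped_as_page;	/* 1 -> dma_unmap_page, 0 -> dma_unmap_single */
};
#endif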
#define DRV_NAME	"emac"
#define DRV_VERSION	"3.53"
#define DRV_DESC	"PPC 4xx OCP EMAC driver"

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR
    ("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>");
MODULE_LICENSE("GPL");

/* minimum number of free TX descriptors required to wake up TX process */
#define EMAC_TX_WAKEUP_THRESH		(NUM_TX_BUFF / 4)
/* If packet size is less than this number, we allocate a small skb and copy
 * the packet contents into it instead of just sending the original big skb up
 */
#define EMAC_RX_COPY_THRESH		CONFIG_IBM_EMAC_RX_COPY_THRESHOLD

/* Since multiple EMACs share MDIO lines in various ways, we need
 * to avoid re-using the same PHY ID in cases where the arch didn't
 * setup precise phy_map entries
 */
static u32 busy_phy_map;
#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX) && (defined(CONFIG_405EP) || defined(CONFIG_440EP))
/* 405EP has an "EMAC to PHY Control Register" (CPC0_EPCTL) which can help us
 * with the PHY RX clock problem.
 * 440EP has a more sane SDR0_MFR register implementation than 440GX, which
 * also allows controlling each EMAC clock individually.
 */
static inline void EMAC_RX_CLK_TX(int idx)
{
	unsigned long flags;
	local_irq_save(flags);

#if defined(CONFIG_405EP)
	mtdcr(0xf3, mfdcr(0xf3) | (1 << idx));
#else /* CONFIG_440EP */
	SDR_WRITE(DCRN_SDR_MFR, SDR_READ(DCRN_SDR_MFR) | (0x08000000 >> idx));
#endif
	local_irq_restore(flags);
}

static inline void EMAC_RX_CLK_DEFAULT(int idx)
{
	unsigned long flags;
	local_irq_save(flags);

#if defined(CONFIG_405EP)
	mtdcr(0xf3, mfdcr(0xf3) & ~(1 << idx));
#else /* CONFIG_440EP */
	SDR_WRITE(DCRN_SDR_MFR, SDR_READ(DCRN_SDR_MFR) & ~(0x08000000 >> idx));
#endif
	local_irq_restore(flags);
}
#else
#define EMAC_RX_CLK_TX(idx)		((void)0)
#define EMAC_RX_CLK_DEFAULT(idx)	((void)0)
#endif
#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX) && defined(CONFIG_440GX)
/* We can switch the Ethernet clock to the internal source through
 * SDR0_MFR[ECS]; unfortunately this is less flexible than the 440EP case,
 * because it's a global setting for all EMACs, therefore we do this clock
 * trick only during probe.
 */
#define EMAC_CLK_INTERNAL		SDR_WRITE(DCRN_SDR_MFR, \
					    SDR_READ(DCRN_SDR_MFR) | 0x08000000)
#define EMAC_CLK_EXTERNAL		SDR_WRITE(DCRN_SDR_MFR, \
					    SDR_READ(DCRN_SDR_MFR) & ~0x08000000)
#else
#define EMAC_CLK_INTERNAL		((void)0)
#define EMAC_CLK_EXTERNAL		((void)0)
#endif
/* I don't want to litter the system log with timeout errors
 * when we have a brain-damaged PHY.
 */
static inline void emac_report_timeout_error(struct ocp_enet_private *dev,
					     const char *error)
{
#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX)
	DBG("%d: %s" NL, dev->def->index, error);
#else
	if (net_ratelimit())
		printk(KERN_ERR "emac%d: %s\n", dev->def->index, error);
#endif
}
/* PHY polling intervals */
#define PHY_POLL_LINK_ON	HZ
#define PHY_POLL_LINK_OFF	(HZ / 5)

/* Please, keep in sync with struct ibm_emac_stats/ibm_emac_error_stats */
static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = {
	"rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum",
	"tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom",
	"rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu",
	"rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet",
	"rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error",
	"rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range",
	"rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun",
	"rx_bad_packet", "rx_runt_packet", "rx_short_event",
	"rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long",
	"rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors",
	"tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral",
	"tx_bd_excessive_collisions", "tx_bd_late_collision",
	"tx_bd_multple_collisions", "tx_bd_single_collision",
	"tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe",
	"tx_errors"
};
static irqreturn_t emac_irq(int irq, void *dev_instance, struct pt_regs *regs);
static void emac_clean_tx_ring(struct ocp_enet_private *dev);

static inline int emac_phy_supports_gige(int phy_mode)
{
	return phy_mode == PHY_MODE_GMII ||
	    phy_mode == PHY_MODE_RGMII ||
	    phy_mode == PHY_MODE_TBI ||
	    phy_mode == PHY_MODE_RTBI;
}

static inline int emac_phy_gpcs(int phy_mode)
{
	return phy_mode == PHY_MODE_TBI ||
	    phy_mode == PHY_MODE_RTBI;
}
static inline void emac_tx_enable(struct ocp_enet_private *dev)
{
	struct emac_regs *p = dev->emacp;
	unsigned long flags;
	u32 r;

	local_irq_save(flags);

	DBG("%d: tx_enable" NL, dev->def->index);

	r = in_be32(&p->mr0);
	if (!(r & EMAC_MR0_TXE))
		out_be32(&p->mr0, r | EMAC_MR0_TXE);
	local_irq_restore(flags);
}
static void emac_tx_disable(struct ocp_enet_private *dev)
{
	struct emac_regs *p = dev->emacp;
	unsigned long flags;
	u32 r;

	local_irq_save(flags);

	DBG("%d: tx_disable" NL, dev->def->index);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_TXE) {
		int n = 300;
		out_be32(&p->mr0, r & ~EMAC_MR0_TXE);
		while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n)
			--n;
		if (unlikely(!n))
			emac_report_timeout_error(dev, "TX disable timeout");
	}
	local_irq_restore(flags);
}
static void emac_rx_enable(struct ocp_enet_private *dev)
{
	struct emac_regs *p = dev->emacp;
	unsigned long flags;
	u32 r;

	local_irq_save(flags);
	if (unlikely(dev->commac.rx_stopped))
		goto out;

	DBG("%d: rx_enable" NL, dev->def->index);

	r = in_be32(&p->mr0);
	if (!(r & EMAC_MR0_RXE)) {
		if (unlikely(!(r & EMAC_MR0_RXI))) {
			/* Wait if previous async disable is still in progress */
			int n = 300;
			while (!((r = in_be32(&p->mr0)) & EMAC_MR0_RXI) && n)
				--n;
			if (unlikely(!n))
				emac_report_timeout_error(dev,
							  "RX disable timeout");
		}
		out_be32(&p->mr0, r | EMAC_MR0_RXE);
	}
      out:
	local_irq_restore(flags);
}
static void emac_rx_disable(struct ocp_enet_private *dev)
{
	struct emac_regs *p = dev->emacp;
	unsigned long flags;
	u32 r;

	local_irq_save(flags);

	DBG("%d: rx_disable" NL, dev->def->index);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_RXE) {
		int n = 300;
		out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
		while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n)
			--n;
		if (unlikely(!n))
			emac_report_timeout_error(dev, "RX disable timeout");
	}
	local_irq_restore(flags);
}
static inline void emac_rx_disable_async(struct ocp_enet_private *dev)
{
	struct emac_regs *p = dev->emacp;
	unsigned long flags;
	u32 r;

	local_irq_save(flags);

	DBG("%d: rx_disable_async" NL, dev->def->index);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_RXE)
		out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
	local_irq_restore(flags);
}
static int emac_reset(struct ocp_enet_private *dev)
{
	struct emac_regs *p = dev->emacp;
	unsigned long flags;
	int n = 20;

	DBG("%d: reset" NL, dev->def->index);

	local_irq_save(flags);

	if (!dev->reset_failed) {
		/* 40x erratum suggests stopping RX channel before reset,
		 * we stop TX as well
		 */
		emac_rx_disable(dev);
		emac_tx_disable(dev);
	}

	out_be32(&p->mr0, EMAC_MR0_SRST);
	while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n)
		--n;
	local_irq_restore(flags);

	if (n) {
		dev->reset_failed = 0;
		return 0;
	} else {
		emac_report_timeout_error(dev, "reset timeout");
		dev->reset_failed = 1;
		return -ETIMEDOUT;
	}
}
static void emac_hash_mc(struct ocp_enet_private *dev)
{
	struct emac_regs *p = dev->emacp;
	u16 gaht[4] = { 0 };
	struct dev_mc_list *dmi;

	DBG("%d: hash_mc %d" NL, dev->def->index, dev->ndev->mc_count);

	for (dmi = dev->ndev->mc_list; dmi; dmi = dmi->next) {
		int bit;
		DBG2("%d: mc %02x:%02x:%02x:%02x:%02x:%02x" NL,
		     dev->def->index,
		     dmi->dmi_addr[0], dmi->dmi_addr[1], dmi->dmi_addr[2],
		     dmi->dmi_addr[3], dmi->dmi_addr[4], dmi->dmi_addr[5]);

		bit = 63 - (ether_crc(ETH_ALEN, dmi->dmi_addr) >> 26);
		gaht[bit >> 4] |= 0x8000 >> (bit & 0x0f);
	}
	out_be32(&p->gaht1, gaht[0]);
	out_be32(&p->gaht2, gaht[1]);
	out_be32(&p->gaht3, gaht[2]);
	out_be32(&p->gaht4, gaht[3]);
}
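
/* Illustrative only (not compiled): the CRC-based hash above reduces each
 * multicast address to a 6-bit value, where bits [5:4] select one of the
 * four 16-bit GAHT registers and bits [3:0] select the bit within it, so
 * each of GAHT1..GAHT4 covers 16 consecutive hash values.
 */
#if 0
static int emac_mc_hash_bit(const u8 *mac)	/* hypothetical helper */
{
	return 63 - (ether_crc(ETH_ALEN, mac) >> 26);	/* 0..63 */
}
#endif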
static inline u32 emac_iff2rmr(struct net_device *ndev)
{
	u32 r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE |
	    EMAC_RMR_BASE;

	if (ndev->flags & IFF_PROMISC)
		r |= EMAC_RMR_PME;
	else if (ndev->flags & IFF_ALLMULTI || ndev->mc_count > 32)
		r |= EMAC_RMR_PMME;
	else if (ndev->mc_count > 0)
		r |= EMAC_RMR_MAE;

	return r;
}
static inline int emac_opb_mhz(void)
{
	return (ocp_sys_info.opb_bus_freq + 500000) / 1000000;
}
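
/* Worked example (illustrative): with ocp_sys_info.opb_bus_freq == 66666666
 * (a 66.67 MHz OPB), emac_opb_mhz() returns (66666666 + 500000) / 1000000
 * == 67, i.e. the bus frequency rounded to the nearest MHz.
 */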
/* BHs disabled */
static int emac_configure(struct ocp_enet_private *dev)
{
	struct emac_regs *p = dev->emacp;
	struct net_device *ndev = dev->ndev;
	int gige;
	u32 r;

	DBG("%d: configure" NL, dev->def->index);

	if (emac_reset(dev) < 0)
		return -ETIMEDOUT;

	tah_reset(dev->tah_dev);

	/* Mode register */
	r = EMAC_MR1_BASE(emac_opb_mhz()) | EMAC_MR1_VLE | EMAC_MR1_IST;
	if (dev->phy.duplex == DUPLEX_FULL)
		r |= EMAC_MR1_FDE;
	switch (dev->phy.speed) {
	case SPEED_1000:
		if (emac_phy_gpcs(dev->phy.mode)) {
			r |= EMAC_MR1_MF_1000GPCS |
			    EMAC_MR1_MF_IPPA(dev->phy.address);

			/* Put some arbitrary OUI, Manuf & Rev IDs so we can
			 * identify this GPCS PHY later.
			 */
			out_be32(&p->ipcr, 0xdeadbeef);
		} else
			r |= EMAC_MR1_MF_1000;
		r |= EMAC_MR1_RFS_16K;
		gige = 1;

		if (dev->ndev->mtu > ETH_DATA_LEN)
			r |= EMAC_MR1_JPSM;
		break;
	case SPEED_100:
		r |= EMAC_MR1_MF_100;
		/* Fall through */
	default:
		r |= EMAC_MR1_RFS_4K;
		gige = 0;
		break;
	}

	if (dev->rgmii_dev)
		rgmii_set_speed(dev->rgmii_dev, dev->rgmii_input,
				dev->phy.speed);
	else
		zmii_set_speed(dev->zmii_dev, dev->zmii_input, dev->phy.speed);

#if !defined(CONFIG_40x)
	/* on 40x an erratum forces us to NOT use integrated flow control,
	 * let's hope it works on 44x ;)
	 */
	if (dev->phy.duplex == DUPLEX_FULL) {
		if (dev->phy.pause)
			r |= EMAC_MR1_EIFC | EMAC_MR1_APP;
		else if (dev->phy.asym_pause)
			r |= EMAC_MR1_APP;
	}
#endif
	out_be32(&p->mr1, r);

	/* Set individual MAC address */
	out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
	out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
		 (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
		 ndev->dev_addr[5]);

	/* VLAN Tag Protocol ID */
	out_be32(&p->vtpid, 0x8100);

	/* Receive mode register */
	r = emac_iff2rmr(ndev);
	if (r & EMAC_RMR_MAE)
		emac_hash_mc(dev);
	out_be32(&p->rmr, r);

	/* FIFO thresholds */
	r = EMAC_TMR1((EMAC_MAL_BURST_SIZE / EMAC_FIFO_ENTRY_SIZE) + 1,
		      EMAC_TX_FIFO_SIZE / 2 / EMAC_FIFO_ENTRY_SIZE);
	out_be32(&p->tmr1, r);
	out_be32(&p->trtr, EMAC_TRTR(EMAC_TX_FIFO_SIZE / 2));

	/* A PAUSE frame is sent when the RX FIFO reaches its high-water mark;
	   there should still be enough space in the FIFO to allow our link
	   partner time to process this frame and also time to send a PAUSE
	   frame itself.

	   Here is the worst case scenario for the RX FIFO "headroom"
	   (from "The Switch Book") (100Mbps, without preamble, inter-frame gap):

	   1) One maximum-length frame on TX                    1522 bytes
	   2) One PAUSE frame time                                64 bytes
	   3) PAUSE frame decode time allowance                   64 bytes
	   4) One maximum-length frame on RX                    1522 bytes
	   5) Round-trip propagation delay of the link (100Mb)    15 bytes
	   ----------
	                                                        3187 bytes

	   I chose to set the high-water mark to RX_FIFO_SIZE / 4 (1024 bytes)
	   and the low-water mark to RX_FIFO_SIZE / 8 (512 bytes).
	 */
	r = EMAC_RWMR(EMAC_RX_FIFO_SIZE(gige) / 8 / EMAC_FIFO_ENTRY_SIZE,
		      EMAC_RX_FIFO_SIZE(gige) / 4 / EMAC_FIFO_ENTRY_SIZE);
	out_be32(&p->rwmr, r);

	/* Set PAUSE timer to the maximum */
	out_be32(&p->ptr, 0xffff);

	/* IRQ sources */
	out_be32(&p->iser, EMAC_ISR_TXPE | EMAC_ISR_RXPE | /* EMAC_ISR_TXUE |
		 EMAC_ISR_RXOE | */ EMAC_ISR_OVR | EMAC_ISR_BP | EMAC_ISR_SE |
		 EMAC_ISR_ALE | EMAC_ISR_BFCS | EMAC_ISR_PTLE | EMAC_ISR_ORE |
		 EMAC_ISR_IRE | EMAC_ISR_TE);

	/* We need to take the GPCS PHY out of isolate mode after EMAC reset */
	if (emac_phy_gpcs(dev->phy.mode))
		mii_reset_phy(&dev->phy);

	return 0;
}

/* BHs disabled */
static void emac_reinitialize(struct ocp_enet_private *dev)
{
	DBG("%d: reinitialize" NL, dev->def->index);

	if (!emac_configure(dev)) {
		emac_tx_enable(dev);
		emac_rx_enable(dev);
	}
}
/* BHs disabled */
static void emac_full_tx_reset(struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	struct ocp_func_emac_data *emacdata = dev->def->additions;

	DBG("%d: full_tx_reset" NL, dev->def->index);

	emac_tx_disable(dev);
	mal_disable_tx_channel(dev->mal, emacdata->mal_tx_chan);
	emac_clean_tx_ring(dev);
	dev->tx_cnt = dev->tx_slot = dev->ack_slot = 0;

	emac_configure(dev);

	mal_enable_tx_channel(dev->mal, emacdata->mal_tx_chan);
	emac_tx_enable(dev);
	emac_rx_enable(dev);

	netif_wake_queue(ndev);
}
static int __emac_mdio_read(struct ocp_enet_private *dev, u8 id, u8 reg)
{
	struct emac_regs *p = dev->emacp;
	u32 r;
	int n;

	DBG2("%d: mdio_read(%02x,%02x)" NL, dev->def->index, id, reg);

	/* Enable proper MDIO port */
	zmii_enable_mdio(dev->zmii_dev, dev->zmii_input);

	/* Wait for management interface to become idle */
	n = 10;
	while (!emac_phy_done(in_be32(&p->stacr))) {
		udelay(1);
		if (!--n)
			goto timeout;
	}

	/* Issue read command */
	out_be32(&p->stacr,
		 EMAC_STACR_BASE(emac_opb_mhz()) | EMAC_STACR_STAC_READ |
		 (reg & EMAC_STACR_PRA_MASK)
		 | ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT)
		 | EMAC_STACR_START);

	/* Wait for read to complete */
	n = 100;
	while (!emac_phy_done(r = in_be32(&p->stacr))) {
		udelay(1);
		if (!--n)
			goto timeout;
	}

	if (unlikely(r & EMAC_STACR_PHYE)) {
		DBG("%d: mdio_read(%02x, %02x) failed" NL, dev->def->index,
		    id, reg);
		return -EREMOTEIO;
	}

	r = ((r >> EMAC_STACR_PHYD_SHIFT) & EMAC_STACR_PHYD_MASK);
	DBG2("%d: mdio_read -> %04x" NL, dev->def->index, r);
	return r;
      timeout:
	DBG("%d: MII management interface timeout (read)" NL, dev->def->index);
	return -ETIMEDOUT;
}
static void __emac_mdio_write(struct ocp_enet_private *dev, u8 id, u8 reg,
			      u16 val)
{
	struct emac_regs *p = dev->emacp;
	int n;

	DBG2("%d: mdio_write(%02x,%02x,%04x)" NL, dev->def->index, id, reg,
	     val);

	/* Enable proper MDIO port */
	zmii_enable_mdio(dev->zmii_dev, dev->zmii_input);

	/* Wait for management interface to be idle */
	n = 10;
	while (!emac_phy_done(in_be32(&p->stacr))) {
		udelay(1);
		if (!--n)
			goto timeout;
	}

	/* Issue write command */
	out_be32(&p->stacr,
		 EMAC_STACR_BASE(emac_opb_mhz()) | EMAC_STACR_STAC_WRITE |
		 (reg & EMAC_STACR_PRA_MASK) |
		 ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT) |
		 (val << EMAC_STACR_PHYD_SHIFT) | EMAC_STACR_START);

	/* Wait for write to complete */
	n = 100;
	while (!emac_phy_done(in_be32(&p->stacr))) {
		udelay(1);
		if (!--n)
			goto timeout;
	}
	return;
      timeout:
	DBG("%d: MII management interface timeout (write)" NL, dev->def->index);
}
static int emac_mdio_read(struct net_device *ndev, int id, int reg)
{
	struct ocp_enet_private *dev = ndev->priv;
	int res;

	local_bh_disable();
	res = __emac_mdio_read(dev->mdio_dev ? dev->mdio_dev : dev, (u8) id,
			       (u8) reg);
	local_bh_enable();
	return res;
}

static void emac_mdio_write(struct net_device *ndev, int id, int reg, int val)
{
	struct ocp_enet_private *dev = ndev->priv;

	local_bh_disable();
	__emac_mdio_write(dev->mdio_dev ? dev->mdio_dev : dev, (u8) id,
			  (u8) reg, (u16) val);
	local_bh_enable();
}
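
/* Illustrative only (not compiled): a hypothetical helper showing how the
 * wrappers above would typically be used, e.g. to read the 32-bit PHY
 * identifier from the standard MII ID registers (read errors are ignored
 * here for brevity):
 */
#if 0
static u32 emac_phy_id(struct net_device *ndev, int phy_addr)
{
	u32 id;

	id = emac_mdio_read(ndev, phy_addr, MII_PHYSID1) << 16;
	id |= emac_mdio_read(ndev, phy_addr, MII_PHYSID2) & 0xffff;
	return id;
}
#endif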
/* BHs disabled */
static void emac_set_multicast_list(struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	struct emac_regs *p = dev->emacp;
	u32 rmr = emac_iff2rmr(ndev);

	DBG("%d: multicast %08x" NL, dev->def->index, rmr);
	BUG_ON(!netif_running(dev->ndev));

	/* I decided to relax register access rules here to avoid
	 * a full EMAC reset.
	 *
	 * There is a real problem with the EMAC4 core if we use the MWSW_001
	 * bit in the MR1 register and do a full EMAC reset.
	 * One TX BD status update is delayed and, after EMAC reset, it
	 * never happens, resulting in TX hanging (it'll be recovered by the
	 * TX timeout handler eventually, but this is just gross).
	 * So we either have to do a full TX reset or try to cheat here :)
	 *
	 * The only required change is to the RX mode register, so I *think*
	 * all we need is just to stop the RX channel. This seems to work on
	 * all tested SoCs.                                            --ebs
	 */
	emac_rx_disable(dev);
	if (rmr & EMAC_RMR_MAE)
		emac_hash_mc(dev);
	out_be32(&p->rmr, rmr);
	emac_rx_enable(dev);
}
/* BHs disabled */
static int emac_resize_rx_ring(struct ocp_enet_private *dev, int new_mtu)
{
	struct ocp_func_emac_data *emacdata = dev->def->additions;
	int rx_sync_size = emac_rx_sync_size(new_mtu);
	int rx_skb_size = emac_rx_skb_size(new_mtu);
	int i, ret = 0;

	emac_rx_disable(dev);
	mal_disable_rx_channel(dev->mal, emacdata->mal_rx_chan);

	if (dev->rx_sg_skb) {
		++dev->estats.rx_dropped_resize;
		dev_kfree_skb(dev->rx_sg_skb);
		dev->rx_sg_skb = NULL;
	}

	/* Make a first pass over the RX ring and mark BDs ready, dropping
	 * non-processed packets on the way. We need this as a separate pass
	 * to simplify error recovery in the case of allocation failure later.
	 */
	for (i = 0; i < NUM_RX_BUFF; ++i) {
		if (dev->rx_desc[i].ctrl & MAL_RX_CTRL_FIRST)
			++dev->estats.rx_dropped_resize;

		dev->rx_desc[i].data_len = 0;
		dev->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY |
		    (i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
	}

	/* Reallocate the RX ring only if bigger skb buffers are required */
	if (rx_skb_size <= dev->rx_skb_size)
		goto skip;

	/* Second pass, allocate new skbs */
	for (i = 0; i < NUM_RX_BUFF; ++i) {
		struct sk_buff *skb = alloc_skb(rx_skb_size, GFP_ATOMIC);
		if (!skb) {
			ret = -ENOMEM;
			goto oom;
		}

		BUG_ON(!dev->rx_skb[i]);
		dev_kfree_skb(dev->rx_skb[i]);

		skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
		dev->rx_desc[i].data_ptr =
		    dma_map_single(dev->ldev, skb->data - 2, rx_sync_size,
				   DMA_FROM_DEVICE) + 2;
		dev->rx_skb[i] = skb;
	}
      skip:
	/* Check if we need to change the "Jumbo" bit in MR1 */
	if ((new_mtu > ETH_DATA_LEN) ^ (dev->ndev->mtu > ETH_DATA_LEN)) {
		/* This is to prevent starting the RX channel in emac_rx_enable() */
		dev->commac.rx_stopped = 1;

		dev->ndev->mtu = new_mtu;
		emac_full_tx_reset(dev->ndev);
	}

	mal_set_rcbs(dev->mal, emacdata->mal_rx_chan, emac_rx_size(new_mtu));
      oom:
	/* Restart RX */
	dev->commac.rx_stopped = dev->rx_slot = 0;
	mal_enable_rx_channel(dev->mal, emacdata->mal_rx_chan);
	emac_rx_enable(dev);

	return ret;
}
/* Process ctx, rtnl_lock semaphore */
static int emac_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct ocp_enet_private *dev = ndev->priv;
	int ret = 0;

	if (new_mtu < EMAC_MIN_MTU || new_mtu > EMAC_MAX_MTU)
		return -EINVAL;

	DBG("%d: change_mtu(%d)" NL, dev->def->index, new_mtu);

	local_bh_disable();
	if (netif_running(ndev)) {
		/* Check if we really need to reinitialize the RX ring */
		if (emac_rx_skb_size(ndev->mtu) != emac_rx_skb_size(new_mtu))
			ret = emac_resize_rx_ring(dev, new_mtu);
	}

	if (!ret) {
		ndev->mtu = new_mtu;
		dev->rx_skb_size = emac_rx_skb_size(new_mtu);
		dev->rx_sync_size = emac_rx_sync_size(new_mtu);
	}
	local_bh_enable();

	return ret;
}
/* BHs disabled */
static void emac_clean_tx_ring(struct ocp_enet_private *dev)
{
	int i;

	for (i = 0; i < NUM_TX_BUFF; ++i) {
		if (dev->tx_skb[i]) {
			dev_kfree_skb(dev->tx_skb[i]);
			dev->tx_skb[i] = NULL;
			if (dev->tx_desc[i].ctrl & MAL_TX_CTRL_READY)
				++dev->estats.tx_dropped;
		}
		dev->tx_desc[i].ctrl = 0;
		dev->tx_desc[i].data_ptr = 0;
	}
}
static void emac_clean_rx_ring(struct ocp_enet_private *dev)
{
	int i;

	for (i = 0; i < NUM_RX_BUFF; ++i)
		if (dev->rx_skb[i]) {
			dev->rx_desc[i].ctrl = 0;
			dev_kfree_skb(dev->rx_skb[i]);
			dev->rx_skb[i] = NULL;
			dev->rx_desc[i].data_ptr = 0;
		}

	if (dev->rx_sg_skb) {
		dev_kfree_skb(dev->rx_sg_skb);
		dev->rx_sg_skb = NULL;
	}
}
static inline int emac_alloc_rx_skb(struct ocp_enet_private *dev, int slot,
				    int flags)
{
	struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags);
	if (unlikely(!skb))
		return -ENOMEM;

	dev->rx_skb[slot] = skb;
	dev->rx_desc[slot].data_len = 0;

	skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
	dev->rx_desc[slot].data_ptr =
	    dma_map_single(dev->ldev, skb->data - 2, dev->rx_sync_size,
			   DMA_FROM_DEVICE) + 2;
	barrier();
	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);

	return 0;
}
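
/* Note on the +/- 2 offsets above: reserving EMAC_RX_SKB_HEADROOM + 2 bytes
 * leaves the 14-byte Ethernet header misaligned by 2 so that the IP header
 * that follows it lands on a 4-byte boundary; the DMA mapping starts at
 * skb->data - 2 and the descriptor pointer is bumped by +2 to compensate.
 */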
static void emac_print_link_status(struct ocp_enet_private *dev)
{
	if (netif_carrier_ok(dev->ndev))
		printk(KERN_INFO "%s: link is up, %d %s%s\n",
		       dev->ndev->name, dev->phy.speed,
		       dev->phy.duplex == DUPLEX_FULL ? "FDX" : "HDX",
		       dev->phy.pause ? ", pause enabled" :
		       dev->phy.asym_pause ? ", asymmetric pause enabled" : "");
	else
		printk(KERN_INFO "%s: link is down\n", dev->ndev->name);
}
/* Process ctx, rtnl_lock semaphore */
static int emac_open(struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	struct ocp_func_emac_data *emacdata = dev->def->additions;
	int err, i;

	DBG("%d: open" NL, dev->def->index);

	/* Setup error IRQ handler */
	err = request_irq(dev->def->irq, emac_irq, 0, "EMAC", dev);
	if (err) {
		printk(KERN_ERR "%s: failed to request IRQ %d\n",
		       ndev->name, dev->def->irq);
		return err;
	}

	/* Allocate RX ring */
	for (i = 0; i < NUM_RX_BUFF; ++i)
		if (emac_alloc_rx_skb(dev, i, GFP_KERNEL)) {
			printk(KERN_ERR "%s: failed to allocate RX ring\n",
			       ndev->name);
			goto oom;
		}

	local_bh_disable();
	dev->tx_cnt = dev->tx_slot = dev->ack_slot = dev->rx_slot =
	    dev->commac.rx_stopped = 0;
	dev->rx_sg_skb = NULL;

	if (dev->phy.address >= 0) {
		int link_poll_interval;
		if (dev->phy.def->ops->poll_link(&dev->phy)) {
			dev->phy.def->ops->read_link(&dev->phy);
			EMAC_RX_CLK_DEFAULT(dev->def->index);
			netif_carrier_on(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_ON;
		} else {
			EMAC_RX_CLK_TX(dev->def->index);
			netif_carrier_off(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_OFF;
		}
		mod_timer(&dev->link_timer, jiffies + link_poll_interval);
		emac_print_link_status(dev);
	} else
		netif_carrier_on(dev->ndev);

	emac_configure(dev);
	mal_poll_add(dev->mal, &dev->commac);
	mal_enable_tx_channel(dev->mal, emacdata->mal_tx_chan);
	mal_set_rcbs(dev->mal, emacdata->mal_rx_chan, emac_rx_size(ndev->mtu));
	mal_enable_rx_channel(dev->mal, emacdata->mal_rx_chan);
	emac_tx_enable(dev);
	emac_rx_enable(dev);
	netif_start_queue(ndev);
	local_bh_enable();

	return 0;
      oom:
	emac_clean_rx_ring(dev);
	free_irq(dev->def->irq, dev);
	return -ENOMEM;
}
/* BHs disabled */
static int emac_link_differs(struct ocp_enet_private *dev)
{
	u32 r = in_be32(&dev->emacp->mr1);

	int duplex = r & EMAC_MR1_FDE ? DUPLEX_FULL : DUPLEX_HALF;
	int speed, pause, asym_pause;

	if (r & (EMAC_MR1_MF_1000 | EMAC_MR1_MF_1000GPCS))
		speed = SPEED_1000;
	else if (r & EMAC_MR1_MF_100)
		speed = SPEED_100;
	else
		speed = SPEED_10;

	switch (r & (EMAC_MR1_EIFC | EMAC_MR1_APP)) {
	case (EMAC_MR1_EIFC | EMAC_MR1_APP):
		pause = 1;
		asym_pause = 0;
		break;
	case EMAC_MR1_APP:
		pause = 0;
		asym_pause = 1;
		break;
	default:
		pause = asym_pause = 0;
	}
	return speed != dev->phy.speed || duplex != dev->phy.duplex ||
	    pause != dev->phy.pause || asym_pause != dev->phy.asym_pause;
}
/* BHs disabled */
static void emac_link_timer(unsigned long data)
{
	struct ocp_enet_private *dev = (struct ocp_enet_private *)data;
	int link_poll_interval;

	DBG2("%d: link timer" NL, dev->def->index);

	if (dev->phy.def->ops->poll_link(&dev->phy)) {
		if (!netif_carrier_ok(dev->ndev)) {
			EMAC_RX_CLK_DEFAULT(dev->def->index);

			/* Get new link parameters */
			dev->phy.def->ops->read_link(&dev->phy);

			if (dev->tah_dev || emac_link_differs(dev))
				emac_full_tx_reset(dev->ndev);

			netif_carrier_on(dev->ndev);
			emac_print_link_status(dev);
		}
		link_poll_interval = PHY_POLL_LINK_ON;
	} else {
		if (netif_carrier_ok(dev->ndev)) {
			EMAC_RX_CLK_TX(dev->def->index);
#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX)
			emac_reinitialize(dev);
#endif
			netif_carrier_off(dev->ndev);
			emac_print_link_status(dev);
		}

		/* Retry reset if the previous attempt failed.
		 * This is needed mostly for the CONFIG_IBM_EMAC_PHY_RX_CLK_FIX
		 * case, but I left it here because it shouldn't trigger for
		 * sane fully-functional boards.
		 */
		if (unlikely(dev->reset_failed))
			emac_reinitialize(dev);

		link_poll_interval = PHY_POLL_LINK_OFF;
	}
	mod_timer(&dev->link_timer, jiffies + link_poll_interval);
}
/* BHs disabled */
static void emac_force_link_update(struct ocp_enet_private *dev)
{
	netif_carrier_off(dev->ndev);
	if (timer_pending(&dev->link_timer))
		mod_timer(&dev->link_timer, jiffies + PHY_POLL_LINK_OFF);
}
/* Process ctx, rtnl_lock semaphore */
static int emac_close(struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	struct ocp_func_emac_data *emacdata = dev->def->additions;

	DBG("%d: close" NL, dev->def->index);

	local_bh_disable();

	if (dev->phy.address >= 0)
		del_timer_sync(&dev->link_timer);

	netif_stop_queue(ndev);
	emac_rx_disable(dev);
	emac_tx_disable(dev);
	mal_disable_rx_channel(dev->mal, emacdata->mal_rx_chan);
	mal_disable_tx_channel(dev->mal, emacdata->mal_tx_chan);
	mal_poll_del(dev->mal, &dev->commac);
	local_bh_enable();

	emac_clean_tx_ring(dev);
	emac_clean_rx_ring(dev);
	free_irq(dev->def->irq, dev);

	return 0;
}
static inline u16 emac_tx_csum(struct ocp_enet_private *dev,
			       struct sk_buff *skb)
{
#if defined(CONFIG_IBM_EMAC_TAH)
	if (skb->ip_summed == CHECKSUM_HW) {
		++dev->stats.tx_packets_csum;
		return EMAC_TX_CTRL_TAH_CSUM;
	}
#endif
	return 0;
}
static inline int emac_xmit_finish(struct ocp_enet_private *dev, int len)
{
	struct emac_regs *p = dev->emacp;
	struct net_device *ndev = dev->ndev;

	/* Send the packet out */
	out_be32(&p->tmr0, EMAC_TMR0_XMIT);

	if (unlikely(++dev->tx_cnt == NUM_TX_BUFF)) {
		netif_stop_queue(ndev);
		DBG2("%d: stopped TX queue" NL, dev->def->index);
	}

	ndev->trans_start = jiffies;
	++dev->stats.tx_packets;
	dev->stats.tx_bytes += len;

	return 0;
}
/* BHs disabled */
static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	unsigned int len = skb->len;
	int slot;

	u16 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
	    MAL_TX_CTRL_LAST | emac_tx_csum(dev, skb);

	slot = dev->tx_slot++;
	if (dev->tx_slot == NUM_TX_BUFF) {
		dev->tx_slot = 0;
		ctrl |= MAL_TX_CTRL_WRAP;
	}

	DBG2("%d: xmit(%u) %d" NL, dev->def->index, len, slot);

	dev->tx_skb[slot] = skb;
	dev->tx_desc[slot].data_ptr = dma_map_single(dev->ldev, skb->data, len,
						     DMA_TO_DEVICE);
	dev->tx_desc[slot].data_len = (u16) len;
	barrier();
	dev->tx_desc[slot].ctrl = ctrl;

	return emac_xmit_finish(dev, len);
}
#if defined(CONFIG_IBM_EMAC_TAH)
static inline int emac_xmit_split(struct ocp_enet_private *dev, int slot,
				  u32 pd, int len, int last, u16 base_ctrl)
{
	while (1) {
		u16 ctrl = base_ctrl;
		int chunk = min(len, MAL_MAX_TX_SIZE);
		len -= chunk;

		slot = (slot + 1) % NUM_TX_BUFF;

		if (last && !len)
			ctrl |= MAL_TX_CTRL_LAST;
		if (slot == NUM_TX_BUFF - 1)
			ctrl |= MAL_TX_CTRL_WRAP;

		dev->tx_skb[slot] = NULL;
		dev->tx_desc[slot].data_ptr = pd;
		dev->tx_desc[slot].data_len = (u16) chunk;
		dev->tx_desc[slot].ctrl = ctrl;
		++dev->tx_cnt;

		if (!len)
			break;

		pd += chunk;
	}
	return slot;
}
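
/* Worked example (illustrative): assuming MAL_MAX_TX_SIZE were 4096 (an
 * assumption for this example, not a quote from the headers), a 10000-byte
 * mapping would be split by the loop above into chunks of 4096, 4096 and
 * 1808 bytes across three BDs; only the BD holding the final chunk of the
 * final fragment gets MAL_TX_CTRL_LAST.
 */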
/* BHs disabled (SG version for TAH equipped EMACs) */
static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int len = skb->len, chunk;
	int slot, i;
	u16 ctrl;
	u32 pd;

	/* This is common "fast" path */
	if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE))
		return emac_start_xmit(skb, ndev);

	len -= skb->data_len;

	/* Note, this is only an *estimation*, we can still run out of empty
	 * slots because of the additional fragmentation into
	 * MAL_MAX_TX_SIZE-sized chunks
	 */
	if (unlikely(dev->tx_cnt + nr_frags + mal_tx_chunks(len) > NUM_TX_BUFF))
		goto stop_queue;

	ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
	    emac_tx_csum(dev, skb);
	slot = dev->tx_slot;

	/* skb data */
	dev->tx_skb[slot] = NULL;
	chunk = min(len, MAL_MAX_TX_SIZE);
	dev->tx_desc[slot].data_ptr = pd =
	    dma_map_single(dev->ldev, skb->data, len, DMA_TO_DEVICE);
	dev->tx_desc[slot].data_len = (u16) chunk;
	len -= chunk;
	if (unlikely(len))
		slot = emac_xmit_split(dev, slot, pd + chunk, len, !nr_frags,
				       ctrl);

	for (i = 0; i < nr_frags; ++i) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		len = frag->size;

		if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
			goto undo_frame;

		pd = dma_map_page(dev->ldev, frag->page, frag->page_offset, len,
				  DMA_TO_DEVICE);

		slot = emac_xmit_split(dev, slot, pd, len, i == nr_frags - 1,
				       ctrl);
	}

	DBG2("%d: xmit_sg(%u) %d - %d" NL, dev->def->index, skb->len,
	     dev->tx_slot, slot);

	/* Attach skb to the last slot so we don't release it too early */
	dev->tx_skb[slot] = skb;

	/* Send the packet out */
	if (dev->tx_slot == NUM_TX_BUFF - 1)
		ctrl |= MAL_TX_CTRL_WRAP;
	barrier();
	dev->tx_desc[dev->tx_slot].ctrl = ctrl;
	dev->tx_slot = (slot + 1) % NUM_TX_BUFF;

	return emac_xmit_finish(dev, skb->len);

      undo_frame:
	/* Well, too bad. Our previous estimation was overly optimistic.
	 * Undo everything.
	 */
	while (slot != dev->tx_slot) {
		dev->tx_desc[slot].ctrl = 0;
		--dev->tx_cnt;
		if (--slot < 0)
			slot = NUM_TX_BUFF - 1;
	}
	++dev->estats.tx_undo;

      stop_queue:
	netif_stop_queue(ndev);
	DBG2("%d: stopped TX queue" NL, dev->def->index);
	return 1;
}
#else
# define emac_start_xmit_sg	emac_start_xmit
#endif	/* !defined(CONFIG_IBM_EMAC_TAH) */
/* BHs disabled */
static void emac_parse_tx_error(struct ocp_enet_private *dev, u16 ctrl)
{
	struct ibm_emac_error_stats *st = &dev->estats;
	DBG("%d: BD TX error %04x" NL, dev->def->index, ctrl);

	++st->tx_bd_errors;
	if (ctrl & EMAC_TX_ST_BFCS)
		++st->tx_bd_bad_fcs;
	if (ctrl & EMAC_TX_ST_LCS)
		++st->tx_bd_carrier_loss;
	if (ctrl & EMAC_TX_ST_ED)
		++st->tx_bd_excessive_deferral;
	if (ctrl & EMAC_TX_ST_EC)
		++st->tx_bd_excessive_collisions;
	if (ctrl & EMAC_TX_ST_LC)
		++st->tx_bd_late_collision;
	if (ctrl & EMAC_TX_ST_MC)
		++st->tx_bd_multple_collisions;
	if (ctrl & EMAC_TX_ST_SC)
		++st->tx_bd_single_collision;
	if (ctrl & EMAC_TX_ST_UR)
		++st->tx_bd_underrun;
	if (ctrl & EMAC_TX_ST_SQE)
		++st->tx_bd_sqe;
}
static void emac_poll_tx(void *param)
{
	struct ocp_enet_private *dev = param;
	DBG2("%d: poll_tx, %d %d" NL, dev->def->index, dev->tx_cnt,
	     dev->ack_slot);

	if (dev->tx_cnt) {
		u16 ctrl;
		int slot = dev->ack_slot, n = 0;
	again:
		ctrl = dev->tx_desc[slot].ctrl;
		if (!(ctrl & MAL_TX_CTRL_READY)) {
			struct sk_buff *skb = dev->tx_skb[slot];
			++n;

			if (skb) {
				dev_kfree_skb(skb);
				dev->tx_skb[slot] = NULL;
			}
			slot = (slot + 1) % NUM_TX_BUFF;

			if (unlikely(EMAC_IS_BAD_TX(ctrl)))
				emac_parse_tx_error(dev, ctrl);

			if (--dev->tx_cnt)
				goto again;
		}
		if (n) {
			dev->ack_slot = slot;
			if (netif_queue_stopped(dev->ndev) &&
			    dev->tx_cnt < EMAC_TX_WAKEUP_THRESH)
				netif_wake_queue(dev->ndev);

			DBG2("%d: tx %d pkts" NL, dev->def->index, n);
		}
	}
}
static inline void emac_recycle_rx_skb(struct ocp_enet_private *dev, int slot,
				       int len)
{
	struct sk_buff *skb = dev->rx_skb[slot];
	DBG2("%d: recycle %d %d" NL, dev->def->index, slot, len);

	if (len)
		dma_map_single(dev->ldev, skb->data - 2,
			       EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE);

	dev->rx_desc[slot].data_len = 0;
	barrier();
	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
}
static void emac_parse_rx_error(struct ocp_enet_private *dev, u16 ctrl)
{
	struct ibm_emac_error_stats *st = &dev->estats;
	DBG("%d: BD RX error %04x" NL, dev->def->index, ctrl);

	++st->rx_bd_errors;
	if (ctrl & EMAC_RX_ST_OE)
		++st->rx_bd_overrun;
	if (ctrl & EMAC_RX_ST_BP)
		++st->rx_bd_bad_packet;
	if (ctrl & EMAC_RX_ST_RP)
		++st->rx_bd_runt_packet;
	if (ctrl & EMAC_RX_ST_SE)
		++st->rx_bd_short_event;
	if (ctrl & EMAC_RX_ST_AE)
		++st->rx_bd_alignment_error;
	if (ctrl & EMAC_RX_ST_BFCS)
		++st->rx_bd_bad_fcs;
	if (ctrl & EMAC_RX_ST_PTL)
		++st->rx_bd_packet_too_long;
	if (ctrl & EMAC_RX_ST_ORE)
		++st->rx_bd_out_of_range;
	if (ctrl & EMAC_RX_ST_IRE)
		++st->rx_bd_in_range;
}
static inline void emac_rx_csum(struct ocp_enet_private *dev,
				struct sk_buff *skb, u16 ctrl)
{
#if defined(CONFIG_IBM_EMAC_TAH)
	if (!ctrl && dev->tah_dev) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		++dev->stats.rx_packets_csum;
	}
#endif
}
static inline int emac_rx_sg_append(struct ocp_enet_private *dev, int slot)
{
	if (likely(dev->rx_sg_skb != NULL)) {
		int len = dev->rx_desc[slot].data_len;
		int tot_len = dev->rx_sg_skb->len + len;

		if (unlikely(tot_len + 2 > dev->rx_skb_size)) {
			++dev->estats.rx_dropped_mtu;
			dev_kfree_skb(dev->rx_sg_skb);
			dev->rx_sg_skb = NULL;
		} else {
			cacheable_memcpy(dev->rx_sg_skb->tail,
					 dev->rx_skb[slot]->data, len);
			skb_put(dev->rx_sg_skb, len);
			emac_recycle_rx_skb(dev, slot, len);
			return 0;
		}
	}
	emac_recycle_rx_skb(dev, slot, 0);
	return -1;
}
/* BHs disabled */
static int emac_poll_rx(void *param, int budget)
{
	struct ocp_enet_private *dev = param;
	int slot = dev->rx_slot, received = 0;

	DBG2("%d: poll_rx(%d)" NL, dev->def->index, budget);

      again:
	while (budget > 0) {
		int len;
		struct sk_buff *skb;
		u16 ctrl = dev->rx_desc[slot].ctrl;

		if (ctrl & MAL_RX_CTRL_EMPTY)
			break;

		skb = dev->rx_skb[slot];
		barrier();
		len = dev->rx_desc[slot].data_len;

		if (unlikely(!MAL_IS_SINGLE_RX(ctrl)))
			goto sg;

		ctrl &= EMAC_BAD_RX_MASK;
		if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
			emac_parse_rx_error(dev, ctrl);
			++dev->estats.rx_dropped_error;
			emac_recycle_rx_skb(dev, slot, 0);
			len = 0;
			goto next;
		}

		if (len && len < EMAC_RX_COPY_THRESH) {
			struct sk_buff *copy_skb =
			    alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2, GFP_ATOMIC);
			if (unlikely(!copy_skb))
				goto oom;

			skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2);
			cacheable_memcpy(copy_skb->data - 2, skb->data - 2,
					 len + 2);
			emac_recycle_rx_skb(dev, slot, len);
			skb = copy_skb;
		} else if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC)))
			goto oom;

		skb_put(skb, len);
	      push_packet:
		skb->dev = dev->ndev;
		skb->protocol = eth_type_trans(skb, dev->ndev);
		emac_rx_csum(dev, skb, ctrl);

		if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
			++dev->estats.rx_dropped_stack;
	      next:
		++dev->stats.rx_packets;
	      skip:
		dev->stats.rx_bytes += len;
		slot = (slot + 1) % NUM_RX_BUFF;
		--budget;
		++received;
		continue;
	      sg:
		if (ctrl & MAL_RX_CTRL_FIRST) {
			BUG_ON(dev->rx_sg_skb);
			if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) {
				DBG("%d: rx OOM %d" NL, dev->def->index, slot);
				++dev->estats.rx_dropped_oom;
				emac_recycle_rx_skb(dev, slot, 0);
			} else {
				dev->rx_sg_skb = skb;
				skb_put(skb, len);
			}
		} else if (!emac_rx_sg_append(dev, slot) &&
			   (ctrl & MAL_RX_CTRL_LAST)) {

			skb = dev->rx_sg_skb;
			dev->rx_sg_skb = NULL;

			ctrl &= EMAC_BAD_RX_MASK;
			if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
				emac_parse_rx_error(dev, ctrl);
				++dev->estats.rx_dropped_error;
				dev_kfree_skb(skb);
				len = 0;
			} else
				goto push_packet;
		}
		goto skip;
	      oom:
		DBG("%d: rx OOM %d" NL, dev->def->index, slot);
		/* Drop the packet and recycle skb */
		++dev->estats.rx_dropped_oom;
		emac_recycle_rx_skb(dev, slot, 0);
		goto next;
	}

	if (received) {
		DBG2("%d: rx %d BDs" NL, dev->def->index, received);
		dev->rx_slot = slot;
	}

	if (unlikely(budget && dev->commac.rx_stopped)) {
		struct ocp_func_emac_data *emacdata = dev->def->additions;

		barrier();
		if (!(dev->rx_desc[slot].ctrl & MAL_RX_CTRL_EMPTY)) {
			DBG2("%d: rx restart" NL, dev->def->index);
			received = 0;
			goto again;
		}

		if (dev->rx_sg_skb) {
			DBG2("%d: dropping partial rx packet" NL,
			     dev->def->index);
			++dev->estats.rx_dropped_error;
			dev_kfree_skb(dev->rx_sg_skb);
			dev->rx_sg_skb = NULL;
		}

		dev->commac.rx_stopped = 0;
		mal_enable_rx_channel(dev->mal, emacdata->mal_rx_chan);
		emac_rx_enable(dev);
		dev->rx_slot = 0;
	}
	return received;
}
/* BHs disabled */
static int emac_peek_rx(void *param)
{
	struct ocp_enet_private *dev = param;
	return !(dev->rx_desc[dev->rx_slot].ctrl & MAL_RX_CTRL_EMPTY);
}
/* BHs disabled */
static int emac_peek_rx_sg(void *param)
{
	struct ocp_enet_private *dev = param;
	int slot = dev->rx_slot;
	while (1) {
		u16 ctrl = dev->rx_desc[slot].ctrl;
		if (ctrl & MAL_RX_CTRL_EMPTY)
			return 0;
		else if (ctrl & MAL_RX_CTRL_LAST)
			return 1;

		slot = (slot + 1) % NUM_RX_BUFF;

		/* I'm just being paranoid here :) */
		if (unlikely(slot == dev->rx_slot))
			return 0;
	}
}
/* Hard IRQ */
static void emac_rxde(void *param)
{
	struct ocp_enet_private *dev = param;
	++dev->estats.rx_stopped;
	emac_rx_disable_async(dev);
}
/* Hard IRQ */
static irqreturn_t emac_irq(int irq, void *dev_instance, struct pt_regs *regs)
{
	struct ocp_enet_private *dev = dev_instance;
	struct emac_regs *p = dev->emacp;
	struct ibm_emac_error_stats *st = &dev->estats;

	u32 isr = in_be32(&p->isr);
	out_be32(&p->isr, isr);

	DBG("%d: isr = %08x" NL, dev->def->index, isr);

	if (isr & EMAC_ISR_TXPE)
		++st->tx_parity;
	if (isr & EMAC_ISR_RXPE)
		++st->rx_parity;
	if (isr & EMAC_ISR_TXUE)
		++st->tx_underrun;
	if (isr & EMAC_ISR_RXOE)
		++st->rx_fifo_overrun;
	if (isr & EMAC_ISR_OVR)
		++st->rx_overrun;
	if (isr & EMAC_ISR_BP)
		++st->rx_bad_packet;
	if (isr & EMAC_ISR_RP)
		++st->rx_runt_packet;
	if (isr & EMAC_ISR_SE)
		++st->rx_short_event;
	if (isr & EMAC_ISR_ALE)
		++st->rx_alignment_error;
	if (isr & EMAC_ISR_BFCS)
		++st->rx_bad_fcs;
	if (isr & EMAC_ISR_PTLE)
		++st->rx_packet_too_long;
	if (isr & EMAC_ISR_ORE)
		++st->rx_out_of_range;
	if (isr & EMAC_ISR_IRE)
		++st->rx_in_range;
	if (isr & EMAC_ISR_SQE)
		++st->tx_sqe;
	if (isr & EMAC_ISR_TE)
		++st->tx_errors;

	return IRQ_HANDLED;
}
static struct net_device_stats *emac_stats(struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	struct ibm_emac_stats *st = &dev->stats;
	struct ibm_emac_error_stats *est = &dev->estats;
	struct net_device_stats *nst = &dev->nstats;

	DBG2("%d: stats" NL, dev->def->index);

	/* Compute "legacy" statistics */
	local_irq_disable();
	nst->rx_packets = (unsigned long)st->rx_packets;
	nst->rx_bytes = (unsigned long)st->rx_bytes;
	nst->tx_packets = (unsigned long)st->tx_packets;
	nst->tx_bytes = (unsigned long)st->tx_bytes;
	nst->rx_dropped = (unsigned long)(est->rx_dropped_oom +
					  est->rx_dropped_error +
					  est->rx_dropped_resize +
					  est->rx_dropped_mtu);
	nst->tx_dropped = (unsigned long)est->tx_dropped;

	nst->rx_errors = (unsigned long)est->rx_bd_errors;
	nst->rx_fifo_errors = (unsigned long)(est->rx_bd_overrun +
					      est->rx_fifo_overrun +
					      est->rx_overrun);
	nst->rx_frame_errors = (unsigned long)(est->rx_bd_alignment_error +
					       est->rx_alignment_error);
	nst->rx_crc_errors = (unsigned long)(est->rx_bd_bad_fcs +
					     est->rx_bad_fcs);
	nst->rx_length_errors = (unsigned long)(est->rx_bd_runt_packet +
						est->rx_bd_short_event +
						est->rx_bd_packet_too_long +
						est->rx_bd_out_of_range +
						est->rx_bd_in_range +
						est->rx_runt_packet +
						est->rx_short_event +
						est->rx_packet_too_long +
						est->rx_out_of_range +
						est->rx_in_range);

	nst->tx_errors = (unsigned long)(est->tx_bd_errors + est->tx_errors);
	nst->tx_fifo_errors = (unsigned long)(est->tx_bd_underrun +
					      est->tx_underrun);
	nst->tx_carrier_errors = (unsigned long)est->tx_bd_carrier_loss;
	nst->collisions = (unsigned long)(est->tx_bd_excessive_deferral +
					  est->tx_bd_excessive_collisions +
					  est->tx_bd_late_collision +
					  est->tx_bd_multple_collisions);
	local_irq_enable();

	return nst;
}
static void emac_remove(struct ocp_device *ocpdev)
{
	struct ocp_enet_private *dev = ocp_get_drvdata(ocpdev);

	DBG("%d: remove" NL, dev->def->index);

	ocp_set_drvdata(ocpdev, NULL);
	unregister_netdev(dev->ndev);

	tah_fini(dev->tah_dev);
	rgmii_fini(dev->rgmii_dev, dev->rgmii_input);
	zmii_fini(dev->zmii_dev, dev->zmii_input);

	emac_dbg_register(dev->def->index, NULL);

	mal_unregister_commac(dev->mal, &dev->commac);
	iounmap((void *)dev->emacp);
	free_netdev(dev->ndev);
}
static struct mal_commac_ops emac_commac_ops = {
	.poll_tx = &emac_poll_tx,
	.poll_rx = &emac_poll_rx,
	.peek_rx = &emac_peek_rx,
	.rxde = &emac_rxde,
};

static struct mal_commac_ops emac_commac_sg_ops = {
	.poll_tx = &emac_poll_tx,
	.poll_rx = &emac_poll_rx,
	.peek_rx = &emac_peek_rx_sg,
	.rxde = &emac_rxde,
};
/* Ethtool support */
static int emac_ethtool_get_settings(struct net_device *ndev,
				     struct ethtool_cmd *cmd)
{
	struct ocp_enet_private *dev = ndev->priv;

	cmd->supported = dev->phy.features;
	cmd->port = PORT_MII;
	cmd->phy_address = dev->phy.address;
	cmd->transceiver =
	    dev->phy.address >= 0 ? XCVR_EXTERNAL : XCVR_INTERNAL;

	local_bh_disable();
	cmd->advertising = dev->phy.advertising;
	cmd->autoneg = dev->phy.autoneg;
	cmd->speed = dev->phy.speed;
	cmd->duplex = dev->phy.duplex;
	local_bh_enable();

	return 0;
}
static int emac_ethtool_set_settings(struct net_device *ndev,
				     struct ethtool_cmd *cmd)
{
	struct ocp_enet_private *dev = ndev->priv;
	u32 f = dev->phy.features;

	DBG("%d: set_settings(%d, %d, %d, 0x%08x)" NL, dev->def->index,
	    cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising);

	/* Basic sanity checks */
	if (dev->phy.address < 0)
		return -EOPNOTSUPP;
	if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;
	if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
		return -EINVAL;
	if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_DISABLE) {
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_HALF
			    && !(f & SUPPORTED_10baseT_Half))
				return -EINVAL;
			if (cmd->duplex == DUPLEX_FULL
			    && !(f & SUPPORTED_10baseT_Full))
				return -EINVAL;
			break;
		case SPEED_100:
			if (cmd->duplex == DUPLEX_HALF
			    && !(f & SUPPORTED_100baseT_Half))
				return -EINVAL;
			if (cmd->duplex == DUPLEX_FULL
			    && !(f & SUPPORTED_100baseT_Full))
				return -EINVAL;
			break;
		case SPEED_1000:
			if (cmd->duplex == DUPLEX_HALF
			    && !(f & SUPPORTED_1000baseT_Half))
				return -EINVAL;
			if (cmd->duplex == DUPLEX_FULL
			    && !(f & SUPPORTED_1000baseT_Full))
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}

		local_bh_disable();
		dev->phy.def->ops->setup_forced(&dev->phy, cmd->speed,
						cmd->duplex);
	} else {
		if (!(f & SUPPORTED_Autoneg))
			return -EINVAL;

		local_bh_disable();
		dev->phy.def->ops->setup_aneg(&dev->phy,
					      (cmd->advertising & f) |
					      (dev->phy.advertising &
					       (ADVERTISED_Pause |
						ADVERTISED_Asym_Pause)));
	}
	emac_force_link_update(dev);
	local_bh_enable();

	return 0;
}
static void emac_ethtool_get_ringparam(struct net_device *ndev,
				       struct ethtool_ringparam *rp)
{
	rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF;
	rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF;
}
static void emac_ethtool_get_pauseparam(struct net_device *ndev,
					struct ethtool_pauseparam *pp)
{
	struct ocp_enet_private *dev = ndev->priv;

	local_bh_disable();

	/* autoneg */
	if ((dev->phy.features & SUPPORTED_Autoneg) &&
	    (dev->phy.advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause)))
		pp->autoneg = 1;

	if (dev->phy.duplex == DUPLEX_FULL) {
		if (dev->phy.pause)
			pp->rx_pause = pp->tx_pause = 1;
		else if (dev->phy.asym_pause)
			pp->tx_pause = 1;
	}
	local_bh_enable();
}
static u32 emac_ethtool_get_rx_csum(struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	return dev->tah_dev != NULL;
}
static int emac_get_regs_len(struct ocp_enet_private *dev)
{
	return sizeof(struct emac_ethtool_regs_subhdr) + EMAC_ETHTOOL_REGS_SIZE;
}

static int emac_ethtool_get_regs_len(struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	return sizeof(struct emac_ethtool_regs_hdr) +
	    emac_get_regs_len(dev) + mal_get_regs_len(dev->mal) +
	    zmii_get_regs_len(dev->zmii_dev) +
	    rgmii_get_regs_len(dev->rgmii_dev) +
	    tah_get_regs_len(dev->tah_dev);
}
static void *emac_dump_regs(struct ocp_enet_private *dev, void *buf)
{
	struct emac_ethtool_regs_subhdr *hdr = buf;

	hdr->version = EMAC_ETHTOOL_REGS_VER;
	hdr->index = dev->def->index;
	memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE);
	return ((void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE);
}
static void emac_ethtool_get_regs(struct net_device *ndev,
				  struct ethtool_regs *regs, void *buf)
{
	struct ocp_enet_private *dev = ndev->priv;
	struct emac_ethtool_regs_hdr *hdr = buf;

	hdr->components = 0;
	buf = hdr + 1;

	local_irq_disable();
	buf = mal_dump_regs(dev->mal, buf);
	buf = emac_dump_regs(dev, buf);
	if (dev->zmii_dev) {
		hdr->components |= EMAC_ETHTOOL_REGS_ZMII;
		buf = zmii_dump_regs(dev->zmii_dev, buf);
	}
	if (dev->rgmii_dev) {
		hdr->components |= EMAC_ETHTOOL_REGS_RGMII;
		buf = rgmii_dump_regs(dev->rgmii_dev, buf);
	}
	if (dev->tah_dev) {
		hdr->components |= EMAC_ETHTOOL_REGS_TAH;
		buf = tah_dump_regs(dev->tah_dev, buf);
	}
	local_irq_enable();
}
static int emac_ethtool_nway_reset(struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	int res = 0;

	DBG("%d: nway_reset" NL, dev->def->index);

	if (dev->phy.address < 0)
		return -EOPNOTSUPP;

	local_bh_disable();
	if (!dev->phy.autoneg) {
		res = -EINVAL;
		goto out;
	}

	dev->phy.def->ops->setup_aneg(&dev->phy, dev->phy.advertising);
	emac_force_link_update(dev);

      out:
	local_bh_enable();
	return res;
}
static int emac_ethtool_get_stats_count(struct net_device *ndev)
{
	return EMAC_ETHTOOL_STATS_COUNT;
}

static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset,
				     u8 * buf)
{
	if (stringset == ETH_SS_STATS)
		memcpy(buf, &emac_stats_keys, sizeof(emac_stats_keys));
}
static void emac_ethtool_get_ethtool_stats(struct net_device *ndev,
					   struct ethtool_stats *estats,
					   u64 * tmp_stats)
{
	struct ocp_enet_private *dev = ndev->priv;
	local_irq_disable();
	memcpy(tmp_stats, &dev->stats, sizeof(dev->stats));
	tmp_stats += sizeof(dev->stats) / sizeof(u64);
	memcpy(tmp_stats, &dev->estats, sizeof(dev->estats));
	local_irq_enable();
}
static void emac_ethtool_get_drvinfo(struct net_device *ndev,
				     struct ethtool_drvinfo *info)
{
	struct ocp_enet_private *dev = ndev->priv;

	strcpy(info->driver, "ibm_emac");
	strcpy(info->version, DRV_VERSION);
	info->fw_version[0] = '\0';
	sprintf(info->bus_info, "PPC 4xx EMAC %d", dev->def->index);
	info->n_stats = emac_ethtool_get_stats_count(ndev);
	info->regdump_len = emac_ethtool_get_regs_len(ndev);
}
static struct ethtool_ops emac_ethtool_ops = {
	.get_settings = emac_ethtool_get_settings,
	.set_settings = emac_ethtool_set_settings,
	.get_drvinfo = emac_ethtool_get_drvinfo,

	.get_regs_len = emac_ethtool_get_regs_len,
	.get_regs = emac_ethtool_get_regs,

	.nway_reset = emac_ethtool_nway_reset,

	.get_ringparam = emac_ethtool_get_ringparam,
	.get_pauseparam = emac_ethtool_get_pauseparam,

	.get_rx_csum = emac_ethtool_get_rx_csum,

	.get_strings = emac_ethtool_get_strings,
	.get_stats_count = emac_ethtool_get_stats_count,
	.get_ethtool_stats = emac_ethtool_get_ethtool_stats,

	.get_link = ethtool_op_get_link,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.get_sg = ethtool_op_get_sg,
};
static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
	struct ocp_enet_private *dev = ndev->priv;
	uint16_t *data = (uint16_t *) & rq->ifr_ifru;

	DBG("%d: ioctl %08x" NL, dev->def->index, cmd);

	if (dev->phy.address < 0)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCDEVPRIVATE:
		data[0] = dev->phy.address;
		/* Fall through */
	case SIOCGMIIREG:
	case SIOCDEVPRIVATE + 1:
		data[3] = emac_mdio_read(ndev, dev->phy.address, data[1]);
		return 0;

	case SIOCSMIIREG:
	case SIOCDEVPRIVATE + 2:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		emac_mdio_write(ndev, dev->phy.address, data[1], data[2]);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
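
/* Illustrative note: data[] above follows the legacy MII ioctl layout, where
 * rq->ifr_ifru holds four u16 values: data[0] = PHY address, data[1] = PHY
 * register number, data[2] = value to write, data[3] = value read back.
 */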
static int __init emac_probe(struct ocp_device *ocpdev)
{
	struct ocp_func_emac_data *emacdata = ocpdev->def->additions;
	struct net_device *ndev;
	struct ocp_device *maldev;
	struct ocp_enet_private *dev;
	int err, i;

	DBG("%d: probe" NL, ocpdev->def->index);

	if (!emacdata) {
		printk(KERN_ERR "emac%d: Missing additional data!\n",
		       ocpdev->def->index);
		return -ENODEV;
	}

	/* Allocate our net_device structure */
	ndev = alloc_etherdev(sizeof(struct ocp_enet_private));
	if (!ndev) {
		printk(KERN_ERR "emac%d: could not allocate ethernet device!\n",
		       ocpdev->def->index);
		return -ENOMEM;
	}
	dev = ndev->priv;
	dev->ndev = ndev;
	dev->ldev = &ocpdev->dev;
	dev->def = ocpdev->def;
	SET_MODULE_OWNER(ndev);

	/* Find MAL device we are connected to */
	maldev =
	    ocp_find_device(OCP_VENDOR_IBM, OCP_FUNC_MAL, emacdata->mal_idx);
	if (!maldev) {
		printk(KERN_ERR "emac%d: unknown mal%d device!\n",
		       dev->def->index, emacdata->mal_idx);
		err = -ENODEV;
		goto out;
	}
	dev->mal = ocp_get_drvdata(maldev);
	if (!dev->mal) {
		printk(KERN_ERR "emac%d: mal%d hasn't been initialized yet!\n",
		       dev->def->index, emacdata->mal_idx);
		err = -ENODEV;
		goto out;
	}

	/* Register with MAL */
	dev->commac.ops = &emac_commac_ops;
	dev->commac.dev = dev;
	dev->commac.tx_chan_mask = MAL_CHAN_MASK(emacdata->mal_tx_chan);
	dev->commac.rx_chan_mask = MAL_CHAN_MASK(emacdata->mal_rx_chan);
	err = mal_register_commac(dev->mal, &dev->commac);
	if (err) {
		printk(KERN_ERR "emac%d: failed to register with mal%d!\n",
		       dev->def->index, emacdata->mal_idx);
		goto out;
	}
	dev->rx_skb_size = emac_rx_skb_size(ndev->mtu);
	dev->rx_sync_size = emac_rx_sync_size(ndev->mtu);

	/* Get pointers to BD rings */
	dev->tx_desc =
	    dev->mal->bd_virt + mal_tx_bd_offset(dev->mal,
						 emacdata->mal_tx_chan);
	dev->rx_desc =
	    dev->mal->bd_virt + mal_rx_bd_offset(dev->mal,
						 emacdata->mal_rx_chan);

	DBG("%d: tx_desc %p" NL, ocpdev->def->index, dev->tx_desc);
	DBG("%d: rx_desc %p" NL, ocpdev->def->index, dev->rx_desc);

	/* Clean rings */
	memset(dev->tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor));
	memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor));

	/* If we depend on another EMAC for MDIO, check whether it was probed already */
	if (emacdata->mdio_idx >= 0 && emacdata->mdio_idx != ocpdev->def->index) {
		struct ocp_device *mdiodev =
		    ocp_find_device(OCP_VENDOR_IBM, OCP_FUNC_EMAC,
				    emacdata->mdio_idx);
		if (!mdiodev) {
			printk(KERN_ERR "emac%d: unknown emac%d device!\n",
			       dev->def->index, emacdata->mdio_idx);
			err = -ENODEV;
			goto out2;
		}
		dev->mdio_dev = ocp_get_drvdata(mdiodev);
		if (!dev->mdio_dev) {
			printk(KERN_ERR
			       "emac%d: emac%d hasn't been initialized yet!\n",
			       dev->def->index, emacdata->mdio_idx);
			err = -ENODEV;
			goto out2;
		}
	}

	/* Attach to ZMII, if needed */
	if ((err = zmii_attach(dev)) != 0)
		goto out2;

	/* Attach to RGMII, if needed */
	if ((err = rgmii_attach(dev)) != 0)
		goto out3;

	/* Attach to TAH, if needed */
	if ((err = tah_attach(dev)) != 0)
		goto out4;

	/* Map EMAC regs */
	dev->emacp =
	    (struct emac_regs *)ioremap(dev->def->paddr,
					sizeof(struct emac_regs));
	if (!dev->emacp) {
		printk(KERN_ERR "emac%d: could not ioremap device registers!\n",
		       dev->def->index);
		err = -ENOMEM;
		goto out5;
	}

	/* Fill in MAC address */
	for (i = 0; i < 6; ++i)
		ndev->dev_addr[i] = emacdata->mac_addr[i];

	/* Set some link defaults before we can find out real parameters */
	dev->phy.speed = SPEED_100;
	dev->phy.duplex = DUPLEX_FULL;
	dev->phy.autoneg = AUTONEG_DISABLE;
	dev->phy.pause = dev->phy.asym_pause = 0;
	init_timer(&dev->link_timer);
	dev->link_timer.function = emac_link_timer;
	dev->link_timer.data = (unsigned long)dev;

	/* Find PHY if any */
	dev->phy.dev = ndev;
	dev->phy.mode = emacdata->phy_mode;
	if (emacdata->phy_map != 0xffffffff) {
		u32 phy_map = emacdata->phy_map | busy_phy_map;
		u32 adv;

		DBG("%d: PHY maps %08x %08x" NL, dev->def->index,
		    emacdata->phy_map, busy_phy_map);

		EMAC_RX_CLK_TX(dev->def->index);

		dev->phy.mdio_read = emac_mdio_read;
		dev->phy.mdio_write = emac_mdio_write;

		/* Configure EMAC with defaults so we can at least use MDIO
		 * This is needed mostly for 440GX
		 */
		if (emac_phy_gpcs(dev->phy.mode)) {
			/* XXX
			 * Make GPCS PHY address equal to EMAC index.
			 * We probably should take into account busy_phy_map
			 * and/or phy_map here.
			 */
			dev->phy.address = dev->def->index;
		}

		emac_configure(dev);

		for (i = 0; i < 0x20; phy_map >>= 1, ++i)
			if (!(phy_map & 1)) {
				int r;
				busy_phy_map |= 1 << i;

				/* Quick check if there is a PHY at the address */
				r = emac_mdio_read(dev->ndev, i, MII_BMCR);
				if (r == 0xffff || r < 0)
					continue;
				if (!mii_phy_probe(&dev->phy, i))
					break;
			}
		if (i == 0x20) {
			printk(KERN_WARNING "emac%d: can't find PHY!\n",
			       dev->def->index);
			err = -ENXIO;
			goto out6;
		}

		/* Init PHY */
		if (dev->phy.def->ops->init)
			dev->phy.def->ops->init(&dev->phy);

		/* Disable any PHY features not supported by the platform */
		dev->phy.def->features &= ~emacdata->phy_feat_exc;

		/* Setup initial link parameters */
		if (dev->phy.features & SUPPORTED_Autoneg) {
			adv = dev->phy.features;
#if !defined(CONFIG_40x)
			adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
#endif
			/* Restart autonegotiation */
			dev->phy.def->ops->setup_aneg(&dev->phy, adv);
		} else {
			u32 f = dev->phy.def->features;
			int speed = SPEED_10, fd = DUPLEX_HALF;

			/* Select highest supported speed/duplex */
			if (f & SUPPORTED_1000baseT_Full) {
				speed = SPEED_1000;
				fd = DUPLEX_FULL;
			} else if (f & SUPPORTED_1000baseT_Half)
				speed = SPEED_1000;
			else if (f & SUPPORTED_100baseT_Full) {
				speed = SPEED_100;
				fd = DUPLEX_FULL;
			} else if (f & SUPPORTED_100baseT_Half)
				speed = SPEED_100;
			else if (f & SUPPORTED_10baseT_Full)
				fd = DUPLEX_FULL;

			/* Force link parameters */
			dev->phy.def->ops->setup_forced(&dev->phy, speed, fd);
		}
	} else {
		emac_reset(dev);

		/* PHY-less configuration.
		 * XXX I probably should move these settings to emacdata
		 */
		dev->phy.address = -1;
		dev->phy.features = SUPPORTED_100baseT_Full | SUPPORTED_MII;
		dev->phy.pause = 1;
	}

	/* Fill in the driver function table */
	ndev->open = &emac_open;
	if (dev->tah_dev) {
		ndev->hard_start_xmit = &emac_start_xmit_sg;
		ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
	} else
		ndev->hard_start_xmit = &emac_start_xmit;
	ndev->tx_timeout = &emac_full_tx_reset;
	ndev->watchdog_timeo = 5 * HZ;
	ndev->stop = &emac_close;
	ndev->get_stats = &emac_stats;
	ndev->set_multicast_list = &emac_set_multicast_list;
	ndev->do_ioctl = &emac_ioctl;
	if (emac_phy_supports_gige(emacdata->phy_mode)) {
		ndev->change_mtu = &emac_change_mtu;
		dev->commac.ops = &emac_commac_sg_ops;
	}
	SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);

	netif_carrier_off(ndev);
	netif_stop_queue(ndev);

	err = register_netdev(ndev);
	if (err) {
		printk(KERN_ERR "emac%d: failed to register net device (%d)!\n",
		       dev->def->index, err);
		goto out6;
	}

	ocp_set_drvdata(ocpdev, dev);

	printk(KERN_INFO "%s: emac%d, MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
	       ndev->name, dev->def->index,
	       ndev->dev_addr[0], ndev->dev_addr[1], ndev->dev_addr[2],
	       ndev->dev_addr[3], ndev->dev_addr[4], ndev->dev_addr[5]);

	if (dev->phy.address >= 0)
		printk(KERN_INFO "%s: found %s PHY (0x%02x)\n", ndev->name,
		       dev->phy.def->name, dev->phy.address);

	emac_dbg_register(dev->def->index, dev);

	return 0;
      out6:
	iounmap((void *)dev->emacp);
      out5:
	tah_fini(dev->tah_dev);
      out4:
	rgmii_fini(dev->rgmii_dev, dev->rgmii_input);
      out3:
	zmii_fini(dev->zmii_dev, dev->zmii_input);
      out2:
	mal_unregister_commac(dev->mal, &dev->commac);
      out:
	free_netdev(ndev);
	return err;
}
static struct ocp_device_id emac_ids[] = {
	{ .vendor = OCP_VENDOR_IBM, .function = OCP_FUNC_EMAC },
	{ .vendor = OCP_VENDOR_INVALID }
};

static struct ocp_driver emac_driver = {
	.name = "emac",
	.id_table = emac_ids,
	.probe = emac_probe,
	.remove = emac_remove,
};
static int __init emac_init(void)
{
	printk(KERN_INFO DRV_DESC ", version " DRV_VERSION "\n");

	DBG(": init" NL);

	if (mal_init())
		return -ENODEV;

	EMAC_CLK_INTERNAL;
	if (ocp_register_driver(&emac_driver)) {
		EMAC_CLK_EXTERNAL;
		ocp_unregister_driver(&emac_driver);
		mal_exit();
		return -ENODEV;
	}
	EMAC_CLK_EXTERNAL;

	return 0;
}

static void __exit emac_exit(void)
{
	DBG(": exit" NL);
	ocp_unregister_driver(&emac_driver);
	mal_exit();
}

module_init(emac_init);
module_exit(emac_exit);