 * Ethernet driver for the built-in Ethernet on the IBM 4xx PowerPC
 *
 * (c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
 *
 * Based on original work by
 *	Armin Kuster <akuster@mvista.com>
 *	Johnnie Peters <jpeters@mvista.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * - Check for races in the "remove" code path
 * - Add some Power Management to the MAC and the PHY
 * - Audit the remainder of the non-rewritten code (--BenH)
 * - Clean up message display using the msglevel mechanism
 * - Address all errata
 * - Audit all register update paths to ensure they
 *   are being written post soft reset if required.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/bitops.h>

#include <asm/processor.h>
#include <asm/uaccess.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/crc32.h>

#include "ibm_emac_core.h"
//#define MDIO_DEBUG(fmt) printk fmt
#define MDIO_DEBUG(fmt)

//#define LINK_DEBUG(fmt) printk fmt
#define LINK_DEBUG(fmt)

//#define PKT_DEBUG(fmt) printk fmt
#define PKT_DEBUG(fmt)

#define DRV_NAME	"emac"
#define DRV_VERSION	"2.0"
#define DRV_AUTHOR	"Benjamin Herrenschmidt <benh@kernel.crashing.org>"
#define DRV_DESC	"IBM EMAC Ethernet driver"
/*
 * When mdio_idx >= 0, contains a list of emac ocp_devs
 * that have had their initialization deferred until the
 * common MDIO controller has been initialized.
 */
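/*
 * Deferred-init flow (sketch, from the code below): emac_probe() queues
 * an emac_def_dev on this list when its MDIO master EMAC has not been
 * probed yet; once the master finishes emac_init_device(), the list is
 * walked and each deferred device is initialized in turn.
 */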
LIST_HEAD(emac_init_list);

MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION(DRV_DESC);
MODULE_LICENSE("GPL");

static int skb_res = SKB_RES;
module_param(skb_res, int, 0444);
MODULE_PARM_DESC(skb_res, "Amount of data to reserve on skb buffers\n"
		 "The 405 handles a misaligned IP header fine, but\n"
		 "this can help if you are routing to a tunnel or a\n"
		 "device that needs aligned data. 0..2");
#define RGMII_PRIV(ocpdev) ((struct ibm_ocp_rgmii *)ocp_get_drvdata(ocpdev))

static unsigned int rgmii_enable[] = {

static unsigned int rgmii_speed_mask[] = {

static unsigned int rgmii_speed100[] = {

static unsigned int rgmii_speed1000[] = {

#define ZMII_PRIV(ocpdev) ((struct ibm_ocp_zmii *)ocp_get_drvdata(ocpdev))

static unsigned int zmii_enable[][4] = {
	{ZMII_SMII0, ZMII_RMII0, ZMII_MII0,
	 ~(ZMII_MDI1 | ZMII_MDI2 | ZMII_MDI3)},
	{ZMII_SMII1, ZMII_RMII1, ZMII_MII1,
	 ~(ZMII_MDI0 | ZMII_MDI2 | ZMII_MDI3)},
	{ZMII_SMII2, ZMII_RMII2, ZMII_MII2,
	 ~(ZMII_MDI0 | ZMII_MDI1 | ZMII_MDI3)},
	{ZMII_SMII3, ZMII_RMII3, ZMII_MII3, ~(ZMII_MDI0 | ZMII_MDI1 | ZMII_MDI2)}
};

static unsigned int mdi_enable[] = {

static unsigned int zmii_speed = 0x0;
static unsigned int zmii_speed100[] = {

/* Since multiple EMACs share MDIO lines in various ways, we need
 * to avoid re-using the same PHY ID in cases where the arch didn't
 * set up precise phy_map entries.
 */
static u32 busy_phy_map = 0;

/* If EMACs share a common MDIO device, this points to it */
static struct net_device *mdio_ndev = NULL;

struct emac_def_dev {
	struct list_head link;
	struct ocp_device *ocpdev;
	struct ibm_ocp_mal *mal;
};

static struct net_device_stats *emac_stats(struct net_device *dev)
	struct ocp_enet_private *fep = dev->priv;
	return &fep->stats;
emac_init_rgmii(struct ocp_device *rgmii_dev, int input, int phy_mode)
	struct ibm_ocp_rgmii *rgmii = RGMII_PRIV(rgmii_dev);
	const char *mode_name[] = { "RTBI", "RGMII", "TBI", "GMII" };

	rgmii = kmalloc(sizeof(struct ibm_ocp_rgmii), GFP_KERNEL);
	if (rgmii == NULL) {
		printk(KERN_ERR
		       "rgmii%d: Out of memory allocating RGMII structure!\n",
		       rgmii_dev->def->index);
		return -ENOMEM;
	}

	memset(rgmii, 0, sizeof(*rgmii));

	rgmii->base =
	    (struct rgmii_regs *)ioremap(rgmii_dev->def->paddr,
					 sizeof(*rgmii->base));
	if (rgmii->base == NULL) {
		printk(KERN_ERR
		       "rgmii%d: Cannot ioremap bridge registers!\n",
		       rgmii_dev->def->index);

	ocp_set_drvdata(rgmii_dev, rgmii);

		rgmii->base->fer &= ~RGMII_FER_MASK(input);
		rgmii->base->fer |= rgmii_enable[mode] << (4 * input);

		switch ((rgmii->base->fer & RGMII_FER_MASK(input)) >> (4 *

	/* Set mode to RGMII if nothing valid is detected */

	printk(KERN_NOTICE "rgmii%d: input %d in %s mode\n",
	       rgmii_dev->def->index, input, mode_name[mode]);

	rgmii->mode[input] = mode;
emac_rgmii_port_speed(struct ocp_device *ocpdev, int input, int speed)
	struct ibm_ocp_rgmii *rgmii = RGMII_PRIV(ocpdev);
	unsigned int rgmii_speed;

	rgmii_speed = in_be32(&rgmii->base->ssr);

	rgmii_speed &= ~rgmii_speed_mask[input];

	if (speed == 1000)
		rgmii_speed |= rgmii_speed1000[input];
	else if (speed == 100)
		rgmii_speed |= rgmii_speed100[input];

	out_be32(&rgmii->base->ssr, rgmii_speed);

static void emac_close_rgmii(struct ocp_device *ocpdev)
	struct ibm_ocp_rgmii *rgmii = RGMII_PRIV(ocpdev);
	BUG_ON(!rgmii || rgmii->users == 0);

	if (!--rgmii->users) {
		ocp_set_drvdata(ocpdev, NULL);
		iounmap((void *)rgmii->base);
static int emac_init_zmii(struct ocp_device *zmii_dev, int input, int phy_mode)
	struct ibm_ocp_zmii *zmii = ZMII_PRIV(zmii_dev);
	const char *mode_name[] = { "SMII", "RMII", "MII" };

	zmii = kmalloc(sizeof(struct ibm_ocp_zmii), GFP_KERNEL);
	if (zmii == NULL) {
		printk(KERN_ERR
		       "zmii%d: Out of memory allocating ZMII structure!\n",
		       zmii_dev->def->index);
		return -ENOMEM;
	}

	memset(zmii, 0, sizeof(*zmii));

	zmii->base =
	    (struct zmii_regs *)ioremap(zmii_dev->def->paddr,
					sizeof(*zmii->base));
	if (zmii->base == NULL) {
		printk(KERN_ERR
		       "zmii%d: Cannot ioremap bridge registers!\n",
		       zmii_dev->def->index);

	ocp_set_drvdata(zmii_dev, zmii);

		zmii->base->fer &= ~ZMII_FER_MASK(input);
		zmii->base->fer |= zmii_enable[input][mode];

		switch ((zmii->base->fer & ZMII_FER_MASK(input)) << (4 * input)) {

	/* Set mode to SMII if nothing valid is detected */

	printk(KERN_NOTICE "zmii%d: input %d in %s mode\n",
	       zmii_dev->def->index, input, mode_name[mode]);

	zmii->mode[input] = mode;
static void emac_enable_zmii_port(struct ocp_device *ocpdev, int input)
	struct ibm_ocp_zmii *zmii = ZMII_PRIV(ocpdev);

	mask = in_be32(&zmii->base->fer);
	mask &= zmii_enable[input][MDI];	/* turn all non-enabled MDIs off */
	mask |= zmii_enable[input][zmii->mode[input]] | mdi_enable[input];
	out_be32(&zmii->base->fer, mask);
emac_zmii_port_speed(struct ocp_device *ocpdev, int input, int speed)
	struct ibm_ocp_zmii *zmii = ZMII_PRIV(ocpdev);

	if (speed == 100)
		zmii_speed |= zmii_speed100[input];
	else
		zmii_speed &= ~zmii_speed100[input];

	out_be32(&zmii->base->ssr, zmii_speed);

static void emac_close_zmii(struct ocp_device *ocpdev)
	struct ibm_ocp_zmii *zmii = ZMII_PRIV(ocpdev);
	BUG_ON(!zmii || zmii->users == 0);

	if (!--zmii->users) {
		ocp_set_drvdata(ocpdev, NULL);
		iounmap((void *)zmii->base);
int emac_phy_read(struct net_device *dev, int mii_id, int reg)
	int count = 0;
	uint32_t stacr;
	struct ocp_enet_private *fep = dev->priv;
	emac_t *emacp = fep->emacp;

	MDIO_DEBUG(("%s: phy_read, id: 0x%x, reg: 0x%x\n", dev->name, mii_id,
		    reg));

	/* Enable proper ZMII port */
	if (fep->zmii_dev != NULL)
		emac_enable_zmii_port(fep->zmii_dev, fep->zmii_input);

	/* Use the EMAC that has the MDIO port */

	while ((((stacr = in_be32(&emacp->em0stacr)) & EMAC_STACR_OC) == 0)
	       && (count++ < MDIO_DELAY))
		udelay(1);
	MDIO_DEBUG((" (count was %d)\n", count));

	if ((stacr & EMAC_STACR_OC) == 0) {
		printk(KERN_WARNING "%s: PHY read timeout #1!\n", dev->name);

	/* Clear the speed bits and make a read request to the PHY */
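	/* STACR layout as used here: the low 5 bits select the PHY register,
	 * bits 5-9 carry the PHY address, and the upper 16 bits return the
	 * read data once the OC bit goes high (hence the >> 16 below). */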
	stacr = ((EMAC_STACR_READ | (reg & 0x1f)) & ~EMAC_STACR_CLK_100MHZ);
	stacr |= ((mii_id & 0x1f) << 5);

	out_be32(&emacp->em0stacr, stacr);

	while ((((stacr = in_be32(&emacp->em0stacr)) & EMAC_STACR_OC) == 0)
	       && (count++ < MDIO_DELAY))
		udelay(1);
	MDIO_DEBUG((" (count was %d)\n", count));

	if ((stacr & EMAC_STACR_OC) == 0) {
		printk(KERN_WARNING "%s: PHY read timeout #2!\n", dev->name);

	/* Check for a read error */
	if (stacr & EMAC_STACR_PHYE) {
		MDIO_DEBUG(("EMAC MDIO PHY error !\n"));

	MDIO_DEBUG((" -> 0x%x\n", stacr >> 16));

	return (stacr >> 16);
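/*
 * Usage sketch (illustrative only): reading the PHY ID registers through
 * this helper, e.g. while probing for a PHY:
 *
 *	int id1 = emac_phy_read(dev, fep->mii_phy_addr, MII_PHYSID1);
 *	int id2 = emac_phy_read(dev, fep->mii_phy_addr, MII_PHYSID2);
 *
 * A negative value indicates a timeout or a PHY error.
 */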
void emac_phy_write(struct net_device *dev, int mii_id, int reg, int data)
	int count = 0;
	uint32_t stacr;
	struct ocp_enet_private *fep = dev->priv;
	emac_t *emacp = fep->emacp;

	MDIO_DEBUG(("%s phy_write, id: 0x%x, reg: 0x%x, data: 0x%x\n",
		    dev->name, mii_id, reg, data));

	/* Enable proper ZMII port */
	if (fep->zmii_dev != NULL)
		emac_enable_zmii_port(fep->zmii_dev, fep->zmii_input);

	/* Use the EMAC that has the MDIO port */

	while ((((stacr = in_be32(&emacp->em0stacr)) & EMAC_STACR_OC) == 0)
	       && (count++ < MDIO_DELAY))
		udelay(1);
	MDIO_DEBUG((" (count was %d)\n", count));

	if ((stacr & EMAC_STACR_OC) == 0) {
		printk(KERN_WARNING "%s: PHY write timeout #1!\n", dev->name);

	/* Clear the speed bits and make a write request to the PHY */

	stacr = ((EMAC_STACR_WRITE | (reg & 0x1f)) & ~EMAC_STACR_CLK_100MHZ);
	stacr |= ((mii_id & 0x1f) << 5) | ((data & 0xffff) << 16);

	out_be32(&emacp->em0stacr, stacr);

	while ((((stacr = in_be32(&emacp->em0stacr)) & EMAC_STACR_OC) == 0)
	       && (count++ < MDIO_DELAY))
		udelay(1);
	MDIO_DEBUG((" (count was %d)\n", count));

	if ((stacr & EMAC_STACR_OC) == 0)
		printk(KERN_WARNING "%s: PHY write timeout #2!\n", dev->name);

	/* Check for a write error */
	if ((stacr & EMAC_STACR_PHYE) != 0) {
		MDIO_DEBUG(("EMAC MDIO PHY error !\n"));
static void emac_txeob_dev(void *param, u32 chanmask)
	struct net_device *dev = param;
	struct ocp_enet_private *fep = dev->priv;
	unsigned long flags;

	spin_lock_irqsave(&fep->lock, flags);

	PKT_DEBUG(("emac_txeob_dev() entry, tx_cnt: %d\n", fep->tx_cnt));

	while (fep->tx_cnt &&
	       !(fep->tx_desc[fep->ack_slot].ctrl & MAL_TX_CTRL_READY)) {

		if (fep->tx_desc[fep->ack_slot].ctrl & MAL_TX_CTRL_LAST) {
			/* Tell the system the transmit completed. */
			dma_unmap_single(&fep->ocpdev->dev,
					 fep->tx_desc[fep->ack_slot].data_ptr,
					 fep->tx_desc[fep->ack_slot].data_len,
					 DMA_TO_DEVICE);
			dev_kfree_skb_irq(fep->tx_skb[fep->ack_slot]);

			if (fep->tx_desc[fep->ack_slot].ctrl &
			    (EMAC_TX_ST_EC | EMAC_TX_ST_MC | EMAC_TX_ST_SC))
				fep->stats.collisions++;
		}

		fep->tx_skb[fep->ack_slot] = (struct sk_buff *)NULL;
		if (++fep->ack_slot == NUM_TX_BUFF)
			fep->ack_slot = 0;

	if (fep->tx_cnt < NUM_TX_BUFF)
		netif_wake_queue(dev);

	PKT_DEBUG(("emac_txeob_dev() exit, tx_cnt: %d\n", fep->tx_cnt));

	spin_unlock_irqrestore(&fep->lock, flags);
  Fill/re-fill the rx chain with valid ctrl/ptrs.
  This function will fill from rx_slot up to the parm end.
  So to completely fill the chain, pre-set rx_slot to 0 and

static void emac_rx_fill(struct net_device *dev, int end)
	struct ocp_enet_private *fep = dev->priv;

	/* We don't want the 16 bytes skb_reserve done by dev_alloc_skb();
	 * it breaks our cache line alignment. However, we still allocate
	 * +16 so that we end up allocating the exact same size as
	 * dev_alloc_skb() would do.
	 * Also, because of the skb_res, the max DMA size we give to EMAC
	 * is slightly wrong, causing it to potentially DMA 2 more bytes
	 * from a broken/oversized packet. These 16 bytes ensure that we
	 * don't step on somebody else's toes with that.
	 */
	i = fep->rx_slot;
	do {
		fep->rx_skb[i] =
		    alloc_skb(fep->rx_buffer_size + 16, GFP_ATOMIC);

		if (fep->rx_skb[i] == NULL) {
			/* Keep rx_slot here; the next time clean/fill is
			 * called we will try again before the MAL wraps back
			 * here. If the MAL tries to use this descriptor with
			 * the EMPTY bit off, it will cause the
			 * rxde interrupt. That is where we will
			 * try again to allocate an sk_buff.
			 */

		skb_reserve(fep->rx_skb[i], skb_res);

		/* We must NOT dma_map_single the cache line right after the
		 * buffer, so we must crop our sync size to account for the

		fep->rx_desc[i].data_ptr =
		    (unsigned char *)dma_map_single(&fep->ocpdev->dev,
						    (void *)fep->rx_skb[i]->
						    fep->rx_buffer_size -
						    skb_res, DMA_FROM_DEVICE);

		/*
		 * Some 4xx implementations use the previously
		 * reserved bits in data_len to encode the MS
		 * 4 bits of a 36-bit physical address (ERPN).
		 * This must be initialized.
		 */
		fep->rx_desc[i].data_len = 0;
		fep->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY | MAL_RX_CTRL_INTR |
		    (i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);

	} while ((i = (i + 1) % NUM_RX_BUFF) != end);
emac_rx_csum(struct net_device *dev, unsigned short ctrl, struct sk_buff *skb)
	struct ocp_enet_private *fep = dev->priv;

	/* Exit if interface has no TAH engine */
	if (!fep->tah_dev) {
		skb->ip_summed = CHECKSUM_NONE;
		return;
	}

	/* Check for TCP/UDP/IP csum error */
	if (ctrl & EMAC_CSUM_VER_ERROR) {
		/* Let the stack verify checksum errors */
		skb->ip_summed = CHECKSUM_NONE;
		/* adapter->hw_csum_err++; */
	} else {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		/* adapter->hw_csum_good++; */
	}
static int emac_rx_clean(struct net_device *dev)
	int i, b, bnum = 0, buf[6];
	int error, frame_length;
	struct ocp_enet_private *fep = dev->priv;
	unsigned short ctrl;

	PKT_DEBUG(("emac_rx_clean() entry, rx_slot: %d\n", fep->rx_slot));

	i = fep->rx_slot;
	do {
		if (fep->rx_skb[i] == NULL)
			continue;	/* we have already handled the packet but have failed to alloc */
		/*
		   Since rx_desc is in uncached memory we don't keep reading it
		   directly; we pull out a local copy of ctrl and do the checks
		   on the copy.
		 */
		ctrl = fep->rx_desc[i].ctrl;
		if (ctrl & MAL_RX_CTRL_EMPTY)
			break;	/* we don't have any more ready packets */
		if (EMAC_IS_BAD_RX_PACKET(ctrl)) {
			fep->stats.rx_errors++;
			fep->stats.rx_dropped++;

			if (ctrl & EMAC_RX_ST_OE)
				fep->stats.rx_fifo_errors++;
			if (ctrl & EMAC_RX_ST_AE)
				fep->stats.rx_frame_errors++;
			if (ctrl & EMAC_RX_ST_BFCS)
				fep->stats.rx_crc_errors++;
			if (ctrl & (EMAC_RX_ST_RP | EMAC_RX_ST_PTL |
				    EMAC_RX_ST_ORE | EMAC_RX_ST_IRE))
				fep->stats.rx_length_errors++;

		if ((ctrl & (MAL_RX_CTRL_FIRST | MAL_RX_CTRL_LAST)) ==
		    (MAL_RX_CTRL_FIRST | MAL_RX_CTRL_LAST)) {
			/* Single descriptor packet */
			emac_rx_csum(dev, ctrl, fep->rx_skb[i]);
			/* Send the skb up the chain. */
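			/* data_len appears to include the trailing 4-byte FCS,
			 * hence the -4 below before passing the skb upstream. */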
			frame_length = fep->rx_desc[i].data_len - 4;
			skb_put(fep->rx_skb[i], frame_length);
			fep->rx_skb[i]->dev = dev;
			fep->rx_skb[i]->protocol =
			    eth_type_trans(fep->rx_skb[i], dev);
			error = netif_rx(fep->rx_skb[i]);

			if ((error == NET_RX_DROP) || (error == NET_RX_BAD)) {
				fep->stats.rx_dropped++;
			} else {
				fep->stats.rx_packets++;
				fep->stats.rx_bytes += frame_length;
			}
			fep->rx_skb[i] = NULL;
		} else {
			/* Multiple descriptor packet */
			if (ctrl & MAL_RX_CTRL_FIRST) {
				if (fep->rx_desc[(i + 1) % NUM_RX_BUFF].
				    ctrl & MAL_RX_CTRL_EMPTY)

			if (((ctrl & MAL_RX_CTRL_FIRST) !=
			     MAL_RX_CTRL_FIRST) &&
			    ((ctrl & MAL_RX_CTRL_LAST) !=
			     MAL_RX_CTRL_LAST)) {
				if (fep->rx_desc[(i + 1) %

			if (ctrl & MAL_RX_CTRL_LAST) {

				skb_put(fep->rx_skb[buf[0]],
					fep->rx_desc[buf[0]].data_len);
				for (b = 1; b < bnum; b++) {
					/*
					 * MAL is braindead: we need
					 * to copy the remainder
					 * of the packet from the
					 * latter descriptor buffers
					 * to the first skb. Then
					 * dispose of the source
					 *
					 * Once the stack is fixed
					 * to handle frags on most
					 * protocols we can generate
					 * a fragmented skb with
					 */
					memcpy(fep->rx_skb[buf[0]]->
					       fep->rx_skb[buf[0]]->len,
					       fep->rx_skb[buf[b]]->
					       fep->rx_desc[buf[b]].
					skb_put(fep->rx_skb[buf[0]],
						fep->rx_desc[buf[b]].
					dma_unmap_single(&fep->ocpdev->

				emac_rx_csum(dev, ctrl,
					     fep->rx_skb[buf[0]]);

				fep->rx_skb[buf[0]]->dev = dev;
				fep->rx_skb[buf[0]]->protocol =
				    eth_type_trans(fep->rx_skb[buf[0]],
						   dev);
				error = netif_rx(fep->rx_skb[buf[0]]);

				if ((error == NET_RX_DROP)
				    || (error == NET_RX_BAD)) {
					fep->stats.rx_dropped++;
				} else {
					fep->stats.rx_packets++;
					fep->stats.rx_bytes +=
					    fep->rx_skb[buf[0]]->len;
				}
				for (b = 0; b < bnum; b++)
					fep->rx_skb[buf[b]] = NULL;

	} while ((i = (i + 1) % NUM_RX_BUFF) != fep->rx_slot);

	PKT_DEBUG(("emac_rx_clean() exit, rx_slot: %d\n", fep->rx_slot));
static void emac_rxeob_dev(void *param, u32 chanmask)
	struct net_device *dev = param;
	struct ocp_enet_private *fep = dev->priv;
	unsigned long flags;
	int n;

	spin_lock_irqsave(&fep->lock, flags);
	if ((n = emac_rx_clean(dev)) != fep->rx_slot)
		emac_rx_fill(dev, n);
	spin_unlock_irqrestore(&fep->lock, flags);
/*
 * This interrupt should never occur; we don't program
 * the MAL for continuous mode.
 */
static void emac_txde_dev(void *param, u32 chanmask)
	struct net_device *dev = param;
	struct ocp_enet_private *fep = dev->priv;

	printk(KERN_WARNING "%s: transmit descriptor error\n", dev->name);

	/* Reenable the transmit channel */
	mal_enable_tx_channels(fep->mal, fep->commac.tx_chan_mask);
/*
 * This interrupt should be very rare at best. It occurs when
 * the hardware has a problem with the receive descriptors. The manual
 * states that it occurs when the hardware encounters a receive
 * descriptor whose empty bit is not set. The recovery mechanism is to
 * traverse the descriptors, handle any that are marked to be
 * handled, and reinitialize each along the way. At that point the driver
static void emac_rxde_dev(void *param, u32 chanmask)
	struct net_device *dev = param;
	struct ocp_enet_private *fep = dev->priv;
	unsigned long flags;

	if (net_ratelimit()) {
		printk(KERN_WARNING "%s: receive descriptor error\n",
		       dev->name);

	/* Disable RX channel */
	spin_lock_irqsave(&fep->lock, flags);
	mal_disable_rx_channels(fep->mal, fep->commac.rx_chan_mask);

	/* For now, charge the error against all emacs */
	fep->stats.rx_errors++;

	/* so do we have any good packets still? */
	emac_rx_clean(dev);

	/* When the interface is restarted it resets processing to the
	 * first descriptor in the table.
	 */
	fep->rx_slot = 0;
	emac_rx_fill(dev, 0);

	set_mal_dcrn(fep->mal, DCRN_MALRXEOBISR, fep->commac.rx_chan_mask);
	set_mal_dcrn(fep->mal, DCRN_MALRXDEIR, fep->commac.rx_chan_mask);

	/* Reenable the receive channels */
	mal_enable_rx_channels(fep->mal, fep->commac.rx_chan_mask);
	spin_unlock_irqrestore(&fep->lock, flags);
emac_mac_irq(int irq, void *dev_instance, struct pt_regs *regs)
	struct net_device *dev = dev_instance;
	struct ocp_enet_private *fep = dev->priv;
	emac_t *emacp = fep->emacp;
	unsigned long tmp_em0isr;

	tmp_em0isr = in_be32(&emacp->em0isr);
	if (tmp_em0isr & (EMAC_ISR_TE0 | EMAC_ISR_TE1)) {
		/* This error is a hard transmit error - could retransmit */
		fep->stats.tx_errors++;

		/* Reenable the transmit channel */
		mal_enable_tx_channels(fep->mal, fep->commac.tx_chan_mask);
	} else {
		fep->stats.rx_errors++;

		if (tmp_em0isr & EMAC_ISR_RP)
			fep->stats.rx_length_errors++;
		if (tmp_em0isr & EMAC_ISR_ALE)
			fep->stats.rx_frame_errors++;
		if (tmp_em0isr & EMAC_ISR_BFCS)
			fep->stats.rx_crc_errors++;
		if (tmp_em0isr & EMAC_ISR_PTLE)
			fep->stats.rx_length_errors++;
		if (tmp_em0isr & EMAC_ISR_ORE)
			fep->stats.rx_length_errors++;
		if (tmp_em0isr & EMAC_ISR_TE0)
			fep->stats.tx_aborted_errors++;
	}

	emac_err_dump(dev, tmp_em0isr);

	out_be32(&emacp->em0isr, tmp_em0isr);
static int emac_start_xmit(struct sk_buff *skb, struct net_device *dev)
	unsigned short ctrl;
	unsigned long flags;
	struct ocp_enet_private *fep = dev->priv;
	emac_t *emacp = fep->emacp;
	int len = skb->len;
	unsigned int offset = 0, size, f, tx_slot_first;
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;

	spin_lock_irqsave(&fep->lock, flags);

	len -= skb->data_len;
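	/* Worst-case descriptor budget for this skb: one descriptor per
	 * DESC_BUF_SIZE chunk of the linear area (len / DESC_BUF_SIZE + 1)
	 * plus one per page fragment. For example (hypothetical sizes), a
	 * 3000-byte linear area with a 2048-byte DESC_BUF_SIZE needs two
	 * descriptors. If the ring cannot hold them all, stop the queue. */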
	if ((fep->tx_cnt + nr_frags + len / DESC_BUF_SIZE + 1) > NUM_TX_BUFF) {
		PKT_DEBUG(("emac_start_xmit() stopping queue\n"));
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&fep->lock, flags);
	}

	tx_slot_first = fep->tx_slot;

	while (len) {
		size = min(len, DESC_BUF_SIZE);

		fep->tx_desc[fep->tx_slot].data_len = (short)size;
		fep->tx_desc[fep->tx_slot].data_ptr =
		    (unsigned char *)dma_map_single(&fep->ocpdev->dev,
						    (void *)((unsigned int)skb->
						    size, DMA_TO_DEVICE);

		ctrl = EMAC_TX_CTRL_DFLT;
		if (fep->tx_slot != tx_slot_first)
			ctrl |= MAL_TX_CTRL_READY;
		if ((NUM_TX_BUFF - 1) == fep->tx_slot)
			ctrl |= MAL_TX_CTRL_WRAP;
		if (!nr_frags && (len == size)) {
			ctrl |= MAL_TX_CTRL_LAST;
			fep->tx_skb[fep->tx_slot] = skb;
		}
		if (skb->ip_summed == CHECKSUM_HW)
			ctrl |= EMAC_TX_CTRL_TAH_CSUM;

		fep->tx_desc[fep->tx_slot].ctrl = ctrl;

		if (++fep->tx_cnt == NUM_TX_BUFF)
			netif_stop_queue(dev);

		/* Next descriptor */
		if (++fep->tx_slot == NUM_TX_BUFF)
			fep->tx_slot = 0;
	}
	for (f = 0; f < nr_frags; f++) {
		struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[f];
		len = frag->size;

		while (len) {
			size = min(len, DESC_BUF_SIZE);

			dma_map_page(&fep->ocpdev->dev,
				     frag->page,
				     frag->page_offset + offset,
				     size, DMA_TO_DEVICE);

			ctrl = EMAC_TX_CTRL_DFLT | MAL_TX_CTRL_READY;
			if ((NUM_TX_BUFF - 1) == fep->tx_slot)
				ctrl |= MAL_TX_CTRL_WRAP;
			if ((f == (nr_frags - 1)) && (len == size)) {
				ctrl |= MAL_TX_CTRL_LAST;
				fep->tx_skb[fep->tx_slot] = skb;
			}
			if (skb->ip_summed == CHECKSUM_HW)
				ctrl |= EMAC_TX_CTRL_TAH_CSUM;

			fep->tx_desc[fep->tx_slot].data_len = (short)size;
			fep->tx_desc[fep->tx_slot].data_ptr =
			    (char *)((page_to_pfn(frag->page) << PAGE_SHIFT) +
				     frag->page_offset + offset);
			fep->tx_desc[fep->tx_slot].ctrl = ctrl;

			if (++fep->tx_cnt == NUM_TX_BUFF)
				netif_stop_queue(dev);

			/* Next descriptor */
			if (++fep->tx_slot == NUM_TX_BUFF)
				fep->tx_slot = 0;
		}
	}
	/*
	 * Deferred set READY on first descriptor of packet to
	 * avoid TX MAL race.
	 */
	fep->tx_desc[tx_slot_first].ctrl |= MAL_TX_CTRL_READY;

	/* Send the packet out. */
	out_be32(&emacp->em0tmr0, EMAC_TMR0_XMIT);

	fep->stats.tx_packets++;
	fep->stats.tx_bytes += skb->len;

	PKT_DEBUG(("emac_start_xmit() exit\n"));

	spin_unlock_irqrestore(&fep->lock, flags);
static int emac_adjust_to_link(struct ocp_enet_private *fep)
	emac_t *emacp = fep->emacp;
	unsigned long mode_reg;
	int full_duplex, speed;

	full_duplex = 0;
	speed = SPEED_10;

	/* set mode register 1 defaults */
	mode_reg = EMAC_M1_DEFAULT;

	/* Read link mode on PHY */
	if (fep->phy_mii.def->ops->read_link(&fep->phy_mii) == 0) {
		/* If an error occurred, we don't deal with it yet */
		full_duplex = (fep->phy_mii.duplex == DUPLEX_FULL);
		speed = fep->phy_mii.speed;
	}

	/* set speed (default is 10Mb) */
	switch (speed) {
	case SPEED_1000:
		mode_reg |= EMAC_M1_RFS_16K;
		if (fep->rgmii_dev) {
			struct ibm_ocp_rgmii *rgmii = RGMII_PRIV(fep->rgmii_dev);

			if ((rgmii->mode[fep->rgmii_input] == RTBI)
			    || (rgmii->mode[fep->rgmii_input] == TBI))
				mode_reg |= EMAC_M1_MF_1000GPCS;
			else
				mode_reg |= EMAC_M1_MF_1000MBPS;

			emac_rgmii_port_speed(fep->rgmii_dev, fep->rgmii_input,
					      1000);
		}
		break;
	case SPEED_100:
		mode_reg |= EMAC_M1_MF_100MBPS | EMAC_M1_RFS_4K;
		if (fep->rgmii_dev)
			emac_rgmii_port_speed(fep->rgmii_dev, fep->rgmii_input,
					      100);
		if (fep->zmii_dev)
			emac_zmii_port_speed(fep->zmii_dev, fep->zmii_input,
					     100);
		break;
	case SPEED_10:
	default:
		mode_reg = (mode_reg & ~EMAC_M1_MF_100MBPS) | EMAC_M1_RFS_4K;
		if (fep->rgmii_dev)
			emac_rgmii_port_speed(fep->rgmii_dev, fep->rgmii_input,
					      10);
		if (fep->zmii_dev)
			emac_zmii_port_speed(fep->zmii_dev, fep->zmii_input,
					     10);
	}

	if (full_duplex)
		mode_reg |= EMAC_M1_FDE | EMAC_M1_EIFC | EMAC_M1_IST;
	else
		mode_reg &= ~(EMAC_M1_FDE | EMAC_M1_EIFC | EMAC_M1_ILE);

	LINK_DEBUG(("%s: adjust to link, speed: %d, duplex: %d, opened: %d\n",
		    fep->ndev->name, speed, full_duplex, fep->opened));

	printk(KERN_INFO "%s: Speed: %d, %s duplex.\n",
	       fep->ndev->name, speed, full_duplex ? "Full" : "Half");

	out_be32(&emacp->em0mr1, mode_reg);
static int emac_set_mac_address(struct net_device *ndev, void *p)
	struct ocp_enet_private *fep = ndev->priv;
	emac_t *emacp = fep->emacp;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);

	/* set the high address */
	out_be32(&emacp->em0iahr,
		 (fep->ndev->dev_addr[0] << 8) | fep->ndev->dev_addr[1]);

	/* set the low address */
	out_be32(&emacp->em0ialr,
		 (fep->ndev->dev_addr[2] << 24) | (fep->ndev->dev_addr[3] << 16)
		 | (fep->ndev->dev_addr[4] << 8) | fep->ndev->dev_addr[5]);
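	/* Example (illustrative): for MAC 00:11:22:33:44:55 the writes above
	 * set IAHR = 0x0011 and IALR = 0x22334455. */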
static int emac_change_mtu(struct net_device *dev, int new_mtu)
	struct ocp_enet_private *fep = dev->priv;
	int old_mtu = dev->mtu;
	unsigned long mode_reg;
	emac_t *emacp = fep->emacp;
	u32 em0mr0;
	int i;
	unsigned long flags;

	if ((new_mtu < EMAC_MIN_MTU) || (new_mtu > EMAC_MAX_MTU)) {
		printk(KERN_ERR
		       "emac: Invalid MTU setting, MTU must be between %d and %d\n",
		       EMAC_MIN_MTU, EMAC_MAX_MTU);
		return -EINVAL;
	}

	if (old_mtu != new_mtu && netif_running(dev)) {
		/* Stop rx engine */
		em0mr0 = in_be32(&emacp->em0mr0);
		out_be32(&emacp->em0mr0, em0mr0 & ~EMAC_M0_RXE);

		/* Wait for descriptors to be empty */
		for (i = 0; i < NUM_RX_BUFF; i++)
			if (!(fep->rx_desc[i].ctrl & MAL_RX_CTRL_EMPTY)) {
				printk(KERN_NOTICE
				       "emac: RX ring is still full\n");

		spin_lock_irqsave(&fep->lock, flags);

		mal_disable_rx_channels(fep->mal, fep->commac.rx_chan_mask);

		/* Destroy all old rx skbs */
		for (i = 0; i < NUM_RX_BUFF; i++) {
			dma_unmap_single(&fep->ocpdev->dev,
					 fep->rx_desc[i].data_ptr,
					 fep->rx_desc[i].data_len,
					 DMA_FROM_DEVICE);
			dev_kfree_skb(fep->rx_skb[i]);
			fep->rx_skb[i] = NULL;
		}

		/* Set new rx_buffer_size, jumbo cap, and advertise new mtu */
		mode_reg = in_be32(&emacp->em0mr1);
		if (new_mtu > ENET_DEF_MTU_SIZE) {
			mode_reg |= EMAC_M1_JUMBO_ENABLE;
			fep->rx_buffer_size = EMAC_MAX_FRAME;
		} else {
			mode_reg &= ~EMAC_M1_JUMBO_ENABLE;
			fep->rx_buffer_size = ENET_DEF_BUF_SIZE;
		}
		dev->mtu = new_mtu;
		out_be32(&emacp->em0mr1, mode_reg);

		/* Re-init rx skbs */
		fep->rx_slot = 0;
		emac_rx_fill(dev, 0);

		/* Restart the rx engine */
		mal_enable_rx_channels(fep->mal, fep->commac.rx_chan_mask);
		out_be32(&emacp->em0mr0, em0mr0 | EMAC_M0_RXE);

		spin_unlock_irqrestore(&fep->lock, flags);
static void __emac_set_multicast_list(struct net_device *dev)
	struct ocp_enet_private *fep = dev->priv;
	emac_t *emacp = fep->emacp;
	u32 rmr = in_be32(&emacp->em0rmr);

	/* First clear all special bits, they can be set later */
	rmr &= ~(EMAC_RMR_PME | EMAC_RMR_PMME | EMAC_RMR_MAE);

	if (dev->flags & IFF_PROMISC) {
		rmr |= EMAC_RMR_PME;
	} else if (dev->flags & IFF_ALLMULTI || 32 < dev->mc_count) {
		/*
		 * Must be setting up to use multicast.
		 * Now check for promiscuous multicast.
		 */
		rmr |= EMAC_RMR_PMME;
	} else if (dev->flags & IFF_MULTICAST && 0 < dev->mc_count) {
		unsigned short em0gaht[4] = { 0, 0, 0, 0 };
		struct dev_mc_list *dmi;

		/* Need to hash on the multicast address. */
		for (dmi = dev->mc_list; dmi; dmi = dmi->next) {
			unsigned long mc_crc;
			unsigned int bit_number;

			mc_crc = ether_crc(6, (char *)dmi->dmi_addr);
			bit_number = 63 - (mc_crc >> 26);	/* MSB: 0  LSB: 63 */
			em0gaht[bit_number >> 4] |=
			    0x8000 >> (bit_number & 0x0f);
		}
		emacp->em0gaht1 = em0gaht[0];
		emacp->em0gaht2 = em0gaht[1];
		emacp->em0gaht3 = em0gaht[2];
		emacp->em0gaht4 = em0gaht[3];
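		/* Worked example (hypothetical CRC): if mc_crc >> 26 == 5,
		 * bit_number = 63 - 5 = 58; 58 >> 4 == 3 selects em0gaht4,
		 * and 0x8000 >> (58 & 0xf) == 0x0020 is the bit set there. */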
		/* Turn on multicast addressing */
		rmr |= EMAC_RMR_MAE;
	}
	out_be32(&emacp->em0rmr, rmr);
static int emac_init_tah(struct ocp_enet_private *fep)
	tah_t *tahp;

	/* Initialize TAH and enable checksum verification */
	tahp = (tah_t *) ioremap(fep->tah_dev->def->paddr, sizeof(*tahp));
	if (tahp == NULL) {
		printk(KERN_ERR "tah%d: Cannot ioremap TAH registers!\n",
		       fep->tah_dev->def->index);
		return -ENOMEM;
	}

	out_be32(&tahp->tah_mr, TAH_MR_SR);

	/* wait for reset to complete */
	while (in_be32(&tahp->tah_mr) & TAH_MR_SR) ;

	/* The 10KB TAH TX FIFO accommodates the max MTU of 9000 */
	out_be32(&tahp->tah_mr,
		 TAH_MR_CVR | TAH_MR_ST_768 | TAH_MR_TFS_10KB | TAH_MR_DTFP |
static void emac_init_rings(struct net_device *dev)
	struct ocp_enet_private *ep = dev->priv;
	int loop;

	ep->tx_desc = (struct mal_descriptor *)((char *)ep->mal->tx_virt_addr +
						(ep->mal_tx_chan * MAL_DT_ALIGN));
	ep->rx_desc =
	    (struct mal_descriptor *)((char *)ep->mal->rx_virt_addr +
				      (ep->mal_rx_chan * MAL_DT_ALIGN));

	/* Fill in the transmit descriptor ring. */
	for (loop = 0; loop < NUM_TX_BUFF; loop++) {
		if (ep->tx_skb[loop]) {
			dma_unmap_single(&ep->ocpdev->dev,
					 ep->tx_desc[loop].data_ptr,
					 ep->tx_desc[loop].data_len,
					 DMA_TO_DEVICE);
			dev_kfree_skb_irq(ep->tx_skb[loop]);
		}
		ep->tx_skb[loop] = NULL;
		ep->tx_desc[loop].ctrl = 0;
		ep->tx_desc[loop].data_len = 0;
		ep->tx_desc[loop].data_ptr = NULL;
	}
	ep->tx_desc[loop - 1].ctrl |= MAL_TX_CTRL_WRAP;
	/* Format the receive descriptor ring. */
	ep->rx_slot = 0;
	/* Default is MTU=1500 + Ethernet overhead */
	ep->rx_buffer_size = dev->mtu + ENET_HEADER_SIZE + ENET_FCS_SIZE;
	emac_rx_fill(dev, 0);
	if (ep->rx_slot != 0) {
		printk(KERN_ERR
		       "%s: Not enough mem for RxChain during Open?\n",
		       dev->name);
		/* We couldn't fill the ring at startup?
		 * We could clean up and fail to open, but right now we will
		 * try to carry on. It may be a sign of a bad NUM_RX_BUFF
		 * value.
		 */
	}
static void emac_reset_configure(struct ocp_enet_private *fep)
	emac_t *emacp = fep->emacp;
	int i;

	mal_disable_tx_channels(fep->mal, fep->commac.tx_chan_mask);
	mal_disable_rx_channels(fep->mal, fep->commac.rx_chan_mask);

	/*
	 * Check for a link, some PHYs don't provide a clock if
	 * no link is present. Some EMACs will not come out of
	 * soft reset without a PHY clock present.
	 */
	if (fep->phy_mii.def->ops->poll_link(&fep->phy_mii)) {
		/* Reset the EMAC */
		out_be32(&emacp->em0mr0, EMAC_M0_SRST);

		for (i = 0; i < 100; i++) {
			if ((in_be32(&emacp->em0mr0) & EMAC_M0_SRST) == 0)
				break;
		}

		if (i >= 100) {
			printk(KERN_ERR "%s: Cannot reset EMAC\n",
			       fep->ndev->name);
		}
	}

	/* Switch IRQs off for now */
	out_be32(&emacp->em0iser, 0);

	/* Configure MAL rx channel */
	mal_set_rcbs(fep->mal, fep->mal_rx_chan, DESC_BUF_SIZE_REG);

	/* set the high address */
	out_be32(&emacp->em0iahr,
		 (fep->ndev->dev_addr[0] << 8) | fep->ndev->dev_addr[1]);

	/* set the low address */
	out_be32(&emacp->em0ialr,
		 (fep->ndev->dev_addr[2] << 24) | (fep->ndev->dev_addr[3] << 16)
		 | (fep->ndev->dev_addr[4] << 8) | fep->ndev->dev_addr[5]);

	/* Adjust to link */
	if (netif_carrier_ok(fep->ndev))
		emac_adjust_to_link(fep);

	/* enable broadcast/individual address and RX FIFO defaults */
	out_be32(&emacp->em0rmr, EMAC_RMR_DEFAULT);

	/* set transmit request threshold register */
	out_be32(&emacp->em0trtr, EMAC_TRTR_DEFAULT);

	/* Reconfigure multicast */
	__emac_set_multicast_list(fep->ndev);

	/* Set receiver/transmitter defaults */
	out_be32(&emacp->em0rwmr, EMAC_RWMR_DEFAULT);
	out_be32(&emacp->em0tmr0, EMAC_TMR0_DEFAULT);
	out_be32(&emacp->em0tmr1, EMAC_TMR1_DEFAULT);

	out_be32(&emacp->em0ipgvr, CONFIG_IBM_EMAC_FGAP);

	/* set VLAN Tag Protocol Identifier */
	out_be32(&emacp->em0vtpid, 0x8100);

	/* Init ring buffers */
	emac_init_rings(fep->ndev);
static void emac_kick(struct ocp_enet_private *fep)
	emac_t *emacp = fep->emacp;
	unsigned long emac_ier;

	emac_ier = EMAC_ISR_PP | EMAC_ISR_BP | EMAC_ISR_RP |
	    EMAC_ISR_SE | EMAC_ISR_PTLE | EMAC_ISR_ALE |
	    EMAC_ISR_BFCS | EMAC_ISR_ORE | EMAC_ISR_IRE;

	out_be32(&emacp->em0iser, emac_ier);

	/* enable all MAL transmit and receive channels */
	mal_enable_tx_channels(fep->mal, fep->commac.tx_chan_mask);
	mal_enable_rx_channels(fep->mal, fep->commac.rx_chan_mask);

	/* set transmit and receive enable */
	out_be32(&emacp->em0mr0, EMAC_M0_TXE | EMAC_M0_RXE);
emac_start_link(struct ocp_enet_private *fep, struct ethtool_cmd *ep)
	int advertise;
	int autoneg;
	int forced_speed;
	int forced_duplex;

	/* Default advertise */
	advertise = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
	    ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
	    ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full;
	autoneg = fep->want_autoneg;
	forced_speed = fep->phy_mii.speed;
	forced_duplex = fep->phy_mii.duplex;

	/* Setup link parameters */
	if (ep) {
		if (ep->autoneg == AUTONEG_ENABLE) {
			advertise = ep->advertising;
			autoneg = 1;
		} else {
			autoneg = 0;
			forced_speed = ep->speed;
			forced_duplex = ep->duplex;
		}
	}

	/* Configure PHY & start aneg */
	fep->want_autoneg = autoneg;
	if (autoneg) {
		LINK_DEBUG(("%s: start link aneg, advertise: 0x%x\n",
			    fep->ndev->name, advertise));
		fep->phy_mii.def->ops->setup_aneg(&fep->phy_mii, advertise);
	} else {
		LINK_DEBUG(("%s: start link forced, speed: %d, duplex: %d\n",
			    fep->ndev->name, forced_speed, forced_duplex));
		fep->phy_mii.def->ops->setup_forced(&fep->phy_mii, forced_speed,
						    forced_duplex);
	}
	fep->timer_ticks = 0;
	mod_timer(&fep->link_timer, jiffies + HZ);
static void emac_link_timer(unsigned long data)
	struct ocp_enet_private *fep = (struct ocp_enet_private *)data;
	int link;

	if (fep->going_away)
		return;

	spin_lock_irq(&fep->lock);

	link = fep->phy_mii.def->ops->poll_link(&fep->phy_mii);
	LINK_DEBUG(("%s: poll_link: %d\n", fep->ndev->name, link));

	if (link == netif_carrier_ok(fep->ndev)) {
		if (!link && fep->want_autoneg && (++fep->timer_ticks) > 10)
			emac_start_link(fep, NULL);
	} else {
		printk(KERN_INFO "%s: Link is %s\n", fep->ndev->name,
		       link ? "Up" : "Down");
		if (link) {
			netif_carrier_on(fep->ndev);
			/* Chip needs a full reset on config change. That sucks,
			 * so I should ultimately move that to some tasklet to
			 * limit latency peaks caused by this code.
			 */
			emac_reset_configure(fep);
		} else {
			fep->timer_ticks = 0;
			netif_carrier_off(fep->ndev);
		}
	}

	mod_timer(&fep->link_timer, jiffies + HZ);
	spin_unlock_irq(&fep->lock);
static void emac_set_multicast_list(struct net_device *dev)
	struct ocp_enet_private *fep = dev->priv;

	spin_lock_irq(&fep->lock);
	__emac_set_multicast_list(dev);
	spin_unlock_irq(&fep->lock);

static int emac_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
	struct ocp_enet_private *fep = ndev->priv;

	cmd->supported = fep->phy_mii.def->features;
	cmd->port = PORT_MII;
	cmd->transceiver = XCVR_EXTERNAL;
	cmd->phy_address = fep->mii_phy_addr;
	spin_lock_irq(&fep->lock);
	cmd->autoneg = fep->want_autoneg;
	cmd->speed = fep->phy_mii.speed;
	cmd->duplex = fep->phy_mii.duplex;
	spin_unlock_irq(&fep->lock);
static int emac_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
	struct ocp_enet_private *fep = ndev->priv;
	unsigned long features = fep->phy_mii.def->features;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;
	if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
		return -EINVAL;
	if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_DISABLE) {
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_HALF &&
			    (features & SUPPORTED_10baseT_Half) == 0)
				return -EINVAL;
			if (cmd->duplex == DUPLEX_FULL &&
			    (features & SUPPORTED_10baseT_Full) == 0)
				return -EINVAL;
			break;
		case SPEED_100:
			if (cmd->duplex == DUPLEX_HALF &&
			    (features & SUPPORTED_100baseT_Half) == 0)
				return -EINVAL;
			if (cmd->duplex == DUPLEX_FULL &&
			    (features & SUPPORTED_100baseT_Full) == 0)
				return -EINVAL;
			break;
		case SPEED_1000:
			if (cmd->duplex == DUPLEX_HALF &&
			    (features & SUPPORTED_1000baseT_Half) == 0)
				return -EINVAL;
			if (cmd->duplex == DUPLEX_FULL &&
			    (features & SUPPORTED_1000baseT_Full) == 0)
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}
	} else if ((features & SUPPORTED_Autoneg) == 0)
		return -EINVAL;

	spin_lock_irq(&fep->lock);
	emac_start_link(fep, cmd);
	spin_unlock_irq(&fep->lock);
emac_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info)
	struct ocp_enet_private *fep = ndev->priv;

	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	info->fw_version[0] = '\0';
	sprintf(info->bus_info, "IBM EMAC %d", fep->ocpdev->def->index);
	info->regdump_len = 0;

static int emac_nway_reset(struct net_device *ndev)
	struct ocp_enet_private *fep = ndev->priv;

	if (!fep->want_autoneg)
		return -EINVAL;
	spin_lock_irq(&fep->lock);
	emac_start_link(fep, NULL);
	spin_unlock_irq(&fep->lock);
	return 0;

static u32 emac_get_link(struct net_device *ndev)
	return netif_carrier_ok(ndev);

static struct ethtool_ops emac_ethtool_ops = {
	.get_settings = emac_get_settings,
	.set_settings = emac_set_settings,
	.get_drvinfo = emac_get_drvinfo,
	.nway_reset = emac_nway_reset,
	.get_link = emac_get_link
};
static int emac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
	struct ocp_enet_private *fep = dev->priv;
	uint16_t *data = (uint16_t *)&rq->ifr_ifru;

	switch (cmd) {
	case SIOCGMIIPHY:
		data[0] = fep->mii_phy_addr;
		/* fall through */
	case SIOCGMIIREG:
		data[3] = emac_phy_read(dev, fep->mii_phy_addr, data[1]);
		return 0;
	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		emac_phy_write(dev, fep->mii_phy_addr, data[1], data[2]);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
static int emac_open(struct net_device *dev)
	struct ocp_enet_private *fep = dev->priv;
	int rc;

	spin_lock_irq(&fep->lock);

	fep->opened = 1;
	netif_carrier_off(dev);

	/* Reset & configure the chip */
	emac_reset_configure(fep);

	spin_unlock_irq(&fep->lock);

	/* Request our interrupt lines */
	rc = request_irq(dev->irq, emac_mac_irq, 0, "IBM EMAC MAC", dev);
	if (rc != 0) {
		printk(KERN_ERR "%s: Could not request IRQ %d\n",
		       dev->name, dev->irq);
		return rc;
	}

	/* Kick the chip rx & tx channels into life */
	spin_lock_irq(&fep->lock);
	emac_kick(fep);
	spin_unlock_irq(&fep->lock);

	netif_start_queue(dev);
	return 0;
static int emac_close(struct net_device *dev)
	struct ocp_enet_private *fep = dev->priv;
	emac_t *emacp = fep->emacp;

	/* XXX Stop IRQ emitting here */
	spin_lock_irq(&fep->lock);
	fep->opened = 0;
	mal_disable_tx_channels(fep->mal, fep->commac.tx_chan_mask);
	mal_disable_rx_channels(fep->mal, fep->commac.rx_chan_mask);
	netif_carrier_off(dev);
	netif_stop_queue(dev);

	/*
	 * Check for a link, some PHYs don't provide a clock if
	 * no link is present. Some EMACs will not come out of
	 * soft reset without a PHY clock present.
	 */
	if (fep->phy_mii.def->ops->poll_link(&fep->phy_mii)) {
		out_be32(&emacp->em0mr0, EMAC_M0_SRST);

		if (emacp->em0mr0 & EMAC_M0_SRST) {
			/* not sure what to do here; hopefully it clears
			 * before another open */
			printk(KERN_ERR
			       "%s: Phy SoftReset didn't clear, no link?\n",
			       dev->name);
		}
	}

	/* Free the irq's */
	free_irq(dev->irq, dev);

	spin_unlock_irq(&fep->lock);
	return 0;
static void emac_remove(struct ocp_device *ocpdev)
	struct net_device *dev = ocp_get_drvdata(ocpdev);
	struct ocp_enet_private *ep = dev->priv;

	/* FIXME: locking, races, ... */
	ep->going_away = 1;
	ocp_set_drvdata(ocpdev, NULL);
	if (ep->rgmii_dev)
		emac_close_rgmii(ep->rgmii_dev);
	if (ep->zmii_dev)
		emac_close_zmii(ep->zmii_dev);

	unregister_netdev(dev);
	del_timer_sync(&ep->link_timer);
	mal_unregister_commac(ep->mal, &ep->commac);
	iounmap((void *)ep->emacp);

struct mal_commac_ops emac_commac_ops = {
	.txeob = &emac_txeob_dev,
	.txde = &emac_txde_dev,
	.rxeob = &emac_rxeob_dev,
	.rxde = &emac_rxde_dev,
};

#ifdef CONFIG_NET_POLL_CONTROLLER
static int emac_netpoll(struct net_device *ndev)
	emac_rxeob_dev((void *)ndev, 0);
	emac_txeob_dev((void *)ndev, 0);
	return 0;
#endif
static int emac_init_device(struct ocp_device *ocpdev, struct ibm_ocp_mal *mal)
	int deferred_init = 0;
	struct net_device *ndev;
	struct ocp_enet_private *ep;
	struct ocp_func_emac_data *emacdata;

	emacdata = (struct ocp_func_emac_data *)ocpdev->def->additions;
	if (emacdata == NULL) {
		printk(KERN_ERR "emac%d: Missing additional data!\n",
		       ocpdev->def->index);
		return -ENODEV;
	}

	/* Allocate our net_device structure */
	ndev = alloc_etherdev(sizeof(struct ocp_enet_private));
	if (ndev == NULL) {
		printk(KERN_ERR
		       "emac%d: Could not allocate ethernet device.\n",
		       ocpdev->def->index);
		return -ENOMEM;
	}

	ep = ndev->priv;
	ep->ndev = ndev;
	ep->ocpdev = ocpdev;
	ndev->irq = ocpdev->def->irq;
	ep->wol_irq = emacdata->wol_irq;
	if (emacdata->mdio_idx >= 0) {
		if (emacdata->mdio_idx == ocpdev->def->index) {
			/* Set the common MDIO net_device */
			mdio_ndev = ndev;
			deferred_init = 1;
		}
		ep->mdio_dev = mdio_ndev;
	} else {
		ep->mdio_dev = ndev;
	}

	ocp_set_drvdata(ocpdev, ndev);

	spin_lock_init(&ep->lock);

	/* Fill out MAL information and register commac */
	ep->mal = mal;
	ep->mal_tx_chan = emacdata->mal_tx_chan;
	ep->mal_rx_chan = emacdata->mal_rx_chan;
	ep->commac.ops = &emac_commac_ops;
	ep->commac.dev = ndev;
	ep->commac.tx_chan_mask = MAL_CHAN_MASK(ep->mal_tx_chan);
	ep->commac.rx_chan_mask = MAL_CHAN_MASK(ep->mal_rx_chan);
	rc = mal_register_commac(ep->mal, &ep->commac);
	ep->emacp = (emac_t *) ioremap(ocpdev->def->paddr, sizeof(emac_t));

	/* Check if we need to attach to a ZMII */
	if (emacdata->zmii_idx >= 0) {
		ep->zmii_input = emacdata->zmii_mux;
		ep->zmii_dev =
		    ocp_find_device(OCP_ANY_ID, OCP_FUNC_ZMII,
				    emacdata->zmii_idx);
		if (ep->zmii_dev == NULL)
			printk(KERN_WARNING
			       "emac%d: ZMII %d requested but not found!\n",
			       ocpdev->def->index, emacdata->zmii_idx);
		else if ((rc =
			  emac_init_zmii(ep->zmii_dev, ep->zmii_input,
					 emacdata->phy_mode)) != 0)
			goto bail;
	}

	/* Check if we need to attach to a RGMII */
	if (emacdata->rgmii_idx >= 0) {
		ep->rgmii_input = emacdata->rgmii_mux;
		ep->rgmii_dev =
		    ocp_find_device(OCP_ANY_ID, OCP_FUNC_RGMII,
				    emacdata->rgmii_idx);
		if (ep->rgmii_dev == NULL)
			printk(KERN_WARNING
			       "emac%d: RGMII %d requested but not found!\n",
			       ocpdev->def->index, emacdata->rgmii_idx);
		else if ((rc =
			  emac_init_rgmii(ep->rgmii_dev, ep->rgmii_input,
					  emacdata->phy_mode)) != 0)
			goto bail;
	}

	/* Check if we need to attach to a TAH */
	if (emacdata->tah_idx >= 0) {
		ep->tah_dev =
		    ocp_find_device(OCP_ANY_ID, OCP_FUNC_TAH,
				    emacdata->tah_idx);
		if (ep->tah_dev == NULL)
			printk(KERN_WARNING
			       "emac%d: TAH %d requested but not found!\n",
			       ocpdev->def->index, emacdata->tah_idx);
		else if ((rc = emac_init_tah(ep)) != 0)
			goto bail;
	}
	if (deferred_init) {
		if (!list_empty(&emac_init_list)) {
			struct list_head *entry;
			struct emac_def_dev *ddev;

			list_for_each(entry, &emac_init_list) {
				ddev =
				    list_entry(entry, struct emac_def_dev,
					       link);
				emac_init_device(ddev->ocpdev, ddev->mal);
			}
		}
	}

	/* Init link monitoring timer */
	init_timer(&ep->link_timer);
	ep->link_timer.function = emac_link_timer;
	ep->link_timer.data = (unsigned long)ep;
	ep->timer_ticks = 0;
	/* Fill up the mii_phy structure */
	ep->phy_mii.dev = ndev;
	ep->phy_mii.mdio_read = emac_phy_read;
	ep->phy_mii.mdio_write = emac_phy_write;
	ep->phy_mii.mode = emacdata->phy_mode;

	phy_map = emacdata->phy_map | busy_phy_map;
	for (i = 0; i <= 0x1f; i++, phy_map >>= 1) {
		if ((phy_map & 0x1) == 0) {
			int val = emac_phy_read(ndev, i, MII_BMCR);
			if (val != 0xffff && val != -1)
				break;
		}
	}
	if (i == 0x20) {
		printk(KERN_WARNING "emac%d: Can't find PHY.\n",
		       ocpdev->def->index);
		goto bail;
	}

	busy_phy_map |= 1 << i;
	ep->mii_phy_addr = i;
	rc = mii_phy_probe(&ep->phy_mii, i);
	if (rc != 0) {
		printk(KERN_WARNING "emac%d: Failed to probe PHY type.\n",
		       ocpdev->def->index);
		goto bail;
	}
	/* Setup initial PHY config & startup aneg */
	if (ep->phy_mii.def->ops->init)
		ep->phy_mii.def->ops->init(&ep->phy_mii);
	netif_carrier_off(ndev);
	if (ep->phy_mii.def->features & SUPPORTED_Autoneg)
		ep->want_autoneg = 1;
	emac_start_link(ep, NULL);

	/* read the MAC Address */
	for (i = 0; i < 6; i++)
		ndev->dev_addr[i] = emacdata->mac_addr[i];

	/* Fill in the driver function table */
	ndev->open = &emac_open;
	ndev->hard_start_xmit = &emac_start_xmit;
	ndev->stop = &emac_close;
	ndev->get_stats = &emac_stats;
	if (emacdata->jumbo)
		ndev->change_mtu = &emac_change_mtu;
	ndev->set_mac_address = &emac_set_mac_address;
	ndev->set_multicast_list = &emac_set_multicast_list;
	ndev->do_ioctl = &emac_ioctl;
	SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);
	if (emacdata->tah_idx >= 0)
		ndev->features = NETIF_F_IP_CSUM | NETIF_F_SG;
#ifdef CONFIG_NET_POLL_CONTROLLER
	ndev->poll_controller = emac_netpoll;
#endif

	SET_MODULE_OWNER(ndev);

	rc = register_netdev(ndev);

	printk(KERN_INFO "%s: IBM emac, MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
	       ndev->name,
	       ndev->dev_addr[0], ndev->dev_addr[1], ndev->dev_addr[2],
	       ndev->dev_addr[3], ndev->dev_addr[4], ndev->dev_addr[5]);
	printk(KERN_INFO "%s: Found %s PHY (0x%02x)\n",
	       ndev->name, ep->phy_mii.def->name, ep->mii_phy_addr);

bail:
	if (rc && commac_reg)
		mal_unregister_commac(ep->mal, &ep->commac);
static int emac_probe(struct ocp_device *ocpdev)
	struct ocp_device *maldev;
	struct ibm_ocp_mal *mal;
	struct ocp_func_emac_data *emacdata;

	emacdata = (struct ocp_func_emac_data *)ocpdev->def->additions;
	if (emacdata == NULL) {
		printk(KERN_ERR "emac%d: Missing additional data!\n",
		       ocpdev->def->index);
		return -ENODEV;
	}

	/* Get the MAL device */
	maldev = ocp_find_device(OCP_ANY_ID, OCP_FUNC_MAL, emacdata->mal_idx);
	if (maldev == NULL) {
		printk(KERN_ERR "No maldev\n");

	/*
	 * Get MAL driver data; it must be here due to link order.
	 * When the driver is modularized, symbol dependencies will
	 * ensure the MAL driver is already present if built as a
	 * module.
	 */
	mal = (struct ibm_ocp_mal *)ocp_get_drvdata(maldev);
	if (mal == NULL) {
		printk(KERN_ERR "No maldrv\n");

	/* If we depend on another EMAC for MDIO, wait for it to show up */
	if (emacdata->mdio_idx >= 0 &&
	    (emacdata->mdio_idx != ocpdev->def->index) && !mdio_ndev) {
		struct emac_def_dev *ddev;

		/* Add this index to the deferred init table */
		ddev = kmalloc(sizeof(struct emac_def_dev), GFP_KERNEL);
		if (ddev == NULL)
			return -ENOMEM;
		ddev->ocpdev = ocpdev;
		ddev->mal = mal;
		list_add_tail(&ddev->link, &emac_init_list);
	} else {
		emac_init_device(ocpdev, mal);
	}
/* Structure for a device driver */
static struct ocp_device_id emac_ids[] = {
	{.vendor = OCP_ANY_ID, .function = OCP_FUNC_EMAC},
	{.vendor = OCP_VENDOR_INVALID}
};

static struct ocp_driver emac_driver = {
	.id_table = emac_ids,
	.probe = emac_probe,
	.remove = emac_remove,
};

static int __init emac_init(void)
	printk(KERN_INFO DRV_NAME ": " DRV_DESC ", version " DRV_VERSION "\n");
	printk(KERN_INFO "Maintained by " DRV_AUTHOR "\n");

	if (skb_res > 2) {
		printk(KERN_WARNING "Invalid skb_res: %d, cropping to 2\n",
		       skb_res);
		skb_res = 2;
	}

	return ocp_register_driver(&emac_driver);

static void __exit emac_exit(void)
	ocp_unregister_driver(&emac_driver);

module_init(emac_init);
module_exit(emac_exit);