/*
 * Combined Ethernet driver for Motorola MPC8xx and MPC82xx.
 *
 * Copyright (c) 2003 Intracom S.A.
 *  by Pantelis Antoniou <panto@intracom.gr>
 *
 * 2005 (c) MontaVista Software, Inc.
 * Vitaly Bordug <vbordug@ru.mvista.com>
 *
 * Heavily based on original FEC driver by Dan Malek <dan@embeddededge.com>
 * and modifications by Joakim Tjernlund <joakim.tjernlund@lumentis.se>
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <linux/fs.h>
#include <linux/platform_device.h>
#include <linux/phy.h>

#include <linux/vmalloc.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/uaccess.h>

#include "fs_enet.h"

/*************************************************/
static char version[] __devinitdata =
    DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")" "\n";

MODULE_AUTHOR("Pantelis Antoniou <panto@intracom.gr>");
MODULE_DESCRIPTION("Freescale Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

int fs_enet_debug = -1; /* -1 == use FS_ENET_DEF_MSG_ENABLE as value */
module_param(fs_enet_debug, int, 0);
MODULE_PARM_DESC(fs_enet_debug,
		 "Freescale bitmapped debugging message enable value");
#ifdef CONFIG_NET_POLL_CONTROLLER
static void fs_enet_netpoll(struct net_device *dev);
#endif
static void fs_set_multicast_list(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	(*fep->ops->set_multicast_list)(dev);
}
static void skb_align(struct sk_buff *skb, int align)
{
	int off = ((unsigned long)skb->data) & (align - 1);

	if (off)
		skb_reserve(skb, align - off);
}
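/*
 * Receive buffers handed to the hardware are expected to start on an
 * ENET_RX_ALIGN boundary; dev_alloc_skb() gives no such guarantee, so
 * every freshly allocated rx skb is passed through skb_align() before
 * it is mapped for DMA.
 */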
/* NAPI receive function */
static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
{
	struct fs_enet_private *fep = container_of(napi, struct fs_enet_private, napi);
	struct net_device *dev = to_net_dev(fep->dev);
	const struct fs_platform_info *fpi = fep->fpi;
	cbd_t __iomem *bdp;
	struct sk_buff *skb, *skbn, *skbt;
	int received = 0;
	u16 pkt_len, sc;
	int curidx;

	if (!netif_running(dev))
		return 0;

	/*
	 * First, grab all of the stats for the incoming packet.
	 * These get messed up if we get called due to a busy condition.
	 */
	bdp = fep->cur_rx;

	/* clear RX status bits for napi*/
	(*fep->ops->napi_clear_rx_event)(dev);

	while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) {
		curidx = bdp - fep->rx_bd_base;

		/*
		 * Since we have allocated space to hold a complete frame,
		 * the last indicator should be set.
		 */
		if ((sc & BD_ENET_RX_LAST) == 0)
			printk(KERN_WARNING DRV_MODULE_NAME
			       ": %s rcv is not +last\n",
			       dev->name);

		/*
		 * Check for errors.
		 */
		if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL |
			  BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) {
			fep->stats.rx_errors++;
			/* Frame too long or too short. */
			if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
				fep->stats.rx_length_errors++;
			/* Frame alignment */
			if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL))
				fep->stats.rx_frame_errors++;
			/* CRC Error */
			if (sc & BD_ENET_RX_CR)
				fep->stats.rx_crc_errors++;
			/* FIFO overrun */
			if (sc & BD_ENET_RX_OV)
				fep->stats.rx_crc_errors++;

			skb = fep->rx_skbuff[curidx];

			dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
				L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
				DMA_FROM_DEVICE);

			skbn = skb;

		} else {
			skb = fep->rx_skbuff[curidx];

			dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
				L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
				DMA_FROM_DEVICE);

			/*
			 * Process the incoming frame.
			 */
			fep->stats.rx_packets++;
			pkt_len = CBDR_DATLEN(bdp) - 4;	/* remove CRC */
			fep->stats.rx_bytes += pkt_len + 4;

			if (pkt_len <= fpi->rx_copybreak) {
				/* +2 to make IP header L1 cache aligned */
				skbn = dev_alloc_skb(pkt_len + 2);
				if (skbn != NULL) {
					skb_reserve(skbn, 2);	/* align IP header */
					skb_copy_from_linear_data(skb,
						      skbn->data, pkt_len);
					/* swap */
					skbt = skb;
					skb = skbn;
					skbn = skbt;
				}
			} else {
				skbn = dev_alloc_skb(ENET_RX_FRSIZE);

				if (skbn)
					skb_align(skbn, ENET_RX_ALIGN);
			}

			if (skbn != NULL) {
				skb_put(skb, pkt_len);	/* Make room */
				skb->protocol = eth_type_trans(skb, dev);
				received++;
				netif_receive_skb(skb);
			} else {
				printk(KERN_WARNING DRV_MODULE_NAME
				       ": %s Memory squeeze, dropping packet.\n",
				       dev->name);
				fep->stats.rx_dropped++;
				skbn = skb;
			}
		}

		fep->rx_skbuff[curidx] = skbn;
		CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skbn->data,
			     L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
			     DMA_FROM_DEVICE));
		CBDW_DATLEN(bdp, 0);
		CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY);

		/*
		 * Update BD pointer to next entry.
		 */
		if ((sc & BD_ENET_RX_WRAP) == 0)
			bdp++;
		else
			bdp = fep->rx_bd_base;

		(*fep->ops->rx_bd_done)(dev);

		if (received >= budget)
			break;
	}

	fep->cur_rx = bdp;

	if (received < budget) {
		/* done */
		netif_rx_complete(dev, napi);
		(*fep->ops->napi_enable_rx)(dev);
	}
	return received;
}
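/*
 * Note the rx_copybreak trade-off above: frames no longer than
 * fpi->rx_copybreak are copied into a small freshly allocated skb and
 * the original buffer stays on the ring; larger frames are passed up
 * whole and replaced with a new realigned ENET_RX_FRSIZE buffer.
 * Either way the descriptor is re-armed with BD_ENET_RX_EMPTY.
 */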
/* non NAPI receive function */
static int fs_enet_rx_non_napi(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	const struct fs_platform_info *fpi = fep->fpi;
	cbd_t __iomem *bdp;
	struct sk_buff *skb, *skbn, *skbt;
	int received = 0;
	u16 pkt_len, sc;
	int curidx;
	/*
	 * First, grab all of the stats for the incoming packet.
	 * These get messed up if we get called due to a busy condition.
	 */
	bdp = fep->cur_rx;

	while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) {

		curidx = bdp - fep->rx_bd_base;

		/*
		 * Since we have allocated space to hold a complete frame,
		 * the last indicator should be set.
		 */
		if ((sc & BD_ENET_RX_LAST) == 0)
			printk(KERN_WARNING DRV_MODULE_NAME
			       ": %s rcv is not +last\n",
			       dev->name);

		/*
		 * Check for errors.
		 */
		if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL |
			  BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) {
			fep->stats.rx_errors++;
			/* Frame too long or too short. */
			if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
				fep->stats.rx_length_errors++;
			/* Frame alignment */
			if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL))
				fep->stats.rx_frame_errors++;
			/* CRC Error */
			if (sc & BD_ENET_RX_CR)
				fep->stats.rx_crc_errors++;
			/* FIFO overrun */
			if (sc & BD_ENET_RX_OV)
				fep->stats.rx_crc_errors++;

			skb = fep->rx_skbuff[curidx];

			dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
				L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
				DMA_FROM_DEVICE);

			skbn = skb;

		} else {
			skb = fep->rx_skbuff[curidx];

			dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
				L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
				DMA_FROM_DEVICE);

			/*
			 * Process the incoming frame.
			 */
			fep->stats.rx_packets++;
			pkt_len = CBDR_DATLEN(bdp) - 4;	/* remove CRC */
			fep->stats.rx_bytes += pkt_len + 4;

			if (pkt_len <= fpi->rx_copybreak) {
				/* +2 to make IP header L1 cache aligned */
				skbn = dev_alloc_skb(pkt_len + 2);
				if (skbn != NULL) {
					skb_reserve(skbn, 2);	/* align IP header */
					skb_copy_from_linear_data(skb,
						      skbn->data, pkt_len);
					/* swap */
					skbt = skb;
					skb = skbn;
					skbn = skbt;
				}
			} else {
				skbn = dev_alloc_skb(ENET_RX_FRSIZE);

				if (skbn)
					skb_align(skbn, ENET_RX_ALIGN);
			}

			if (skbn != NULL) {
				skb_put(skb, pkt_len);	/* Make room */
				skb->protocol = eth_type_trans(skb, dev);
				received++;
				netif_rx(skb);
			} else {
				printk(KERN_WARNING DRV_MODULE_NAME
				       ": %s Memory squeeze, dropping packet.\n",
				       dev->name);
				fep->stats.rx_dropped++;
				skbn = skb;
			}
		}

		fep->rx_skbuff[curidx] = skbn;
		CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skbn->data,
			     L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
			     DMA_FROM_DEVICE));
		CBDW_DATLEN(bdp, 0);
		CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY);

		/*
		 * Update BD pointer to next entry.
		 */
		if ((sc & BD_ENET_RX_WRAP) == 0)
			bdp++;
		else
			bdp = fep->rx_bd_base;

		(*fep->ops->rx_bd_done)(dev);
	}

	fep->cur_rx = bdp;

	return 0;
}
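/*
 * Transmit completion: fs_enet_tx() runs from the interrupt handler
 * under fep->tx_lock, walking the ring from dirty_tx and reclaiming
 * every descriptor the hardware has finished with; the queue is woken
 * only if the ring had been completely full (tx_free was zero).
 */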
static void fs_enet_tx(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	cbd_t __iomem *bdp;
	struct sk_buff *skb;
	int dirtyidx, do_wake, do_restart;
	u16 sc;

	spin_lock(&fep->tx_lock);
	bdp = fep->dirty_tx;

	do_wake = do_restart = 0;
	while (((sc = CBDR_SC(bdp)) & BD_ENET_TX_READY) == 0) {
		dirtyidx = bdp - fep->tx_bd_base;

		if (fep->tx_free == fep->tx_ring)
			break;

		skb = fep->tx_skbuff[dirtyidx];

		/*
		 * Check for errors.
		 */
		if (sc & (BD_ENET_TX_HB | BD_ENET_TX_LC |
			  BD_ENET_TX_RL | BD_ENET_TX_UN | BD_ENET_TX_CSL)) {

			if (sc & BD_ENET_TX_HB)	/* No heartbeat */
				fep->stats.tx_heartbeat_errors++;
			if (sc & BD_ENET_TX_LC)	/* Late collision */
				fep->stats.tx_window_errors++;
			if (sc & BD_ENET_TX_RL)	/* Retrans limit */
				fep->stats.tx_aborted_errors++;
			if (sc & BD_ENET_TX_UN)	/* Underrun */
				fep->stats.tx_fifo_errors++;
			if (sc & BD_ENET_TX_CSL)	/* Carrier lost */
				fep->stats.tx_carrier_errors++;

			if (sc & (BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN)) {
				fep->stats.tx_errors++;
				do_restart = 1;
			}
		} else
			fep->stats.tx_packets++;

		if (sc & BD_ENET_TX_READY)
			printk(KERN_WARNING DRV_MODULE_NAME
			       ": %s HEY! Enet xmit interrupt and TX_READY.\n",
			       dev->name);

		/*
		 * Deferred means some collisions occurred during transmit,
		 * but we eventually sent the packet OK.
		 */
		if (sc & BD_ENET_TX_DEF)
			fep->stats.collisions++;

		/* unmap */
		dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
				skb->len, DMA_TO_DEVICE);

		/*
		 * Free the sk buffer associated with this last transmit.
		 */
		dev_kfree_skb_irq(skb);
		fep->tx_skbuff[dirtyidx] = NULL;

		/*
		 * Update pointer to next buffer descriptor to be transmitted.
		 */
		if ((sc & BD_ENET_TX_WRAP) == 0)
			bdp++;
		else
			bdp = fep->tx_bd_base;

		/*
		 * Since we have freed up a buffer, the ring is no longer
		 * full.
		 */
		if (!fep->tx_free++)
			do_wake = 1;
	}

	fep->dirty_tx = bdp;

	if (do_restart)
		(*fep->ops->tx_restart)(dev);

	spin_unlock(&fep->tx_lock);

	if (do_wake)
		netif_wake_queue(dev);
}
/*
 * The interrupt handler.
 * This is called from the MPC core interrupt.
 */
static irqreturn_t
fs_enet_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct fs_enet_private *fep;
	const struct fs_platform_info *fpi;
	u32 int_events;
	u32 int_clr_events;
	int nr, napi_ok;
	int handled;

	fep = netdev_priv(dev);
	fpi = fep->fpi;

	nr = 0;
	while ((int_events = (*fep->ops->get_int_events)(dev)) != 0) {
		nr++;

		int_clr_events = int_events;
		if (fpi->use_napi)
			int_clr_events &= ~fep->ev_napi_rx;

		(*fep->ops->clear_int_events)(dev, int_clr_events);

		if (int_events & fep->ev_err)
			(*fep->ops->ev_error)(dev, int_events);

		if (int_events & fep->ev_rx) {
			if (!fpi->use_napi)
				fs_enet_rx_non_napi(dev);
			else {
				napi_ok = napi_schedule_prep(&fep->napi);

				(*fep->ops->napi_disable_rx)(dev);
				(*fep->ops->clear_int_events)(dev, fep->ev_napi_rx);

				/* NOTE: it is possible for FCCs in NAPI mode */
				/* to submit a spurious interrupt while in poll */
				if (napi_ok)
					__netif_rx_schedule(dev, &fep->napi);
			}
		}

		if (int_events & fep->ev_tx)
			fs_enet_tx(dev);
	}

	handled = nr > 0;
	return IRQ_RETVAL(handled);
}
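/*
 * fs_init_bds()/fs_cleanup_bds() below manage the buffer descriptor
 * rings: every rx BD gets a freshly allocated, realigned skb mapped
 * for DMA and marked EMPTY, tx BDs start out unowned, and the last BD
 * of each ring carries the WRAP bit so the controller loops back to
 * the ring base.
 */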
void fs_init_bds(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	cbd_t __iomem *bdp;
	struct sk_buff *skb;
	int i;

	fs_cleanup_bds(dev);

	fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
	fep->tx_free = fep->tx_ring;
	fep->cur_rx = fep->rx_bd_base;

	/*
	 * Initialize the receive buffer descriptors.
	 */
	for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
		skb = dev_alloc_skb(ENET_RX_FRSIZE);
		if (skb == NULL) {
			printk(KERN_WARNING DRV_MODULE_NAME
			       ": %s Memory squeeze, unable to allocate skb\n",
			       dev->name);
			break;
		}
		skb_align(skb, ENET_RX_ALIGN);
		fep->rx_skbuff[i] = skb;
		CBDW_BUFADDR(bdp,
			dma_map_single(fep->dev, skb->data,
				L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
				DMA_FROM_DEVICE));
		CBDW_DATLEN(bdp, 0);	/* zero */
		CBDW_SC(bdp, BD_ENET_RX_EMPTY |
			((i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP));
	}
	/*
	 * if we failed, fillup remainder
	 */
	for (; i < fep->rx_ring; i++, bdp++) {
		fep->rx_skbuff[i] = NULL;
		CBDW_SC(bdp, (i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP);
	}

	/*
	 * ...and the same for transmit.
	 */
	for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) {
		fep->tx_skbuff[i] = NULL;
		CBDW_BUFADDR(bdp, 0);
		CBDW_DATLEN(bdp, 0);
		CBDW_SC(bdp, (i < fep->tx_ring - 1) ? 0 : BD_SC_WRAP);
	}
}
void fs_cleanup_bds(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	struct sk_buff *skb;
	cbd_t __iomem *bdp;
	int i;

	/*
	 * Reset SKB transmit buffers.
	 */
	for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) {
		if ((skb = fep->tx_skbuff[i]) == NULL)
			continue;

		/* unmap */
		dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
				skb->len, DMA_TO_DEVICE);

		fep->tx_skbuff[i] = NULL;
		dev_kfree_skb(skb);
	}

	/*
	 * Reset SKB receive buffers
	 */
	for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
		if ((skb = fep->rx_skbuff[i]) == NULL)
			continue;

		/* unmap */
		dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
			L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
			DMA_FROM_DEVICE);

		fep->rx_skbuff[i] = NULL;
		dev_kfree_skb(skb);
	}
}
/**********************************************************************************/
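/*
 * Transmit path: claim the BD at cur_tx, map the skb for DMA and hand
 * the descriptor to the hardware with a single status write; the
 * matching unmap and kfree happen later in fs_enet_tx() when the
 * completion interrupt fires.
 */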
static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	cbd_t __iomem *bdp;
	int curidx;
	u16 sc;
	unsigned long flags;

	spin_lock_irqsave(&fep->tx_lock, flags);

	/*
	 * Fill in a Tx ring entry
	 */
	bdp = fep->cur_tx;

	if (!fep->tx_free || (CBDR_SC(bdp) & BD_ENET_TX_READY)) {
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&fep->tx_lock, flags);

		/*
		 * Ooops.  All transmit buffers are full.  Bail out.
		 * This should not happen, since the tx queue should be stopped.
		 */
		printk(KERN_WARNING DRV_MODULE_NAME
		       ": %s tx queue full!.\n", dev->name);
		return NETDEV_TX_BUSY;
	}

	curidx = bdp - fep->tx_bd_base;
	/*
	 * Clear all of the status flags.
	 */
	CBDC_SC(bdp, BD_ENET_TX_STATS);

	/*
	 * Save skb pointer.
	 */
	fep->tx_skbuff[curidx] = skb;

	fep->stats.tx_bytes += skb->len;

	/*
	 * Push the data cache so the CPM does not get stale memory data.
	 */
	CBDW_BUFADDR(bdp, dma_map_single(fep->dev,
				skb->data, skb->len, DMA_TO_DEVICE));
	CBDW_DATLEN(bdp, skb->len);

	dev->trans_start = jiffies;

	/*
	 * If this was the last BD in the ring, start at the beginning again.
	 */
	if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0)
		fep->cur_tx++;
	else
		fep->cur_tx = fep->tx_bd_base;

	if (!--fep->tx_free)
		netif_stop_queue(dev);

	/* Trigger transmission start */
	sc = BD_ENET_TX_READY | BD_ENET_TX_INTR |
	     BD_ENET_TX_LAST | BD_ENET_TX_TC;

	/* note that while FEC does not have this bit
	 * it marks it as available for software use
	 * yay for hw reuse :) */
	sc |= BD_ENET_TX_PAD;
	CBDS_SC(bdp, sc);

	(*fep->ops->tx_kickstart)(dev);

	spin_unlock_irqrestore(&fep->tx_lock, flags);

	return NETDEV_TX_OK;
}
static int fs_request_irq(struct net_device *dev, int irq, const char *name,
		irq_handler_t irqf)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	(*fep->ops->pre_request_irq)(dev, irq);
	return request_irq(irq, irqf, IRQF_SHARED, name, dev);
}

static void fs_free_irq(struct net_device *dev, int irq)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	free_irq(irq, dev);
	(*fep->ops->post_free_irq)(dev, irq);
}
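/*
 * Transmit watchdog, called by the networking core when a packet has
 * been queued for longer than ndev->watchdog_timeo (2 * HZ here)
 * without completing: recover by restarting both the controller and
 * the PHY.
 */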
static void fs_timeout(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	unsigned long flags;
	int wake = 0;

	fep->stats.tx_errors++;

	spin_lock_irqsave(&fep->lock, flags);

	if (dev->flags & IFF_UP) {
		phy_stop(fep->phydev);
		(*fep->ops->stop)(dev);
		(*fep->ops->restart)(dev);
		phy_start(fep->phydev);
	}

	phy_start(fep->phydev);
	wake = fep->tx_free && !(CBDR_SC(fep->cur_tx) & BD_ENET_TX_READY);
	spin_unlock_irqrestore(&fep->lock, flags);

	if (wake)
		netif_wake_queue(dev);
}
/*-----------------------------------------------------------------------------
 *  generic link-change handler - should be sufficient for most cases
 *-----------------------------------------------------------------------------*/
static void generic_adjust_link(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	struct phy_device *phydev = fep->phydev;
	int new_state = 0;

	if (phydev->link) {
		/* adjust to duplex mode */
		if (phydev->duplex != fep->oldduplex) {
			new_state = 1;
			fep->oldduplex = phydev->duplex;
		}

		if (phydev->speed != fep->oldspeed) {
			new_state = 1;
			fep->oldspeed = phydev->speed;
		}

		if (!fep->oldlink) {
			new_state = 1;
			fep->oldlink = 1;
			netif_carrier_on(dev);
			netif_start_queue(dev);
		}

		if (new_state)
			fep->ops->restart(dev);
	} else if (fep->oldlink) {
		new_state = 1;
		fep->oldlink = 0;
		fep->oldspeed = 0;
		fep->oldduplex = -1;
		netif_carrier_off(dev);
		netif_stop_queue(dev);
	}

	if (new_state && netif_msg_link(fep))
		phy_print_status(phydev);
}
static void fs_adjust_link(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&fep->lock, flags);

	if (fep->ops->adjust_link)
		fep->ops->adjust_link(dev);
	else
		generic_adjust_link(dev);

	spin_unlock_irqrestore(&fep->lock, flags);
}
static int fs_init_phy(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	struct phy_device *phydev;

	fep->oldlink = 0;
	fep->oldspeed = 0;
	fep->oldduplex = -1;
	if (fep->fpi->bus_id)
		phydev = phy_connect(dev, fep->fpi->bus_id, &fs_adjust_link, 0,
				PHY_INTERFACE_MODE_MII);
	else {
		printk("No phy bus ID specified in BSP code\n");
		return -EINVAL;
	}
	if (IS_ERR(phydev)) {
		printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
		return PTR_ERR(phydev);
	}

	fep->phydev = phydev;

	return 0;
}
static int fs_enet_open(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	int r;
	int err;

	napi_enable(&fep->napi);

	/* Install our interrupt handler. */
	r = fs_request_irq(dev, fep->interrupt, "fs_enet-mac", fs_enet_interrupt);
	if (r != 0) {
		printk(KERN_ERR DRV_MODULE_NAME
		       ": %s Could not allocate FS_ENET IRQ!", dev->name);
		napi_disable(&fep->napi);
		return -EINVAL;
	}

	err = fs_init_phy(dev);
	if (err) {
		napi_disable(&fep->napi);
		return err;
	}
	phy_start(fep->phydev);

	return 0;
}
static int fs_enet_close(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	unsigned long flags;

	netif_stop_queue(dev);
	netif_carrier_off(dev);
	napi_disable(&fep->napi);
	phy_stop(fep->phydev);

	spin_lock_irqsave(&fep->lock, flags);
	spin_lock(&fep->tx_lock);
	(*fep->ops->stop)(dev);
	spin_unlock(&fep->tx_lock);
	spin_unlock_irqrestore(&fep->lock, flags);

	/* release any irqs */
	phy_disconnect(fep->phydev);
	fep->phydev = NULL;
	fs_free_irq(dev, fep->interrupt);

	return 0;
}
static struct net_device_stats *fs_enet_get_stats(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	return &fep->stats;
}

/*************************************************************************/
static void fs_get_drvinfo(struct net_device *dev,
			struct ethtool_drvinfo *info)
{
	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);
}

static int fs_get_regs_len(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	return (*fep->ops->get_regs_len)(dev);
}

static void fs_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			 void *p)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	unsigned long flags;
	int r, len;

	len = regs->len;

	spin_lock_irqsave(&fep->lock, flags);
	r = (*fep->ops->get_regs)(dev, p, &len);
	spin_unlock_irqrestore(&fep->lock, flags);

	if (r == 0)
		regs->version = 0;
}
static int fs_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	return phy_ethtool_gset(fep->phydev, cmd);
}

static int fs_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	phy_ethtool_sset(fep->phydev, cmd);
	return 0;
}

static int fs_nway_reset(struct net_device *dev)
{
	return 0;
}

static u32 fs_get_msglevel(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	return fep->msg_enable;
}

static void fs_set_msglevel(struct net_device *dev, u32 value)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	fep->msg_enable = value;
}
static const struct ethtool_ops fs_ethtool_ops = {
	.get_drvinfo = fs_get_drvinfo,
	.get_regs_len = fs_get_regs_len,
	.get_settings = fs_get_settings,
	.set_settings = fs_set_settings,
	.nway_reset = fs_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_msglevel = fs_get_msglevel,
	.set_msglevel = fs_set_msglevel,
	.set_tx_csum = ethtool_op_set_tx_csum,	/* local! */
	.set_sg = ethtool_op_set_sg,
	.get_regs = fs_get_regs,
};
static int fs_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&rq->ifr_data;
	unsigned long flags;
	int rc;

	if (!netif_running(dev))
		return -EINVAL;

	spin_lock_irqsave(&fep->lock, flags);
	rc = phy_mii_ioctl(fep->phydev, mii, cmd);
	spin_unlock_irqrestore(&fep->lock, flags);
	return rc;
}
extern int fs_mii_connect(struct net_device *dev);
extern void fs_mii_disconnect(struct net_device *dev);
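/*
 * fs_init_instance() allocates the net_device with the two skb
 * pointer arrays appended to the private area, picks the matching
 * FEC/SCC/FCC ops vector for this controller index, allocates the
 * descriptor rings and registers the device; on failure it unwinds
 * whatever had been set up.
 */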
static struct net_device *fs_init_instance(struct device *dev,
		struct fs_platform_info *fpi)
{
	struct net_device *ndev = NULL;
	struct fs_enet_private *fep = NULL;
	int privsize, i, r, err = 0, registered = 0;

	fpi->fs_no = fs_get_id(fpi);

	if ((unsigned int)fpi->fs_no >= FS_MAX_INDEX)
		return ERR_PTR(-EINVAL);

	privsize = sizeof(*fep) + (sizeof(struct sk_buff **) *
			    (fpi->rx_ring + fpi->tx_ring));

	ndev = alloc_etherdev(privsize);
	if (!ndev) {
		err = -ENOMEM;
		goto err;
	}

	fep = netdev_priv(ndev);

	fep->dev = dev;
	dev_set_drvdata(dev, ndev);
	fep->fpi = fpi;
	if (fpi->init_ioports)
		fpi->init_ioports((struct fs_platform_info *)fpi);

#ifdef CONFIG_FS_ENET_HAS_FEC
	if (fs_get_fec_index(fpi->fs_no) >= 0)
		fep->ops = &fs_fec_ops;
#endif

#ifdef CONFIG_FS_ENET_HAS_SCC
	if (fs_get_scc_index(fpi->fs_no) >= 0)
		fep->ops = &fs_scc_ops;
#endif

#ifdef CONFIG_FS_ENET_HAS_FCC
	if (fs_get_fcc_index(fpi->fs_no) >= 0)
		fep->ops = &fs_fcc_ops;
#endif

	if (fep->ops == NULL) {
		printk(KERN_ERR DRV_MODULE_NAME
		       ": %s No matching ops found (%d).\n",
		       ndev->name, fpi->fs_no);
		err = -EINVAL;
		goto err;
	}

	r = (*fep->ops->setup_data)(ndev);
	if (r != 0) {
		printk(KERN_ERR DRV_MODULE_NAME
		       ": %s setup_data failed\n",
		       ndev->name);
		err = r;
		goto err;
	}

	/* point rx_skbuff, tx_skbuff */
	fep->rx_skbuff = (struct sk_buff **)&fep[1];
	fep->tx_skbuff = fep->rx_skbuff + fpi->rx_ring;

	/* init locks */
	spin_lock_init(&fep->lock);
	spin_lock_init(&fep->tx_lock);

	/*
	 * Set the Ethernet address.
	 */
	for (i = 0; i < 6; i++)
		ndev->dev_addr[i] = fpi->macaddr[i];

	r = (*fep->ops->allocate_bd)(ndev);

	if (fep->ring_base == NULL) {
		printk(KERN_ERR DRV_MODULE_NAME
		       ": %s buffer descriptor alloc failed (%d).\n", ndev->name, r);
		err = r;
		goto err;
	}

	/*
	 * Set receive and transmit descriptor base.
	 */
	fep->rx_bd_base = fep->ring_base;
	fep->tx_bd_base = fep->rx_bd_base + fpi->rx_ring;

	/* initialize ring size variables */
	fep->tx_ring = fpi->tx_ring;
	fep->rx_ring = fpi->rx_ring;

	/*
	 * The FEC Ethernet specific entries in the device structure.
	 */
	ndev->open = fs_enet_open;
	ndev->hard_start_xmit = fs_enet_start_xmit;
	ndev->tx_timeout = fs_timeout;
	ndev->watchdog_timeo = 2 * HZ;
	ndev->stop = fs_enet_close;
	ndev->get_stats = fs_enet_get_stats;
	ndev->set_multicast_list = fs_set_multicast_list;

#ifdef CONFIG_NET_POLL_CONTROLLER
	ndev->poll_controller = fs_enet_netpoll;
#endif

	netif_napi_add(ndev, &fep->napi,
		       fs_enet_rx_napi, fpi->napi_weight);

	ndev->ethtool_ops = &fs_ethtool_ops;
	ndev->do_ioctl = fs_ioctl;

	init_timer(&fep->phy_timer_list);

	netif_carrier_off(ndev);

	err = register_netdev(ndev);
	if (err != 0) {
		printk(KERN_ERR DRV_MODULE_NAME
		       ": %s register_netdev failed.\n", ndev->name);
		goto err;
	}
	registered = 1;

	return ndev;

err:
	if (ndev != NULL) {
		if (registered)
			unregister_netdev(ndev);

		if (fep != NULL && fep->ops != NULL) {
			(*fep->ops->free_bd)(ndev);
			(*fep->ops->cleanup_data)(ndev);
		}

		free_netdev(ndev);
	}

	dev_set_drvdata(dev, NULL);

	return ERR_PTR(err);
}
static int fs_cleanup_instance(struct net_device *ndev)
{
	struct fs_enet_private *fep;
	const struct fs_platform_info *fpi;
	struct device *dev;

	if (ndev == NULL)
		return -EINVAL;

	fep = netdev_priv(ndev);
	if (fep == NULL)
		return -EINVAL;

	fpi = fep->fpi;

	unregister_netdev(ndev);

	dma_free_coherent(fep->dev, (fpi->tx_ring + fpi->rx_ring) * sizeof(cbd_t),
			  fep->ring_base, fep->ring_mem_addr);

	/* reset it */
	(*fep->ops->cleanup_data)(ndev);

	dev = fep->dev;
	if (dev != NULL) {
		dev_set_drvdata(dev, NULL);
		fep->dev = NULL;
	}

	free_netdev(ndev);

	return 0;
}
/**************************************************************************************/

/* handy pointer to the immap */
void *fs_enet_immap = NULL;
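/*
 * Map the on-chip register block once for all MAC instances: 64K of
 * MPC8xx immap on CPM1, 256K at CPM_MAP_ADDR on CPM2; the individual
 * MAC drivers reach it through fs_enet_immap.
 */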
static int setup_immap(void)
{
	phys_addr_t paddr = 0;
	unsigned long size = 0;

#ifdef CONFIG_CPM1
	paddr = IMAP_ADDR;
	size = 0x10000;	/* map 64K */
#endif

#ifdef CONFIG_CPM2
	paddr = CPM_MAP_ADDR;
	size = 0x40000;	/* map 256 K */
#endif
	fs_enet_immap = ioremap(paddr, size);
	if (fs_enet_immap == NULL)
		return -EBADF;	/* XXX ahem; maybe just BUG_ON? */

	return 0;
}

static void cleanup_immap(void)
{
	if (fs_enet_immap != NULL) {
		iounmap(fs_enet_immap);
		fs_enet_immap = NULL;
	}
}
/**************************************************************************************/
static int __devinit fs_enet_probe(struct device *dev)
{
	struct net_device *ndev;

	/* no fixup - no device */
	if (dev->platform_data == NULL) {
		printk(KERN_INFO "fs_enet: "
				"probe called with no platform data; "
				"remove unused devices\n");
		return -ENODEV;
	}

	ndev = fs_init_instance(dev, dev->platform_data);
	if (IS_ERR(ndev))
		return PTR_ERR(ndev);
	return 0;
}
static int fs_enet_remove(struct device *dev)
{
	return fs_cleanup_instance(dev_get_drvdata(dev));
}
static struct device_driver fs_enet_fec_driver = {
	.name = "fsl-cpm-fec",
	.bus = &platform_bus_type,
	.probe = fs_enet_probe,
	.remove = fs_enet_remove,
#ifdef CONFIG_PM
/*	.suspend = fs_enet_suspend,	TODO */
/*	.resume = fs_enet_resume,	TODO */
#endif
};

static struct device_driver fs_enet_scc_driver = {
	.name = "fsl-cpm-scc",
	.bus = &platform_bus_type,
	.probe = fs_enet_probe,
	.remove = fs_enet_remove,
#ifdef CONFIG_PM
/*	.suspend = fs_enet_suspend,	TODO */
/*	.resume = fs_enet_resume,	TODO */
#endif
};

static struct device_driver fs_enet_fcc_driver = {
	.name = "fsl-cpm-fcc",
	.bus = &platform_bus_type,
	.probe = fs_enet_probe,
	.remove = fs_enet_remove,
#ifdef CONFIG_PM
/*	.suspend = fs_enet_suspend,	TODO */
/*	.resume = fs_enet_resume,	TODO */
#endif
};
static int __init fs_init(void)
{
	int r;

	printk(KERN_INFO "%s", version);

	r = setup_immap();
	if (r != 0)
		return r;

#ifdef CONFIG_FS_ENET_HAS_FCC
	/* let's insert mii stuff */
	r = fs_enet_mdio_bb_init();

	if (r != 0) {
		printk(KERN_ERR DRV_MODULE_NAME
			": BB PHY init failed.\n");
		return r;
	}
	r = driver_register(&fs_enet_fcc_driver);
	if (r != 0)
		goto err;
#endif

#ifdef CONFIG_FS_ENET_HAS_FEC
	r = fs_enet_mdio_fec_init();
	if (r != 0) {
		printk(KERN_ERR DRV_MODULE_NAME
			": FEC PHY init failed.\n");
		return r;
	}

	r = driver_register(&fs_enet_fec_driver);
	if (r != 0)
		goto err;
#endif

#ifdef CONFIG_FS_ENET_HAS_SCC
	r = driver_register(&fs_enet_scc_driver);
	if (r != 0)
		goto err;
#endif

	return 0;
err:
	cleanup_immap();
	return r;
}
static void __exit fs_cleanup(void)
{
	driver_unregister(&fs_enet_fec_driver);
	driver_unregister(&fs_enet_fcc_driver);
	driver_unregister(&fs_enet_scc_driver);
	cleanup_immap();
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void fs_enet_netpoll(struct net_device *dev)
{
	disable_irq(dev->irq);
	fs_enet_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif
/**************************************************************************************/

module_init(fs_init);
module_exit(fs_cleanup);