/*
 * Combined Ethernet driver for Motorola MPC8xx and MPC82xx.
 *
 * Copyright (c) 2003 Intracom S.A.
 *  by Pantelis Antoniou <panto@intracom.gr>
 *
 * 2005 (c) MontaVista Software, Inc.
 * Vitaly Bordug <vbordug@ru.mvista.com>
 *
 * Heavily based on original FEC driver by Dan Malek <dan@embeddededge.com>
 * and modifications by Joakim Tjernlund <joakim.tjernlund@lumentis.se>
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <linux/fs.h>
#include <linux/platform_device.h>
#include <linux/phy.h>

#include <linux/vmalloc.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/uaccess.h>

#ifdef CONFIG_PPC_CPM_NEW_BINDING
#include <asm/of_platform.h>
#endif

#include "fs_enet.h"

/*************************************************/

#ifndef CONFIG_PPC_CPM_NEW_BINDING
static char version[] __devinitdata =
    DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")" "\n";
#endif

MODULE_AUTHOR("Pantelis Antoniou <panto@intracom.gr>");
MODULE_DESCRIPTION("Freescale Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int fs_enet_debug = -1; /* -1 == use FS_ENET_DEF_MSG_ENABLE as value */
module_param(fs_enet_debug, int, 0);
MODULE_PARM_DESC(fs_enet_debug,
                 "Freescale bitmapped debugging message enable value");

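/*
 * Usage sketch (an illustration, assuming the module is built as
 * fs_enet.ko): "modprobe fs_enet fs_enet_debug=0x3f" enables the
 * corresponding NETIF_MSG_* message classes, while the default of -1
 * leaves the driver's FS_ENET_DEF_MSG_ENABLE value in effect.
 */
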
#ifdef CONFIG_NET_POLL_CONTROLLER
static void fs_enet_netpoll(struct net_device *dev);
#endif

static void fs_set_multicast_list(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);

        (*fep->ops->set_multicast_list)(dev);
}

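/*
 * RX buffers are aligned to ENET_RX_ALIGN before being handed to the
 * controller: skb_align() advances skb->data of a freshly allocated
 * skb to the next such boundary.
 */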
static void skb_align(struct sk_buff *skb, int align)
{
        int off = ((unsigned long)skb->data) & (align - 1);

        if (off)
                skb_reserve(skb, align - off);
}

/* NAPI receive function */
static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
{
        struct fs_enet_private *fep = container_of(napi, struct fs_enet_private, napi);
        struct net_device *dev = fep->ndev;
        const struct fs_platform_info *fpi = fep->fpi;
        cbd_t __iomem *bdp;
        struct sk_buff *skb, *skbn, *skbt;
        int received = 0;
        u16 pkt_len, sc;
        int curidx;

        if (!netif_running(dev))
                return 0;

        /*
         * First, grab all of the stats for the incoming packet.
         * These get messed up if we get called due to a busy condition.
         */
        bdp = fep->cur_rx;

        /* clear RX status bits for napi */
        (*fep->ops->napi_clear_rx_event)(dev);

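        /*
         * Walk the RX ring: every descriptor the controller no longer
         * owns (EMPTY clear) holds a completed frame.  Stop when the
         * ring is drained or the NAPI budget is exhausted.
         */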
        while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) {
                curidx = bdp - fep->rx_bd_base;

                /*
                 * Since we have allocated space to hold a complete frame,
                 * the last indicator should be set.
                 */
                if ((sc & BD_ENET_RX_LAST) == 0)
                        printk(KERN_WARNING DRV_MODULE_NAME
                               ": %s rcv is not +last\n",
                               dev->name);

                /*
                 * Check for errors.
                 */
                if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL |
                          BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) {
                        fep->stats.rx_errors++;
                        /* Frame too long or too short. */
                        if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
                                fep->stats.rx_length_errors++;
                        /* Frame alignment */
                        if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL))
                                fep->stats.rx_frame_errors++;
                        /* CRC Error */
                        if (sc & BD_ENET_RX_CR)
                                fep->stats.rx_crc_errors++;
                        /* FIFO overrun */
                        if (sc & BD_ENET_RX_OV)
                                fep->stats.rx_fifo_errors++;

                        skb = fep->rx_skbuff[curidx];

                        dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
                                L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
                                DMA_FROM_DEVICE);

                        skbn = skb;

                } else {
                        skb = fep->rx_skbuff[curidx];

                        dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
                                L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
                                DMA_FROM_DEVICE);

                        /*
                         * Process the incoming frame.
                         */
                        fep->stats.rx_packets++;
                        pkt_len = CBDR_DATLEN(bdp) - 4; /* remove CRC */
                        fep->stats.rx_bytes += pkt_len + 4;

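                        /*
                         * Copybreak: for frames up to rx_copybreak bytes
                         * it is cheaper to copy into a small fresh skb
                         * and recycle the original DMA buffer than to
                         * pass the full-sized buffer up the stack.
                         */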
                        if (pkt_len <= fpi->rx_copybreak) {
                                /* +2 to make IP header L1 cache aligned */
                                skbn = dev_alloc_skb(pkt_len + 2);
                                if (skbn != NULL) {
                                        skb_reserve(skbn, 2);   /* align IP header */
                                        skb_copy_from_linear_data(skb,
                                                      skbn->data, pkt_len);
                                        /* swap */
                                        skbt = skb;
                                        skb = skbn;
                                        skbn = skbt;
                                }
                        } else {
                                skbn = dev_alloc_skb(ENET_RX_FRSIZE);

                                if (skbn)
                                        skb_align(skbn, ENET_RX_ALIGN);
                        }

                        if (skbn != NULL) {
                                skb_put(skb, pkt_len);  /* Make room */
                                skb->protocol = eth_type_trans(skb, dev);
                                received++;
                                netif_receive_skb(skb);
                        } else {
                                printk(KERN_WARNING DRV_MODULE_NAME
                                       ": %s Memory squeeze, dropping packet.\n",
                                       dev->name);
                                fep->stats.rx_dropped++;
                                skbn = skb;
                        }
                }

                fep->rx_skbuff[curidx] = skbn;
                CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skbn->data,
                             L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
                             DMA_FROM_DEVICE));
                CBDW_DATLEN(bdp, 0);
                CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY);

                /*
                 * Update BD pointer to next entry.
                 */
                if ((sc & BD_ENET_RX_WRAP) == 0)
                        bdp++;
                else
                        bdp = fep->rx_bd_base;

                (*fep->ops->rx_bd_done)(dev);

                if (received >= budget)
                        break;
        }

        fep->cur_rx = bdp;

        if (received < budget) {
                /* done */
                netif_rx_complete(dev, napi);
                (*fep->ops->napi_enable_rx)(dev);
        }
        return received;
}

/* non NAPI receive function */
static int fs_enet_rx_non_napi(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        const struct fs_platform_info *fpi = fep->fpi;
        cbd_t __iomem *bdp;
        struct sk_buff *skb, *skbn, *skbt;
        int received = 0;
        u16 pkt_len, sc;
        int curidx;
        /*
         * First, grab all of the stats for the incoming packet.
         * These get messed up if we get called due to a busy condition.
         */
        bdp = fep->cur_rx;

        while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) {

                curidx = bdp - fep->rx_bd_base;

                /*
                 * Since we have allocated space to hold a complete frame,
                 * the last indicator should be set.
                 */
                if ((sc & BD_ENET_RX_LAST) == 0)
                        printk(KERN_WARNING DRV_MODULE_NAME
                               ": %s rcv is not +last\n",
                               dev->name);

                /*
                 * Check for errors.
                 */
                if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL |
                          BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) {
                        fep->stats.rx_errors++;
                        /* Frame too long or too short. */
                        if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
                                fep->stats.rx_length_errors++;
                        /* Frame alignment */
                        if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL))
                                fep->stats.rx_frame_errors++;
                        /* CRC Error */
                        if (sc & BD_ENET_RX_CR)
                                fep->stats.rx_crc_errors++;
                        /* FIFO overrun */
                        if (sc & BD_ENET_RX_OV)
                                fep->stats.rx_fifo_errors++;

                        skb = fep->rx_skbuff[curidx];

                        dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
                                L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
                                DMA_FROM_DEVICE);

                        skbn = skb;

                } else {

                        skb = fep->rx_skbuff[curidx];

                        dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
                                L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
                                DMA_FROM_DEVICE);

                        /*
                         * Process the incoming frame.
                         */
                        fep->stats.rx_packets++;
                        pkt_len = CBDR_DATLEN(bdp) - 4; /* remove CRC */
                        fep->stats.rx_bytes += pkt_len + 4;

                        if (pkt_len <= fpi->rx_copybreak) {
                                /* +2 to make IP header L1 cache aligned */
                                skbn = dev_alloc_skb(pkt_len + 2);
                                if (skbn != NULL) {
                                        skb_reserve(skbn, 2);   /* align IP header */
                                        skb_copy_from_linear_data(skb,
                                                      skbn->data, pkt_len);
                                        /* swap */
                                        skbt = skb;
                                        skb = skbn;
                                        skbn = skbt;
                                }
                        } else {
                                skbn = dev_alloc_skb(ENET_RX_FRSIZE);

                                if (skbn)
                                        skb_align(skbn, ENET_RX_ALIGN);
                        }

                        if (skbn != NULL) {
                                skb_put(skb, pkt_len);  /* Make room */
                                skb->protocol = eth_type_trans(skb, dev);
                                received++;
                                netif_rx(skb);
                        } else {
                                printk(KERN_WARNING DRV_MODULE_NAME
                                       ": %s Memory squeeze, dropping packet.\n",
                                       dev->name);
                                fep->stats.rx_dropped++;
                                skbn = skb;
                        }
                }

                fep->rx_skbuff[curidx] = skbn;
                CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skbn->data,
                             L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
                             DMA_FROM_DEVICE));
                CBDW_DATLEN(bdp, 0);
                CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY);

                /*
                 * Update BD pointer to next entry.
                 */
                if ((sc & BD_ENET_RX_WRAP) == 0)
                        bdp++;
                else
                        bdp = fep->rx_bd_base;

                (*fep->ops->rx_bd_done)(dev);
        }

        fep->cur_rx = bdp;

        return 0;
}

static void fs_enet_tx(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        cbd_t __iomem *bdp;
        struct sk_buff *skb;
        int dirtyidx, do_wake, do_restart;
        u16 sc;

        spin_lock(&fep->tx_lock);
        bdp = fep->dirty_tx;

        do_wake = do_restart = 0;
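        /*
         * Reclaim completed TX descriptors, walking from dirty_tx until
         * we reach one the controller still owns (READY set).
         */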
        while (((sc = CBDR_SC(bdp)) & BD_ENET_TX_READY) == 0) {
                dirtyidx = bdp - fep->tx_bd_base;

                if (fep->tx_free == fep->tx_ring)
                        break;

                skb = fep->tx_skbuff[dirtyidx];

                /*
                 * Check for errors.
                 */
                if (sc & (BD_ENET_TX_HB | BD_ENET_TX_LC |
                          BD_ENET_TX_RL | BD_ENET_TX_UN | BD_ENET_TX_CSL)) {

                        if (sc & BD_ENET_TX_HB) /* No heartbeat */
                                fep->stats.tx_heartbeat_errors++;
                        if (sc & BD_ENET_TX_LC) /* Late collision */
                                fep->stats.tx_window_errors++;
                        if (sc & BD_ENET_TX_RL) /* Retrans limit */
                                fep->stats.tx_aborted_errors++;
                        if (sc & BD_ENET_TX_UN) /* Underrun */
                                fep->stats.tx_fifo_errors++;
                        if (sc & BD_ENET_TX_CSL)        /* Carrier lost */
                                fep->stats.tx_carrier_errors++;

                        if (sc & (BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN)) {
                                fep->stats.tx_errors++;
                                do_restart = 1;
                        }
                } else
                        fep->stats.tx_packets++;

                if (sc & BD_ENET_TX_READY)
                        printk(KERN_WARNING DRV_MODULE_NAME
                               ": %s HEY! Enet xmit interrupt and TX_READY.\n",
                               dev->name);

                /*
                 * Deferred means some collisions occurred during transmit,
                 * but we eventually sent the packet OK.
                 */
                if (sc & BD_ENET_TX_DEF)
                        fep->stats.collisions++;

                /* unmap */
                dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
                                skb->len, DMA_TO_DEVICE);

                /*
                 * Free the sk buffer associated with this last transmit.
                 */
                dev_kfree_skb_irq(skb);
                fep->tx_skbuff[dirtyidx] = NULL;

                /*
                 * Update pointer to next buffer descriptor to be transmitted.
                 */
                if ((sc & BD_ENET_TX_WRAP) == 0)
                        bdp++;
                else
                        bdp = fep->tx_bd_base;

                /*
                 * Since we have freed up a buffer, the ring is no longer
                 * full.
                 */
                if (!fep->tx_free++)
                        do_wake = 1;
        }

        fep->dirty_tx = bdp;

        if (do_restart)
                (*fep->ops->tx_restart)(dev);

        spin_unlock(&fep->tx_lock);

        if (do_wake)
                netif_wake_queue(dev);
}

/*
 * The interrupt handler.
 * This is called from the MPC core interrupt.
 */
static irqreturn_t
fs_enet_interrupt(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct fs_enet_private *fep;
        const struct fs_platform_info *fpi;
        u32 int_events;
        u32 int_clr_events;
        int nr, napi_ok;
        int handled;

        fep = netdev_priv(dev);
        fpi = fep->fpi;

        nr = 0;
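        /*
         * Service events until the controller reports none pending.  In
         * NAPI mode the RX event is deliberately not acknowledged here;
         * it is masked and acknowledged in the RX branch below, before
         * the poll routine is scheduled.
         */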
        while ((int_events = (*fep->ops->get_int_events)(dev)) != 0) {
                nr++;

                int_clr_events = int_events;
                if (fpi->use_napi)
                        int_clr_events &= ~fep->ev_napi_rx;

                (*fep->ops->clear_int_events)(dev, int_clr_events);

                if (int_events & fep->ev_err)
                        (*fep->ops->ev_error)(dev, int_events);

                if (int_events & fep->ev_rx) {
                        if (!fpi->use_napi)
                                fs_enet_rx_non_napi(dev);
                        else {
                                napi_ok = napi_schedule_prep(&fep->napi);

                                (*fep->ops->napi_disable_rx)(dev);
                                (*fep->ops->clear_int_events)(dev, fep->ev_napi_rx);

                                /* NOTE: it is possible for FCCs in NAPI mode    */
                                /* to submit a spurious interrupt while in poll  */
                                if (napi_ok)
                                        __netif_rx_schedule(dev, &fep->napi);
                        }
                }

                if (int_events & fep->ev_tx)
                        fs_enet_tx(dev);
        }

        handled = nr > 0;
        return IRQ_RETVAL(handled);
}

void fs_init_bds(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        cbd_t __iomem *bdp;
        struct sk_buff *skb;
        int i;

        fs_cleanup_bds(dev);

        fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
        fep->tx_free = fep->tx_ring;
        fep->cur_rx = fep->rx_bd_base;

        /*
         * Initialize the receive buffer descriptors.
         */
        for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
                skb = dev_alloc_skb(ENET_RX_FRSIZE);
                if (skb == NULL) {
                        printk(KERN_WARNING DRV_MODULE_NAME
                               ": %s Memory squeeze, unable to allocate skb\n",
                               dev->name);
                        break;
                }
                skb_align(skb, ENET_RX_ALIGN);
                fep->rx_skbuff[i] = skb;
                CBDW_BUFADDR(bdp,
                        dma_map_single(fep->dev, skb->data,
                                L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
                                DMA_FROM_DEVICE));
                CBDW_DATLEN(bdp, 0);    /* zero */
                CBDW_SC(bdp, BD_ENET_RX_EMPTY |
                        ((i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP));
        }
        /*
         * If allocation failed, fill up the remainder with empty slots.
         */
        for (; i < fep->rx_ring; i++, bdp++) {
                fep->rx_skbuff[i] = NULL;
                CBDW_SC(bdp, (i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP);
        }

        /*
         * ...and the same for transmit.
         */
        for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) {
                fep->tx_skbuff[i] = NULL;
                CBDW_BUFADDR(bdp, 0);
                CBDW_DATLEN(bdp, 0);
                CBDW_SC(bdp, (i < fep->tx_ring - 1) ? 0 : BD_SC_WRAP);
        }
}

void fs_cleanup_bds(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        struct sk_buff *skb;
        cbd_t __iomem *bdp;
        int i;

        /*
         * Reset SKB transmit buffers.
         */
        for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) {
                if ((skb = fep->tx_skbuff[i]) == NULL)
                        continue;

                /* unmap */
                dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
                                skb->len, DMA_TO_DEVICE);

                fep->tx_skbuff[i] = NULL;
                dev_kfree_skb(skb);
        }

        /*
         * Reset SKB receive buffers
         */
        for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
                if ((skb = fep->rx_skbuff[i]) == NULL)
                        continue;

                /* unmap */
                dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
                        L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
                        DMA_FROM_DEVICE);

                fep->rx_skbuff[i] = NULL;

                dev_kfree_skb(skb);
        }
}

/**********************************************************************************/

static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        cbd_t __iomem *bdp;
        int curidx;
        u16 sc;
        unsigned long flags;

        spin_lock_irqsave(&fep->tx_lock, flags);

        /*
         * Fill in a Tx ring entry
         */
        bdp = fep->cur_tx;

        if (!fep->tx_free || (CBDR_SC(bdp) & BD_ENET_TX_READY)) {
                netif_stop_queue(dev);
                spin_unlock_irqrestore(&fep->tx_lock, flags);

                /*
                 * Ooops.  All transmit buffers are full.  Bail out.
                 * This should not happen, since the tx queue should be stopped.
                 */
                printk(KERN_WARNING DRV_MODULE_NAME
                       ": %s tx queue full!\n", dev->name);
                return NETDEV_TX_BUSY;
        }

        curidx = bdp - fep->tx_bd_base;
        /*
         * Clear all of the status flags.
         */
        CBDC_SC(bdp, BD_ENET_TX_STATS);

        /*
         * Save skb pointer.
         */
        fep->tx_skbuff[curidx] = skb;

        fep->stats.tx_bytes += skb->len;

        /*
         * Push the data cache so the CPM does not get stale memory data.
         */
        CBDW_BUFADDR(bdp, dma_map_single(fep->dev,
                                skb->data, skb->len, DMA_TO_DEVICE));
        CBDW_DATLEN(bdp, skb->len);

        dev->trans_start = jiffies;

        /*
         * If this was the last BD in the ring, start at the beginning again.
         */
        if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0)
                fep->cur_tx++;
        else
                fep->cur_tx = fep->tx_bd_base;

        if (!--fep->tx_free)
                netif_stop_queue(dev);

        /* Trigger transmission start */
        sc = BD_ENET_TX_READY | BD_ENET_TX_INTR |
             BD_ENET_TX_LAST | BD_ENET_TX_TC;

        /* note that while FEC does not have this bit
         * it marks it as available for software use
         * yay for hw reuse :) */
        if (skb->len <= 60)
                sc |= BD_ENET_TX_PAD;
        CBDS_SC(bdp, sc);

        (*fep->ops->tx_kickstart)(dev);

        spin_unlock_irqrestore(&fep->tx_lock, flags);

        return NETDEV_TX_OK;
}

static int fs_request_irq(struct net_device *dev, int irq, const char *name,
                irq_handler_t irqf)
{
        struct fs_enet_private *fep = netdev_priv(dev);

        (*fep->ops->pre_request_irq)(dev, irq);
        return request_irq(irq, irqf, IRQF_SHARED, name, dev);
}

static void fs_free_irq(struct net_device *dev, int irq)
{
        struct fs_enet_private *fep = netdev_priv(dev);

        free_irq(irq, dev);
        (*fep->ops->post_free_irq)(dev, irq);
}

static void fs_timeout(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        unsigned long flags;
        int wake = 0;

        fep->stats.tx_errors++;

        spin_lock_irqsave(&fep->lock, flags);

        if (dev->flags & IFF_UP) {
                phy_stop(fep->phydev);
                (*fep->ops->stop)(dev);
                (*fep->ops->restart)(dev);
                phy_start(fep->phydev);
        }

        wake = fep->tx_free && !(CBDR_SC(fep->cur_tx) & BD_ENET_TX_READY);
        spin_unlock_irqrestore(&fep->lock, flags);

        if (wake)
                netif_wake_queue(dev);
}

/*-----------------------------------------------------------------------------
 *  generic link-change handler - should be sufficient for most cases
 *-----------------------------------------------------------------------------*/
static void generic_adjust_link(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        struct phy_device *phydev = fep->phydev;
        int new_state = 0;

        if (phydev->link) {
                /* adjust to duplex mode */
                if (phydev->duplex != fep->oldduplex) {
                        new_state = 1;
                        fep->oldduplex = phydev->duplex;
                }

                if (phydev->speed != fep->oldspeed) {
                        new_state = 1;
                        fep->oldspeed = phydev->speed;
                }

                if (!fep->oldlink) {
                        new_state = 1;
                        fep->oldlink = 1;
                        netif_schedule(dev);
                        netif_carrier_on(dev);
                        netif_start_queue(dev);
                }

                if (new_state)
                        fep->ops->restart(dev);
        } else if (fep->oldlink) {
                new_state = 1;
                fep->oldlink = 0;
                fep->oldspeed = 0;
                fep->oldduplex = -1;
                netif_carrier_off(dev);
                netif_stop_queue(dev);
        }

        if (new_state && netif_msg_link(fep))
                phy_print_status(phydev);
}

static void fs_adjust_link(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        unsigned long flags;

        spin_lock_irqsave(&fep->lock, flags);

        if (fep->ops->adjust_link)
                fep->ops->adjust_link(dev);
        else
                generic_adjust_link(dev);

        spin_unlock_irqrestore(&fep->lock, flags);
}

static int fs_init_phy(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        struct phy_device *phydev;

        fep->oldlink = 0;
        fep->oldspeed = 0;
        fep->oldduplex = -1;
        if (!fep->fpi->bus_id) {
                printk(KERN_ERR DRV_MODULE_NAME
                       ": %s No PHY bus ID specified in BSP code.\n",
                       dev->name);
                return -EINVAL;
        }

        phydev = phy_connect(dev, fep->fpi->bus_id, &fs_adjust_link, 0,
                        PHY_INTERFACE_MODE_MII);
        if (IS_ERR(phydev)) {
                printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
                return PTR_ERR(phydev);
        }

        fep->phydev = phydev;

        return 0;
}

static int fs_enet_open(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        int r;
        int err;

        if (fep->fpi->use_napi)
                napi_enable(&fep->napi);

        /* Install our interrupt handler. */
        r = fs_request_irq(dev, fep->interrupt, "fs_enet-mac", fs_enet_interrupt);
        if (r != 0) {
                printk(KERN_ERR DRV_MODULE_NAME
                       ": %s Could not allocate FS_ENET IRQ!\n", dev->name);
                if (fep->fpi->use_napi)
                        napi_disable(&fep->napi);
                return -EINVAL;
        }

        err = fs_init_phy(dev);
        if (err) {
                if (fep->fpi->use_napi)
                        napi_disable(&fep->napi);
                return err;
        }
        phy_start(fep->phydev);

        return 0;
}

static int fs_enet_close(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        unsigned long flags;

        netif_stop_queue(dev);
        netif_carrier_off(dev);
        if (fep->fpi->use_napi)
                napi_disable(&fep->napi);
        phy_stop(fep->phydev);

        spin_lock_irqsave(&fep->lock, flags);
        spin_lock(&fep->tx_lock);
        (*fep->ops->stop)(dev);
        spin_unlock(&fep->tx_lock);
        spin_unlock_irqrestore(&fep->lock, flags);

        /* release any irqs */
        phy_disconnect(fep->phydev);
        fep->phydev = NULL;
        fs_free_irq(dev, fep->interrupt);

        return 0;
}

static struct net_device_stats *fs_enet_get_stats(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        return &fep->stats;
}

/*************************************************************************/

static void fs_get_drvinfo(struct net_device *dev,
                            struct ethtool_drvinfo *info)
{
        strcpy(info->driver, DRV_MODULE_NAME);
        strcpy(info->version, DRV_MODULE_VERSION);
}

static int fs_get_regs_len(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);

        return (*fep->ops->get_regs_len)(dev);
}

static void fs_get_regs(struct net_device *dev, struct ethtool_regs *regs,
                         void *p)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        unsigned long flags;
        int r, len;

        len = regs->len;

        spin_lock_irqsave(&fep->lock, flags);
        r = (*fep->ops->get_regs)(dev, p, &len);
        spin_unlock_irqrestore(&fep->lock, flags);

        if (r == 0)
                regs->version = 0;
}

static int fs_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        return phy_ethtool_gset(fep->phydev, cmd);
}

static int fs_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        phy_ethtool_sset(fep->phydev, cmd);
        return 0;
}

static int fs_nway_reset(struct net_device *dev)
{
        return 0;
}

static u32 fs_get_msglevel(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        return fep->msg_enable;
}

static void fs_set_msglevel(struct net_device *dev, u32 value)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        fep->msg_enable = value;
}

static const struct ethtool_ops fs_ethtool_ops = {
        .get_drvinfo = fs_get_drvinfo,
        .get_regs_len = fs_get_regs_len,
        .get_settings = fs_get_settings,
        .set_settings = fs_set_settings,
        .nway_reset = fs_nway_reset,
        .get_link = ethtool_op_get_link,
        .get_msglevel = fs_get_msglevel,
        .set_msglevel = fs_set_msglevel,
        .set_tx_csum = ethtool_op_set_tx_csum,  /* local! */
        .set_sg = ethtool_op_set_sg,
        .get_regs = fs_get_regs,
};

static int fs_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&rq->ifr_data;
        unsigned long flags;
        int rc;

        if (!netif_running(dev))
                return -EINVAL;

        spin_lock_irqsave(&fep->lock, flags);
        rc = phy_mii_ioctl(fep->phydev, mii, cmd);
        spin_unlock_irqrestore(&fep->lock, flags);
        return rc;
}

extern int fs_mii_connect(struct net_device *dev);
extern void fs_mii_disconnect(struct net_device *dev);

#ifndef CONFIG_PPC_CPM_NEW_BINDING
static struct net_device *fs_init_instance(struct device *dev,
                struct fs_platform_info *fpi)
{
        struct net_device *ndev = NULL;
        struct fs_enet_private *fep = NULL;
        int privsize, i, r, err = 0, registered = 0;

        fpi->fs_no = fs_get_id(fpi);
        /* guard */
        if ((unsigned int)fpi->fs_no >= FS_MAX_INDEX)
                return ERR_PTR(-EINVAL);

        privsize = sizeof(*fep) + (sizeof(struct sk_buff **) *
                            (fpi->rx_ring + fpi->tx_ring));

        ndev = alloc_etherdev(privsize);
        if (!ndev) {
                err = -ENOMEM;
                goto err;
        }

        fep = netdev_priv(ndev);

        fep->dev = dev;
        dev_set_drvdata(dev, ndev);
        fep->fpi = fpi;
        if (fpi->init_ioports)
                fpi->init_ioports((struct fs_platform_info *)fpi);

#ifdef CONFIG_FS_ENET_HAS_FEC
        if (fs_get_fec_index(fpi->fs_no) >= 0)
                fep->ops = &fs_fec_ops;
#endif

#ifdef CONFIG_FS_ENET_HAS_SCC
        if (fs_get_scc_index(fpi->fs_no) >= 0)
                fep->ops = &fs_scc_ops;
#endif

#ifdef CONFIG_FS_ENET_HAS_FCC
        if (fs_get_fcc_index(fpi->fs_no) >= 0)
                fep->ops = &fs_fcc_ops;
#endif

        if (fep->ops == NULL) {
                printk(KERN_ERR DRV_MODULE_NAME
                       ": %s No matching ops found (%d).\n",
                       ndev->name, fpi->fs_no);
                err = -EINVAL;
                goto err;
        }

        r = (*fep->ops->setup_data)(ndev);
        if (r != 0) {
                printk(KERN_ERR DRV_MODULE_NAME
                       ": %s setup_data failed\n",
                       ndev->name);
                err = r;
                goto err;
        }

        /* point rx_skbuff, tx_skbuff */
        fep->rx_skbuff = (struct sk_buff **)&fep[1];
        fep->tx_skbuff = fep->rx_skbuff + fpi->rx_ring;

        /* init locks */
        spin_lock_init(&fep->lock);
        spin_lock_init(&fep->tx_lock);

        /*
         * Set the Ethernet address.
         */
        for (i = 0; i < 6; i++)
                ndev->dev_addr[i] = fpi->macaddr[i];

        r = (*fep->ops->allocate_bd)(ndev);

        if (fep->ring_base == NULL) {
                printk(KERN_ERR DRV_MODULE_NAME
                       ": %s buffer descriptor alloc failed (%d).\n", ndev->name, r);
                err = r;
                goto err;
        }

        /*
         * Set receive and transmit descriptor base.
         */
        fep->rx_bd_base = fep->ring_base;
        fep->tx_bd_base = fep->rx_bd_base + fpi->rx_ring;

        /* initialize ring size variables */
        fep->tx_ring = fpi->tx_ring;
        fep->rx_ring = fpi->rx_ring;

        /*
         * The FEC Ethernet specific entries in the device structure.
         */
        ndev->open = fs_enet_open;
        ndev->hard_start_xmit = fs_enet_start_xmit;
        ndev->tx_timeout = fs_timeout;
        ndev->watchdog_timeo = 2 * HZ;
        ndev->stop = fs_enet_close;
        ndev->get_stats = fs_enet_get_stats;
        ndev->set_multicast_list = fs_set_multicast_list;

#ifdef CONFIG_NET_POLL_CONTROLLER
        ndev->poll_controller = fs_enet_netpoll;
#endif

        netif_napi_add(ndev, &fep->napi,
                       fs_enet_rx_napi, fpi->napi_weight);

        ndev->ethtool_ops = &fs_ethtool_ops;
        ndev->do_ioctl = fs_ioctl;

        init_timer(&fep->phy_timer_list);

        netif_carrier_off(ndev);

        err = register_netdev(ndev);
        if (err != 0) {
                printk(KERN_ERR DRV_MODULE_NAME
                       ": %s register_netdev failed.\n", ndev->name);
                goto err;
        }
        registered = 1;

        return ndev;

err:
        if (ndev != NULL) {
                if (registered)
                        unregister_netdev(ndev);

                if (fep != NULL && fep->ops != NULL) {
                        (*fep->ops->free_bd)(ndev);
                        (*fep->ops->cleanup_data)(ndev);
                }

                free_netdev(ndev);
        }

        dev_set_drvdata(dev, NULL);

        return ERR_PTR(err);
}

static int fs_cleanup_instance(struct net_device *ndev)
{
        struct fs_enet_private *fep;
        const struct fs_platform_info *fpi;
        struct device *dev;

        if (ndev == NULL)
                return -EINVAL;

        fep = netdev_priv(ndev);
        if (fep == NULL)
                return -EINVAL;

        fpi = fep->fpi;

        unregister_netdev(ndev);

        dma_free_coherent(fep->dev, (fpi->tx_ring + fpi->rx_ring) * sizeof(cbd_t),
                          (void __force *)fep->ring_base, fep->ring_mem_addr);

        /* reset it */
        (*fep->ops->cleanup_data)(ndev);

        dev = fep->dev;
        if (dev != NULL) {
                dev_set_drvdata(dev, NULL);
                fep->dev = NULL;
        }

        free_netdev(ndev);

        return 0;
}
#endif

/**************************************************************************************/

/* handy pointer to the immap */
void __iomem *fs_enet_immap = NULL;

static int setup_immap(void)
{
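        /*
         * On CPM1 the on-chip register space (IMMR) must be ioremapped
         * here; on CPM2 the kernel already provides the mapping as
         * cpm2_immr.
         */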
#ifdef CONFIG_CPM1
        fs_enet_immap = ioremap(IMAP_ADDR, 0x4000);
        WARN_ON(!fs_enet_immap);
#elif defined(CONFIG_CPM2)
        fs_enet_immap = cpm2_immr;
#endif

        return 0;
}

static void cleanup_immap(void)
{
#if defined(CONFIG_CPM1)
        iounmap(fs_enet_immap);
#endif
}

/**************************************************************************************/

#ifdef CONFIG_PPC_CPM_NEW_BINDING
static int __devinit find_phy(struct device_node *np,
                              struct fs_platform_info *fpi)
{
        struct device_node *phynode, *mdionode;
        struct resource res;
        int ret = 0, len;

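        /*
         * Resolve the "phy-handle" phandle to the PHY node and compose
         * the PHY bus ID from the parent MDIO controller's address and
         * the PHY's "reg" property.
         */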
        const u32 *data = of_get_property(np, "phy-handle", &len);
        if (!data || len != 4)
                return -EINVAL;

        phynode = of_find_node_by_phandle(*data);
        if (!phynode)
                return -EINVAL;

        mdionode = of_get_parent(phynode);
        if (!mdionode) {
                ret = -EINVAL;
                goto out_put_phy;
        }

        ret = of_address_to_resource(mdionode, 0, &res);
        if (ret)
                goto out_put_mdio;

        data = of_get_property(phynode, "reg", &len);
        if (!data || len != 4) {
                ret = -EINVAL;
                goto out_put_mdio;
        }

        snprintf(fpi->bus_id, 16, PHY_ID_FMT, res.start, *data);

out_put_mdio:
        of_node_put(mdionode);
out_put_phy:
        of_node_put(phynode);
        return ret;
}

#ifdef CONFIG_FS_ENET_HAS_FEC
#define IS_FEC(match) ((match)->data == &fs_fec_ops)
#else
#define IS_FEC(match) 0
#endif

static int __devinit fs_enet_probe(struct of_device *ofdev,
                                   const struct of_device_id *match)
{
        struct net_device *ndev;
        struct fs_enet_private *fep;
        struct fs_platform_info *fpi;
        const u32 *data;
        const u8 *mac_addr;
        int privsize, len, ret = -ENODEV;

        fpi = kzalloc(sizeof(*fpi), GFP_KERNEL);
        if (!fpi)
                return -ENOMEM;

        if (!IS_FEC(match)) {
                data = of_get_property(ofdev->node, "fsl,cpm-command", &len);
                if (!data || len != 4)
                        goto out_free_fpi;

                fpi->cp_command = *data;
        }

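        /* Driver defaults: ring sizes, copybreak threshold, NAPI weight */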
        fpi->rx_ring = 32;
        fpi->tx_ring = 32;
        fpi->rx_copybreak = 240;
        fpi->use_napi = 1;
        fpi->napi_weight = 17;

        ret = find_phy(ofdev->node, fpi);
        if (ret)
                goto out_free_fpi;

        privsize = sizeof(*fep) +
                   sizeof(struct sk_buff **) *
                   (fpi->rx_ring + fpi->tx_ring);

        ndev = alloc_etherdev(privsize);
        if (!ndev) {
                ret = -ENOMEM;
                goto out_free_fpi;
        }

        dev_set_drvdata(&ofdev->dev, ndev);

        fep = netdev_priv(ndev);
        fep->dev = &ofdev->dev;
        fep->ndev = ndev;
        fep->fpi = fpi;
        fep->ops = match->data;

        ret = fep->ops->setup_data(ndev);
        if (ret)
                goto out_free_dev;

        fep->rx_skbuff = (struct sk_buff **)&fep[1];
        fep->tx_skbuff = fep->rx_skbuff + fpi->rx_ring;

        spin_lock_init(&fep->lock);
        spin_lock_init(&fep->tx_lock);

        mac_addr = of_get_mac_address(ofdev->node);
        if (mac_addr)
                memcpy(ndev->dev_addr, mac_addr, 6);

        ret = fep->ops->allocate_bd(ndev);
        if (ret)
                goto out_cleanup_data;

        fep->rx_bd_base = fep->ring_base;
        fep->tx_bd_base = fep->rx_bd_base + fpi->rx_ring;

        fep->tx_ring = fpi->tx_ring;
        fep->rx_ring = fpi->rx_ring;

        ndev->open = fs_enet_open;
        ndev->hard_start_xmit = fs_enet_start_xmit;
        ndev->tx_timeout = fs_timeout;
        ndev->watchdog_timeo = 2 * HZ;
        ndev->stop = fs_enet_close;
        ndev->get_stats = fs_enet_get_stats;
        ndev->set_multicast_list = fs_set_multicast_list;

        if (fpi->use_napi)
                netif_napi_add(ndev, &fep->napi, fs_enet_rx_napi,
                               fpi->napi_weight);

        ndev->ethtool_ops = &fs_ethtool_ops;
        ndev->do_ioctl = fs_ioctl;

        init_timer(&fep->phy_timer_list);

        netif_carrier_off(ndev);

        ret = register_netdev(ndev);
        if (ret)
                goto out_free_bd;

        printk(KERN_INFO "%s: fs_enet: %02x:%02x:%02x:%02x:%02x:%02x\n",
               ndev->name,
               ndev->dev_addr[0], ndev->dev_addr[1], ndev->dev_addr[2],
               ndev->dev_addr[3], ndev->dev_addr[4], ndev->dev_addr[5]);

        return 0;

out_free_bd:
        fep->ops->free_bd(ndev);
out_cleanup_data:
        fep->ops->cleanup_data(ndev);
out_free_dev:
        free_netdev(ndev);
        dev_set_drvdata(&ofdev->dev, NULL);
out_free_fpi:
        kfree(fpi);
        return ret;
}

static int fs_enet_remove(struct of_device *ofdev)
{
        struct net_device *ndev = dev_get_drvdata(&ofdev->dev);
        struct fs_enet_private *fep = netdev_priv(ndev);

        unregister_netdev(ndev);

        fep->ops->free_bd(ndev);
        fep->ops->cleanup_data(ndev);
        dev_set_drvdata(fep->dev, NULL);

        free_netdev(ndev);
        return 0;
}

static struct of_device_id fs_enet_match[] = {
#ifdef CONFIG_FS_ENET_HAS_SCC
        {
                .compatible = "fsl,cpm1-scc-enet",
                .data = (void *)&fs_scc_ops,
        },
#endif
#ifdef CONFIG_FS_ENET_HAS_FCC
        {
                .compatible = "fsl,cpm2-fcc-enet",
                .data = (void *)&fs_fcc_ops,
        },
#endif
#ifdef CONFIG_FS_ENET_HAS_FEC
        {
                .compatible = "fsl,pq1-fec-enet",
                .data = (void *)&fs_fec_ops,
        },
#endif
        {}
};

static struct of_platform_driver fs_enet_driver = {
        .name   = "fs_enet",
        .match_table = fs_enet_match,
        .probe = fs_enet_probe,
        .remove = fs_enet_remove,
};

static int __init fs_init(void)
{
        int r = setup_immap();
        if (r != 0)
                return r;

        r = of_register_platform_driver(&fs_enet_driver);
        if (r != 0)
                goto out;

        return 0;

out:
        cleanup_immap();
        return r;
}

static void __exit fs_cleanup(void)
{
        of_unregister_platform_driver(&fs_enet_driver);
        cleanup_immap();
}
#else
static int __devinit fs_enet_probe(struct device *dev)
{
        struct net_device *ndev;

        /* no fixup - no device */
        if (dev->platform_data == NULL) {
                printk(KERN_INFO "fs_enet: "
                                "probe called with no platform data; "
                                "remove unused devices\n");
                return -ENODEV;
        }

        ndev = fs_init_instance(dev, dev->platform_data);
        if (IS_ERR(ndev))
                return PTR_ERR(ndev);
        return 0;
}

static int fs_enet_remove(struct device *dev)
{
        return fs_cleanup_instance(dev_get_drvdata(dev));
}

static struct device_driver fs_enet_fec_driver = {
        .name           = "fsl-cpm-fec",
        .bus            = &platform_bus_type,
        .probe          = fs_enet_probe,
        .remove         = fs_enet_remove,
#ifdef CONFIG_PM
/*      .suspend        = fs_enet_suspend,      TODO */
/*      .resume         = fs_enet_resume,       TODO */
#endif
};

static struct device_driver fs_enet_scc_driver = {
        .name           = "fsl-cpm-scc",
        .bus            = &platform_bus_type,
        .probe          = fs_enet_probe,
        .remove         = fs_enet_remove,
#ifdef CONFIG_PM
/*      .suspend        = fs_enet_suspend,      TODO */
/*      .resume         = fs_enet_resume,       TODO */
#endif
};

static struct device_driver fs_enet_fcc_driver = {
        .name           = "fsl-cpm-fcc",
        .bus            = &platform_bus_type,
        .probe          = fs_enet_probe,
        .remove         = fs_enet_remove,
#ifdef CONFIG_PM
/*      .suspend        = fs_enet_suspend,      TODO */
/*      .resume         = fs_enet_resume,       TODO */
#endif
};

static int __init fs_init(void)
{
        int r;

        printk(KERN_INFO "%s", version);

        r = setup_immap();
        if (r != 0)
                return r;

#ifdef CONFIG_FS_ENET_HAS_FCC
        /* let's insert mii stuff */
        r = fs_enet_mdio_bb_init();
        if (r != 0) {
                printk(KERN_ERR DRV_MODULE_NAME
                        ": BB PHY init failed.\n");
                goto err;
        }
        r = driver_register(&fs_enet_fcc_driver);
        if (r != 0)
                goto err;
#endif

#ifdef CONFIG_FS_ENET_HAS_FEC
        r = fs_enet_mdio_fec_init();
        if (r != 0) {
                printk(KERN_ERR DRV_MODULE_NAME
                        ": FEC PHY init failed.\n");
                goto err;
        }

        r = driver_register(&fs_enet_fec_driver);
        if (r != 0)
                goto err;
#endif

#ifdef CONFIG_FS_ENET_HAS_SCC
        r = driver_register(&fs_enet_scc_driver);
        if (r != 0)
                goto err;
#endif

        return 0;
err:
        cleanup_immap();
        return r;
}

static void __exit fs_cleanup(void)
{
        driver_unregister(&fs_enet_fec_driver);
        driver_unregister(&fs_enet_fcc_driver);
        driver_unregister(&fs_enet_scc_driver);
        cleanup_immap();
}
#endif

#ifdef CONFIG_NET_POLL_CONTROLLER
static void fs_enet_netpoll(struct net_device *dev)
{
        disable_irq(dev->irq);
        fs_enet_interrupt(dev->irq, dev);
        enable_irq(dev->irq);
}
#endif

/**************************************************************************************/

module_init(fs_init);
module_exit(fs_cleanup);