fs_enet: Align receive buffers.
[linux-2.6] drivers/net/fs_enet/fs_enet-main.c
/*
 * Combined Ethernet driver for Motorola MPC8xx and MPC82xx.
 *
 * Copyright (c) 2003 Intracom S.A.
 *  by Pantelis Antoniou <panto@intracom.gr>
 *
 * 2005 (c) MontaVista Software, Inc.
 * Vitaly Bordug <vbordug@ru.mvista.com>
 *
 * Heavily based on original FEC driver by Dan Malek <dan@embeddededge.com>
 * and modifications by Joakim Tjernlund <joakim.tjernlund@lumentis.se>
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <linux/fs.h>
#include <linux/platform_device.h>
#include <linux/phy.h>

#include <linux/vmalloc.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/uaccess.h>

#include "fs_enet.h"

/*************************************************/

static char version[] __devinitdata =
    DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")" "\n";

MODULE_AUTHOR("Pantelis Antoniou <panto@intracom.gr>");
MODULE_DESCRIPTION("Freescale Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

int fs_enet_debug = -1;         /* -1 == use FS_ENET_DEF_MSG_ENABLE as value */
module_param(fs_enet_debug, int, 0);
MODULE_PARM_DESC(fs_enet_debug,
                 "Freescale bitmapped debugging message enable value");

#ifdef CONFIG_NET_POLL_CONTROLLER
static void fs_enet_netpoll(struct net_device *dev);
#endif

static void fs_set_multicast_list(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);

        (*fep->ops->set_multicast_list)(dev);
}

static void skb_align(struct sk_buff *skb, int align)
{
        int off = ((unsigned long)skb->data) & (align - 1);

        if (off)
                skb_reserve(skb, align - off);
}
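
/*
 * Editor's note (illustrative, not part of the original source): the
 * alignment arithmetic above masks off the low bits of the data pointer.
 * For example, with align == 16 (ENET_RX_ALIGN is assumed to be a power
 * of two):
 *
 *      skb->data == 0x...c106  ->  off = 0x...c106 & 0xf = 6
 *      skb_reserve(skb, 16 - 6) advances data to 0x...c110, 16-byte aligned.
 *
 * If the pointer is already aligned, off is 0 and nothing is reserved.
 */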

/* NAPI receive function */
static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
{
        struct fs_enet_private *fep = container_of(napi, struct fs_enet_private, napi);
        struct net_device *dev = to_net_dev(fep->dev);
        const struct fs_platform_info *fpi = fep->fpi;
        cbd_t *bdp;
        struct sk_buff *skb, *skbn, *skbt;
        int received = 0;
        u16 pkt_len, sc;
        int curidx;

        if (!netif_running(dev))
                return 0;

        /*
         * First, grab all of the stats for the incoming packet.
         * These get messed up if we get called due to a busy condition.
         */
        bdp = fep->cur_rx;

        /* clear RX status bits for napi */
        (*fep->ops->napi_clear_rx_event)(dev);

        while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) {
                curidx = bdp - fep->rx_bd_base;

                /*
                 * Since we have allocated space to hold a complete frame,
                 * the last indicator should be set.
                 */
                if ((sc & BD_ENET_RX_LAST) == 0)
                        printk(KERN_WARNING DRV_MODULE_NAME
                               ": %s rcv is not +last\n",
                               dev->name);

                /*
                 * Check for errors.
                 */
                if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL |
                          BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) {
                        fep->stats.rx_errors++;
                        /* Frame too long or too short. */
                        if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
                                fep->stats.rx_length_errors++;
                        /* Frame alignment */
                        if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL))
                                fep->stats.rx_frame_errors++;
                        /* CRC Error */
                        if (sc & BD_ENET_RX_CR)
                                fep->stats.rx_crc_errors++;
                        /* FIFO overrun */
                        if (sc & BD_ENET_RX_OV)
                                fep->stats.rx_crc_errors++;

                        skb = fep->rx_skbuff[curidx];

                        dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
                                L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
                                DMA_FROM_DEVICE);

                        skbn = skb;

                } else {
                        skb = fep->rx_skbuff[curidx];

                        dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
                                L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
                                DMA_FROM_DEVICE);

                        /*
                         * Process the incoming frame.
                         */
                        fep->stats.rx_packets++;
                        pkt_len = CBDR_DATLEN(bdp) - 4; /* remove CRC */
                        fep->stats.rx_bytes += pkt_len + 4;

                        if (pkt_len <= fpi->rx_copybreak) {
                                /* +2 to make IP header L1 cache aligned */
                                skbn = dev_alloc_skb(pkt_len + 2);
                                if (skbn != NULL) {
                                        skb_reserve(skbn, 2);   /* align IP header */
                                        skb_copy_from_linear_data(skb,
                                                      skbn->data, pkt_len);
                                        /* swap */
                                        skbt = skb;
                                        skb = skbn;
                                        skbn = skbt;
                                }
                        } else {
                                skbn = dev_alloc_skb(ENET_RX_FRSIZE);

                                if (skbn)
                                        skb_align(skbn, ENET_RX_ALIGN);
                        }
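
                        /*
                         * Editor's note (sketch of the idea, not original
                         * text): rx_copybreak trades a memcpy for buffer
                         * reuse.  Small frames are copied into a fresh,
                         * tightly-sized skb so the large ENET_RX_FRSIZE
                         * buffer can stay on the ring; large frames are
                         * handed up whole and replaced with a newly
                         * allocated, realigned buffer.  Either way, skbn
                         * ends up being the buffer that goes back onto
                         * the ring below.
                         */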

                        if (skbn != NULL) {
                                skb_put(skb, pkt_len);  /* Make room */
                                skb->protocol = eth_type_trans(skb, dev);
                                received++;
                                netif_receive_skb(skb);
                        } else {
                                printk(KERN_WARNING DRV_MODULE_NAME
                                       ": %s Memory squeeze, dropping packet.\n",
                                       dev->name);
                                fep->stats.rx_dropped++;
                                skbn = skb;
                        }
                }

                fep->rx_skbuff[curidx] = skbn;
                CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skbn->data,
                             L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
                             DMA_FROM_DEVICE));
                CBDW_DATLEN(bdp, 0);
                CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY);

                /*
                 * Update BD pointer to next entry.
                 */
                if ((sc & BD_ENET_RX_WRAP) == 0)
                        bdp++;
                else
                        bdp = fep->rx_bd_base;

                (*fep->ops->rx_bd_done)(dev);

                if (received >= budget)
                        break;
        }

        fep->cur_rx = bdp;

        if (received < budget) {
                /* done - stayed under budget, so leave polling mode */
                netif_rx_complete(dev, napi);
                (*fep->ops->napi_enable_rx)(dev);
        }
        return received;
}
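
/*
 * Editor's note (illustrative summary, not original text): the NAPI
 * contract assumed here is that the poll callback consumes at most
 * "budget" packets and returns how many it actually handled.  Only
 * when it returns fewer than budget may it call netif_rx_complete()
 * and re-enable the controller's RX interrupt; returning the full
 * budget tells the core to keep polling.  Condensed:
 *
 *      while (ring_has_work() && received < budget)
 *              received += handle_one_frame();
 *      if (received < budget)
 *              netif_rx_complete(dev, napi);
 *      return received;
 *
 * ring_has_work() and handle_one_frame() are hypothetical helpers
 * standing in for the buffer-descriptor walk above.
 */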

/* non NAPI receive function */
static int fs_enet_rx_non_napi(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        const struct fs_platform_info *fpi = fep->fpi;
        cbd_t *bdp;
        struct sk_buff *skb, *skbn, *skbt;
        int received = 0;
        u16 pkt_len, sc;
        int curidx;
        /*
         * First, grab all of the stats for the incoming packet.
         * These get messed up if we get called due to a busy condition.
         */
        bdp = fep->cur_rx;

        while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) {

                curidx = bdp - fep->rx_bd_base;

                /*
                 * Since we have allocated space to hold a complete frame,
                 * the last indicator should be set.
                 */
                if ((sc & BD_ENET_RX_LAST) == 0)
                        printk(KERN_WARNING DRV_MODULE_NAME
                               ": %s rcv is not +last\n",
                               dev->name);

                /*
                 * Check for errors.
                 */
                if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL |
                          BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) {
                        fep->stats.rx_errors++;
                        /* Frame too long or too short. */
                        if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
                                fep->stats.rx_length_errors++;
                        /* Frame alignment */
                        if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL))
                                fep->stats.rx_frame_errors++;
                        /* CRC Error */
                        if (sc & BD_ENET_RX_CR)
                                fep->stats.rx_crc_errors++;
                        /* FIFO overrun */
                        if (sc & BD_ENET_RX_OV)
                                fep->stats.rx_crc_errors++;

                        skb = fep->rx_skbuff[curidx];

                        dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
                                L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
                                DMA_FROM_DEVICE);

                        skbn = skb;

                } else {

                        skb = fep->rx_skbuff[curidx];

                        dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
                                L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
                                DMA_FROM_DEVICE);

                        /*
                         * Process the incoming frame.
                         */
                        fep->stats.rx_packets++;
                        pkt_len = CBDR_DATLEN(bdp) - 4; /* remove CRC */
                        fep->stats.rx_bytes += pkt_len + 4;

                        if (pkt_len <= fpi->rx_copybreak) {
                                /* +2 to make IP header L1 cache aligned */
                                skbn = dev_alloc_skb(pkt_len + 2);
                                if (skbn != NULL) {
                                        skb_reserve(skbn, 2);   /* align IP header */
                                        skb_copy_from_linear_data(skb,
                                                      skbn->data, pkt_len);
                                        /* swap */
                                        skbt = skb;
                                        skb = skbn;
                                        skbn = skbt;
                                }
                        } else {
                                skbn = dev_alloc_skb(ENET_RX_FRSIZE);

                                if (skbn)
                                        skb_align(skbn, ENET_RX_ALIGN);
                        }

                        if (skbn != NULL) {
                                skb_put(skb, pkt_len);  /* Make room */
                                skb->protocol = eth_type_trans(skb, dev);
                                received++;
                                netif_rx(skb);
                        } else {
                                printk(KERN_WARNING DRV_MODULE_NAME
                                       ": %s Memory squeeze, dropping packet.\n",
                                       dev->name);
                                fep->stats.rx_dropped++;
                                skbn = skb;
                        }
                }

                fep->rx_skbuff[curidx] = skbn;
                CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skbn->data,
                             L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
                             DMA_FROM_DEVICE));
                CBDW_DATLEN(bdp, 0);
                CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY);

                /*
                 * Update BD pointer to next entry.
                 */
                if ((sc & BD_ENET_RX_WRAP) == 0)
                        bdp++;
                else
                        bdp = fep->rx_bd_base;

                (*fep->ops->rx_bd_done)(dev);
        }

        fep->cur_rx = bdp;

        return 0;
}

static void fs_enet_tx(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        cbd_t *bdp;
        struct sk_buff *skb;
        int dirtyidx, do_wake, do_restart;
        u16 sc;

        spin_lock(&fep->tx_lock);
        bdp = fep->dirty_tx;

        do_wake = do_restart = 0;
        while (((sc = CBDR_SC(bdp)) & BD_ENET_TX_READY) == 0) {
                dirtyidx = bdp - fep->tx_bd_base;

                if (fep->tx_free == fep->tx_ring)
                        break;

                skb = fep->tx_skbuff[dirtyidx];

                /*
                 * Check for errors.
                 */
                if (sc & (BD_ENET_TX_HB | BD_ENET_TX_LC |
                          BD_ENET_TX_RL | BD_ENET_TX_UN | BD_ENET_TX_CSL)) {

                        if (sc & BD_ENET_TX_HB) /* No heartbeat */
                                fep->stats.tx_heartbeat_errors++;
                        if (sc & BD_ENET_TX_LC) /* Late collision */
                                fep->stats.tx_window_errors++;
                        if (sc & BD_ENET_TX_RL) /* Retrans limit */
                                fep->stats.tx_aborted_errors++;
                        if (sc & BD_ENET_TX_UN) /* Underrun */
                                fep->stats.tx_fifo_errors++;
                        if (sc & BD_ENET_TX_CSL)        /* Carrier lost */
                                fep->stats.tx_carrier_errors++;

                        if (sc & (BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN)) {
                                fep->stats.tx_errors++;
                                do_restart = 1;
                        }
                } else
                        fep->stats.tx_packets++;

                if (sc & BD_ENET_TX_READY)
                        printk(KERN_WARNING DRV_MODULE_NAME
                               ": %s HEY! Enet xmit interrupt and TX_READY.\n",
                               dev->name);

                /*
                 * Deferred means some collisions occurred during transmit,
                 * but we eventually sent the packet OK.
                 */
                if (sc & BD_ENET_TX_DEF)
                        fep->stats.collisions++;

                /* unmap */
                dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
                                skb->len, DMA_TO_DEVICE);

                /*
                 * Free the sk buffer associated with this last transmit.
                 */
                dev_kfree_skb_irq(skb);
                fep->tx_skbuff[dirtyidx] = NULL;

                /*
                 * Update pointer to next buffer descriptor to be transmitted.
                 */
                if ((sc & BD_ENET_TX_WRAP) == 0)
                        bdp++;
                else
                        bdp = fep->tx_bd_base;

                /*
                 * Since we have freed up a buffer, the ring is no longer
                 * full.
                 */
                if (!fep->tx_free++)
                        do_wake = 1;
        }

        fep->dirty_tx = bdp;

        if (do_restart)
                (*fep->ops->tx_restart)(dev);

        spin_unlock(&fep->tx_lock);

        if (do_wake)
                netif_wake_queue(dev);
}
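
/*
 * Editor's note (illustrative, not original text): cur_tx and dirty_tx
 * delimit the in-flight window of the descriptor ring.  start_xmit
 * advances cur_tx and decrements tx_free as frames are queued;
 * fs_enet_tx() above reclaims completed descriptors from dirty_tx and
 * increments tx_free.  Waking the queue only on the 0 -> 1 transition
 * of tx_free pairs with netif_stop_queue() in start_xmit, so the queue
 * is restarted exactly once per full-ring episode.
 */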

/*
 * The interrupt handler.
 * This is called from the MPC core interrupt.
 */
static irqreturn_t
fs_enet_interrupt(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct fs_enet_private *fep;
        const struct fs_platform_info *fpi;
        u32 int_events;
        u32 int_clr_events;
        int nr, napi_ok;
        int handled;

        fep = netdev_priv(dev);
        fpi = fep->fpi;

        nr = 0;
        while ((int_events = (*fep->ops->get_int_events)(dev)) != 0) {
                nr++;

                int_clr_events = int_events;
                if (fpi->use_napi)
                        int_clr_events &= ~fep->ev_napi_rx;

                (*fep->ops->clear_int_events)(dev, int_clr_events);

                if (int_events & fep->ev_err)
                        (*fep->ops->ev_error)(dev, int_events);

                if (int_events & fep->ev_rx) {
                        if (!fpi->use_napi)
                                fs_enet_rx_non_napi(dev);
                        else {
                                napi_ok = napi_schedule_prep(&fep->napi);

                                (*fep->ops->napi_disable_rx)(dev);
                                (*fep->ops->clear_int_events)(dev, fep->ev_napi_rx);

                                /* NOTE: it is possible for FCCs in NAPI mode */
                                /* to submit a spurious interrupt while in poll */
                                if (napi_ok)
                                        __netif_rx_schedule(dev, &fep->napi);
                        }
                }

                if (int_events & fep->ev_tx)
                        fs_enet_tx(dev);
        }

        handled = nr > 0;
        return IRQ_RETVAL(handled);
}
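
/*
 * Editor's note (illustrative, not original text): the ordering in the
 * NAPI branch matters.  napi_schedule_prep() atomically claims the
 * poll; RX events are then masked and acknowledged unconditionally, so
 * a spurious interrupt that fires while the poll is already scheduled
 * (napi_ok == 0) is still silenced rather than rescheduling the poll.
 */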

void fs_init_bds(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        cbd_t *bdp;
        struct sk_buff *skb;
        int i;

        fs_cleanup_bds(dev);

        fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
        fep->tx_free = fep->tx_ring;
        fep->cur_rx = fep->rx_bd_base;

        /*
         * Initialize the receive buffer descriptors.
         */
        for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
                skb = dev_alloc_skb(ENET_RX_FRSIZE);
                if (skb == NULL) {
                        printk(KERN_WARNING DRV_MODULE_NAME
                               ": %s Memory squeeze, unable to allocate skb\n",
                               dev->name);
                        break;
                }
                skb_align(skb, ENET_RX_ALIGN);
                fep->rx_skbuff[i] = skb;
                CBDW_BUFADDR(bdp,
                        dma_map_single(fep->dev, skb->data,
                                L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
                                DMA_FROM_DEVICE));
                CBDW_DATLEN(bdp, 0);    /* zero */
                CBDW_SC(bdp, BD_ENET_RX_EMPTY |
                        ((i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP));
        }
        /*
         * if we failed, fillup remainder
         */
        for (; i < fep->rx_ring; i++, bdp++) {
                fep->rx_skbuff[i] = NULL;
                CBDW_SC(bdp, (i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP);
        }

        /*
         * ...and the same for transmit.
         */
        for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) {
                fep->tx_skbuff[i] = NULL;
                CBDW_BUFADDR(bdp, 0);
                CBDW_DATLEN(bdp, 0);
                CBDW_SC(bdp, (i < fep->tx_ring - 1) ? 0 : BD_SC_WRAP);
        }
}
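
/*
 * Editor's note (illustrative, not original text): the controller walks
 * the buffer descriptors as a ring purely in hardware, so only the last
 * descriptor carries BD_SC_WRAP.  For a 4-entry RX ring the status
 * words start out as:
 *
 *      bd[0]: BD_ENET_RX_EMPTY
 *      bd[1]: BD_ENET_RX_EMPTY
 *      bd[2]: BD_ENET_RX_EMPTY
 *      bd[3]: BD_ENET_RX_EMPTY | BD_SC_WRAP
 *
 * which is why the software-side walkers above test the wrap bit rather
 * than comparing an index against the ring size.
 */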

void fs_cleanup_bds(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        struct sk_buff *skb;
        cbd_t *bdp;
        int i;

        /*
         * Reset SKB transmit buffers.
         */
        for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) {
                if ((skb = fep->tx_skbuff[i]) == NULL)
                        continue;

                /* unmap */
                dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
                                skb->len, DMA_TO_DEVICE);

                fep->tx_skbuff[i] = NULL;
                dev_kfree_skb(skb);
        }

        /*
         * Reset SKB receive buffers
         */
        for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
                if ((skb = fep->rx_skbuff[i]) == NULL)
                        continue;

                /* unmap */
                dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
                        L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
                        DMA_FROM_DEVICE);

                fep->rx_skbuff[i] = NULL;

                dev_kfree_skb(skb);
        }
}

/**********************************************************************************/

static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        cbd_t *bdp;
        int curidx;
        u16 sc;
        unsigned long flags;

        spin_lock_irqsave(&fep->tx_lock, flags);

        /*
         * Fill in a Tx ring entry
         */
        bdp = fep->cur_tx;

        if (!fep->tx_free || (CBDR_SC(bdp) & BD_ENET_TX_READY)) {
                netif_stop_queue(dev);
                spin_unlock_irqrestore(&fep->tx_lock, flags);

                /*
                 * Ooops.  All transmit buffers are full.  Bail out.
                 * This should not happen, since the tx queue should be stopped.
                 */
                printk(KERN_WARNING DRV_MODULE_NAME
                       ": %s tx queue full!\n", dev->name);
                return NETDEV_TX_BUSY;
        }

        curidx = bdp - fep->tx_bd_base;
        /*
         * Clear all of the status flags.
         */
        CBDC_SC(bdp, BD_ENET_TX_STATS);

        /*
         * Save skb pointer.
         */
        fep->tx_skbuff[curidx] = skb;

        fep->stats.tx_bytes += skb->len;

        /*
         * Push the data cache so the CPM does not get stale memory data.
         */
        CBDW_BUFADDR(bdp, dma_map_single(fep->dev,
                                skb->data, skb->len, DMA_TO_DEVICE));
        CBDW_DATLEN(bdp, skb->len);

        dev->trans_start = jiffies;

        /*
         * If this was the last BD in the ring, start at the beginning again.
         */
        if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0)
                fep->cur_tx++;
        else
                fep->cur_tx = fep->tx_bd_base;

        if (!--fep->tx_free)
                netif_stop_queue(dev);

        /* Trigger transmission start */
        sc = BD_ENET_TX_READY | BD_ENET_TX_INTR |
             BD_ENET_TX_LAST | BD_ENET_TX_TC;

        /* note that while FEC does not have this bit
         * it marks it as available for software use
         * yay for hw reuse :) */
        if (skb->len <= 60)
                sc |= BD_ENET_TX_PAD;
        CBDS_SC(bdp, sc);

        (*fep->ops->tx_kickstart)(dev);

        spin_unlock_irqrestore(&fep->tx_lock, flags);

        return NETDEV_TX_OK;
}
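
/*
 * Editor's note (illustrative, not original text): the <= 60 check
 * reflects the Ethernet minimum frame size of 64 bytes including the
 * 4-byte FCS.  Frames with 60 or fewer header-plus-payload bytes must
 * be padded, and BD_ENET_TX_PAD asks the controller to do so in
 * hardware on the variants that implement it (per the comment above,
 * the FEC merely treats the bit as software-usable), sparing the
 * driver a memset() of the buffer tail.
 */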

static int fs_request_irq(struct net_device *dev, int irq, const char *name,
                irq_handler_t irqf)
{
        struct fs_enet_private *fep = netdev_priv(dev);

        (*fep->ops->pre_request_irq)(dev, irq);
        return request_irq(irq, irqf, IRQF_SHARED, name, dev);
}

static void fs_free_irq(struct net_device *dev, int irq)
{
        struct fs_enet_private *fep = netdev_priv(dev);

        free_irq(irq, dev);
        (*fep->ops->post_free_irq)(dev, irq);
}

static void fs_timeout(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        unsigned long flags;
        int wake = 0;

        fep->stats.tx_errors++;

        spin_lock_irqsave(&fep->lock, flags);

        if (dev->flags & IFF_UP) {
                phy_stop(fep->phydev);
                (*fep->ops->stop)(dev);
                (*fep->ops->restart)(dev);
                phy_start(fep->phydev);
        }

        wake = fep->tx_free && !(CBDR_SC(fep->cur_tx) & BD_ENET_TX_READY);
        spin_unlock_irqrestore(&fep->lock, flags);

        if (wake)
                netif_wake_queue(dev);
}

/*-----------------------------------------------------------------------------
 *  generic link-change handler - should be sufficient for most cases
 *-----------------------------------------------------------------------------*/
static void generic_adjust_link(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        struct phy_device *phydev = fep->phydev;
        int new_state = 0;

        if (phydev->link) {
                /* adjust to duplex mode */
                if (phydev->duplex != fep->oldduplex) {
                        new_state = 1;
                        fep->oldduplex = phydev->duplex;
                }

                if (phydev->speed != fep->oldspeed) {
                        new_state = 1;
                        fep->oldspeed = phydev->speed;
                }

                if (!fep->oldlink) {
                        new_state = 1;
                        fep->oldlink = 1;
                        netif_schedule(dev);
                        netif_carrier_on(dev);
                        netif_start_queue(dev);
                }

                if (new_state)
                        fep->ops->restart(dev);
        } else if (fep->oldlink) {
                new_state = 1;
                fep->oldlink = 0;
                fep->oldspeed = 0;
                fep->oldduplex = -1;
                netif_carrier_off(dev);
                netif_stop_queue(dev);
        }

        if (new_state && netif_msg_link(fep))
                phy_print_status(phydev);
}
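
/*
 * Editor's note (background, not original text): this is the standard
 * phylib adjust_link pattern.  The PHY layer invokes the callback
 * whenever it detects a change; the handler compares the reported
 * link/speed/duplex against the cached old* values, reprograms the MAC
 * only when something actually changed, and mirrors the link state into
 * the net core via netif_carrier_on()/netif_carrier_off().
 */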

static void fs_adjust_link(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        unsigned long flags;

        spin_lock_irqsave(&fep->lock, flags);

        if (fep->ops->adjust_link)
                fep->ops->adjust_link(dev);
        else
                generic_adjust_link(dev);

        spin_unlock_irqrestore(&fep->lock, flags);
}

static int fs_init_phy(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        struct phy_device *phydev;

        fep->oldlink = 0;
        fep->oldspeed = 0;
        fep->oldduplex = -1;
        if (fep->fpi->bus_id)
                phydev = phy_connect(dev, fep->fpi->bus_id, &fs_adjust_link, 0,
                                PHY_INTERFACE_MODE_MII);
        else {
                printk("No phy bus ID specified in BSP code\n");
                return -EINVAL;
        }
        if (IS_ERR(phydev)) {
                printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
                return PTR_ERR(phydev);
        }

        fep->phydev = phydev;

        return 0;
}

static int fs_enet_open(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        int r;
        int err;

        napi_enable(&fep->napi);

        /* Install our interrupt handler. */
        r = fs_request_irq(dev, fep->interrupt, "fs_enet-mac", fs_enet_interrupt);
        if (r != 0) {
                printk(KERN_ERR DRV_MODULE_NAME
                       ": %s Could not allocate FS_ENET IRQ!", dev->name);
                napi_disable(&fep->napi);
                return -EINVAL;
        }

        err = fs_init_phy(dev);
        if (err) {
                napi_disable(&fep->napi);
                return err;
        }
        phy_start(fep->phydev);

        return 0;
}

static int fs_enet_close(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        unsigned long flags;

        netif_stop_queue(dev);
        netif_carrier_off(dev);
        napi_disable(&fep->napi);
        phy_stop(fep->phydev);

        spin_lock_irqsave(&fep->lock, flags);
        spin_lock(&fep->tx_lock);
        (*fep->ops->stop)(dev);
        spin_unlock(&fep->tx_lock);
        spin_unlock_irqrestore(&fep->lock, flags);

        /* release any irqs */
        phy_disconnect(fep->phydev);
        fep->phydev = NULL;
        fs_free_irq(dev, fep->interrupt);

        return 0;
}

static struct net_device_stats *fs_enet_get_stats(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        return &fep->stats;
}

/*************************************************************************/

static void fs_get_drvinfo(struct net_device *dev,
                            struct ethtool_drvinfo *info)
{
        strcpy(info->driver, DRV_MODULE_NAME);
        strcpy(info->version, DRV_MODULE_VERSION);
}

static int fs_get_regs_len(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);

        return (*fep->ops->get_regs_len)(dev);
}

static void fs_get_regs(struct net_device *dev, struct ethtool_regs *regs,
                         void *p)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        unsigned long flags;
        int r, len;

        len = regs->len;

        spin_lock_irqsave(&fep->lock, flags);
        r = (*fep->ops->get_regs)(dev, p, &len);
        spin_unlock_irqrestore(&fep->lock, flags);

        if (r == 0)
                regs->version = 0;
}

static int fs_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        return phy_ethtool_gset(fep->phydev, cmd);
}

static int fs_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        phy_ethtool_sset(fep->phydev, cmd);
        return 0;
}

static int fs_nway_reset(struct net_device *dev)
{
        return 0;
}

static u32 fs_get_msglevel(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        return fep->msg_enable;
}

static void fs_set_msglevel(struct net_device *dev, u32 value)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        fep->msg_enable = value;
}

static const struct ethtool_ops fs_ethtool_ops = {
        .get_drvinfo = fs_get_drvinfo,
        .get_regs_len = fs_get_regs_len,
        .get_settings = fs_get_settings,
        .set_settings = fs_set_settings,
        .nway_reset = fs_nway_reset,
        .get_link = ethtool_op_get_link,
        .get_msglevel = fs_get_msglevel,
        .set_msglevel = fs_set_msglevel,
        .set_tx_csum = ethtool_op_set_tx_csum,  /* local! */
        .set_sg = ethtool_op_set_sg,
        .get_regs = fs_get_regs,
};

static int fs_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&rq->ifr_data;
        unsigned long flags;
        int rc;

        if (!netif_running(dev))
                return -EINVAL;

        spin_lock_irqsave(&fep->lock, flags);
        rc = phy_mii_ioctl(fep->phydev, mii, cmd);
        spin_unlock_irqrestore(&fep->lock, flags);
        return rc;
}

extern int fs_mii_connect(struct net_device *dev);
extern void fs_mii_disconnect(struct net_device *dev);

static struct net_device *fs_init_instance(struct device *dev,
                struct fs_platform_info *fpi)
{
        struct net_device *ndev = NULL;
        struct fs_enet_private *fep = NULL;
        int privsize, i, r, err = 0, registered = 0;

        fpi->fs_no = fs_get_id(fpi);
        /* guard */
        if ((unsigned int)fpi->fs_no >= FS_MAX_INDEX)
                return ERR_PTR(-EINVAL);

        privsize = sizeof(*fep) + (sizeof(struct sk_buff **) *
                            (fpi->rx_ring + fpi->tx_ring));

        ndev = alloc_etherdev(privsize);
        if (!ndev) {
                err = -ENOMEM;
                goto err;
        }

        fep = netdev_priv(ndev);

        fep->dev = dev;
        dev_set_drvdata(dev, ndev);
        fep->fpi = fpi;
        if (fpi->init_ioports)
                fpi->init_ioports((struct fs_platform_info *)fpi);

#ifdef CONFIG_FS_ENET_HAS_FEC
        if (fs_get_fec_index(fpi->fs_no) >= 0)
                fep->ops = &fs_fec_ops;
#endif

#ifdef CONFIG_FS_ENET_HAS_SCC
        if (fs_get_scc_index(fpi->fs_no) >= 0)
                fep->ops = &fs_scc_ops;
#endif

#ifdef CONFIG_FS_ENET_HAS_FCC
        if (fs_get_fcc_index(fpi->fs_no) >= 0)
                fep->ops = &fs_fcc_ops;
#endif

        if (fep->ops == NULL) {
                printk(KERN_ERR DRV_MODULE_NAME
                       ": %s No matching ops found (%d).\n",
                       ndev->name, fpi->fs_no);
                err = -EINVAL;
                goto err;
        }

        r = (*fep->ops->setup_data)(ndev);
        if (r != 0) {
                printk(KERN_ERR DRV_MODULE_NAME
                       ": %s setup_data failed\n",
                        ndev->name);
                err = r;
                goto err;
        }

        /* point rx_skbuff, tx_skbuff */
        fep->rx_skbuff = (struct sk_buff **)&fep[1];
        fep->tx_skbuff = fep->rx_skbuff + fpi->rx_ring;
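
        /*
         * Editor's note (illustrative, not original text): privsize above
         * reserved room for both pointer arrays directly behind the
         * private struct, so a single alloc_etherdev() yields:
         *
         *      [ struct fs_enet_private | rx_ring skb ptrs | tx_ring skb ptrs ]
         *        ^fep                     ^&fep[1]           ^rx_skbuff + rx_ring
         *
         * &fep[1] is the first byte past the struct, which is why the
         * casts here are safe without any further allocation.
         */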

        /* init locks */
        spin_lock_init(&fep->lock);
        spin_lock_init(&fep->tx_lock);

        /*
         * Set the Ethernet address.
         */
        for (i = 0; i < 6; i++)
                ndev->dev_addr[i] = fpi->macaddr[i];

        r = (*fep->ops->allocate_bd)(ndev);

        if (fep->ring_base == NULL) {
                printk(KERN_ERR DRV_MODULE_NAME
                       ": %s buffer descriptor alloc failed (%d).\n", ndev->name, r);
                err = r;
                goto err;
        }

        /*
         * Set receive and transmit descriptor base.
         */
        fep->rx_bd_base = fep->ring_base;
        fep->tx_bd_base = fep->rx_bd_base + fpi->rx_ring;

        /* initialize ring size variables */
        fep->tx_ring = fpi->tx_ring;
        fep->rx_ring = fpi->rx_ring;

        /*
         * The FEC Ethernet specific entries in the device structure.
         */
        ndev->open = fs_enet_open;
        ndev->hard_start_xmit = fs_enet_start_xmit;
        ndev->tx_timeout = fs_timeout;
        ndev->watchdog_timeo = 2 * HZ;
        ndev->stop = fs_enet_close;
        ndev->get_stats = fs_enet_get_stats;
        ndev->set_multicast_list = fs_set_multicast_list;

#ifdef CONFIG_NET_POLL_CONTROLLER
        ndev->poll_controller = fs_enet_netpoll;
#endif

        netif_napi_add(ndev, &fep->napi,
                       fs_enet_rx_napi, fpi->napi_weight);

        ndev->ethtool_ops = &fs_ethtool_ops;
        ndev->do_ioctl = fs_ioctl;

        init_timer(&fep->phy_timer_list);

        netif_carrier_off(ndev);

        err = register_netdev(ndev);
        if (err != 0) {
                printk(KERN_ERR DRV_MODULE_NAME
                       ": %s register_netdev failed.\n", ndev->name);
                goto err;
        }
        registered = 1;

        return ndev;

err:
        if (ndev != NULL) {
                if (registered)
                        unregister_netdev(ndev);

                if (fep != NULL) {
                        (*fep->ops->free_bd)(ndev);
                        (*fep->ops->cleanup_data)(ndev);
                }

                free_netdev(ndev);
        }

        dev_set_drvdata(dev, NULL);

        return ERR_PTR(err);
}

static int fs_cleanup_instance(struct net_device *ndev)
{
        struct fs_enet_private *fep;
        const struct fs_platform_info *fpi;
        struct device *dev;

        if (ndev == NULL)
                return -EINVAL;

        fep = netdev_priv(ndev);
        if (fep == NULL)
                return -EINVAL;

        fpi = fep->fpi;

        unregister_netdev(ndev);

        dma_free_coherent(fep->dev, (fpi->tx_ring + fpi->rx_ring) * sizeof(cbd_t),
                          fep->ring_base, fep->ring_mem_addr);

        /* reset it */
        (*fep->ops->cleanup_data)(ndev);

        dev = fep->dev;
        if (dev != NULL) {
                dev_set_drvdata(dev, NULL);
                fep->dev = NULL;
        }

        free_netdev(ndev);

        return 0;
}

/**************************************************************************************/

/* handy pointer to the immap */
void *fs_enet_immap = NULL;

static int setup_immap(void)
{
        phys_addr_t paddr = 0;
        unsigned long size = 0;

#ifdef CONFIG_CPM1
        paddr = IMAP_ADDR;
        size = 0x10000; /* map 64K */
#endif

#ifdef CONFIG_CPM2
        paddr = CPM_MAP_ADDR;
        size = 0x40000; /* map 256K */
#endif
        fs_enet_immap = ioremap(paddr, size);
        if (fs_enet_immap == NULL)
                return -EBADF;  /* XXX ahem; maybe just BUG_ON? */

        return 0;
}
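
/*
 * Editor's note (background, not original text): the "immap" is the
 * internal memory map of the CPM-based SoCs this driver serves; the
 * MAC variants reach controller registers through this single shared
 * ioremap() of the on-chip register block (64K on CPM1 parts, 256K on
 * CPM2), performed once at module load and torn down below.
 */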

static void cleanup_immap(void)
{
        if (fs_enet_immap != NULL) {
                iounmap(fs_enet_immap);
                fs_enet_immap = NULL;
        }
}

/**************************************************************************************/

static int __devinit fs_enet_probe(struct device *dev)
{
        struct net_device *ndev;

        /* no fixup - no device */
        if (dev->platform_data == NULL) {
                printk(KERN_INFO "fs_enet: "
                                "probe called with no platform data; "
                                "remove unused devices\n");
                return -ENODEV;
        }

        ndev = fs_init_instance(dev, dev->platform_data);
        if (IS_ERR(ndev))
                return PTR_ERR(ndev);
        return 0;
}

static int fs_enet_remove(struct device *dev)
{
        return fs_cleanup_instance(dev_get_drvdata(dev));
}

static struct device_driver fs_enet_fec_driver = {
        .name           = "fsl-cpm-fec",
        .bus            = &platform_bus_type,
        .probe          = fs_enet_probe,
        .remove         = fs_enet_remove,
#ifdef CONFIG_PM
/*      .suspend        = fs_enet_suspend,      TODO */
/*      .resume         = fs_enet_resume,       TODO */
#endif
};

static struct device_driver fs_enet_scc_driver = {
        .name           = "fsl-cpm-scc",
        .bus            = &platform_bus_type,
        .probe          = fs_enet_probe,
        .remove         = fs_enet_remove,
#ifdef CONFIG_PM
/*      .suspend        = fs_enet_suspend,      TODO */
/*      .resume         = fs_enet_resume,       TODO */
#endif
};

static struct device_driver fs_enet_fcc_driver = {
        .name           = "fsl-cpm-fcc",
        .bus            = &platform_bus_type,
        .probe          = fs_enet_probe,
        .remove         = fs_enet_remove,
#ifdef CONFIG_PM
/*      .suspend        = fs_enet_suspend,      TODO */
/*      .resume         = fs_enet_resume,       TODO */
#endif
};

static int __init fs_init(void)
{
        int r;

        printk(KERN_INFO "%s", version);

        r = setup_immap();
        if (r != 0)
                return r;

#ifdef CONFIG_FS_ENET_HAS_FCC
        /* let's insert mii stuff */
        r = fs_enet_mdio_bb_init();

        if (r != 0) {
                printk(KERN_ERR DRV_MODULE_NAME
                        ": BB PHY init failed.\n");
                goto err;
        }
        r = driver_register(&fs_enet_fcc_driver);
        if (r != 0)
                goto err;
#endif

#ifdef CONFIG_FS_ENET_HAS_FEC
        r = fs_enet_mdio_fec_init();
        if (r != 0) {
                printk(KERN_ERR DRV_MODULE_NAME
                        ": FEC PHY init failed.\n");
                goto err;
        }

        r = driver_register(&fs_enet_fec_driver);
        if (r != 0)
                goto err;
#endif

#ifdef CONFIG_FS_ENET_HAS_SCC
        r = driver_register(&fs_enet_scc_driver);
        if (r != 0)
                goto err;
#endif

        return 0;
err:
        cleanup_immap();
        return r;
}

static void __exit fs_cleanup(void)
{
        driver_unregister(&fs_enet_fec_driver);
        driver_unregister(&fs_enet_fcc_driver);
        driver_unregister(&fs_enet_scc_driver);
        cleanup_immap();
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void fs_enet_netpoll(struct net_device *dev)
{
        disable_irq(dev->irq);
        fs_enet_interrupt(dev->irq, dev);
        enable_irq(dev->irq);
}
#endif

/**************************************************************************************/

module_init(fs_init);
module_exit(fs_cleanup);