1 /*
2  * drivers/net/gianfar.c
3  *
4  * Gianfar Ethernet Driver
5  * Driver for FEC on MPC8540 and TSEC on MPC8540/MPC8560
6  * Based on 8260_io/fcc_enet.c
7  *
8  * Author: Andy Fleming
9  * Maintainer: Kumar Gala (kumar.gala@freescale.com)
10  *
11  * Copyright (c) 2002-2004 Freescale Semiconductor, Inc.
12  *
13  * This program is free software; you can redistribute  it and/or modify it
14  * under  the terms of  the GNU General  Public License as published by the
15  * Free Software Foundation;  either version 2 of the  License, or (at your
16  * option) any later version.
17  *
18  *  Gianfar:  AKA Lambda Draconis, "Dragon"
19  *  RA 11 31 24.2
20  *  Dec +69 19 52
21  *  V 3.84
22  *  B-V +1.62
23  *
24  *  Theory of operation
25  *  This driver is designed for the non-CPM ethernet controllers
26  *  on the 85xx and 83xx family of integrated processors
27  *
28  *  The driver is initialized through platform_device.  The structures
29  *  describing the board's configuration are defined in
30  *  arch/ppc/platforms (though I do not
31  *  discount the possibility that other architectures could one
32  *  day be supported).
33  *
34  *  The Gianfar Ethernet Controller uses a ring of buffer
35  *  descriptors.  The beginning is indicated by a register
36  *  pointing to the physical address of the start of the ring.
37  *  The end is determined by a "wrap" bit being set in the
38  *  last descriptor of the ring.
39  *
40  *  When a packet is received, the RXF bit in the
41  *  IEVENT register is set, triggering an interrupt when the
42  *  corresponding bit in the IMASK register is also set (if
43  *  interrupt coalescing is active, then the interrupt may not
44  *  happen immediately, but will wait until either a set number
45  *  of frames or amount of time has passed).  In NAPI, the
46  *  interrupt handler will signal there is work to be done, and
47  *  exit.  Without NAPI, the packet(s) will be handled
48  *  immediately.  Both methods will start at the last known empty
49  *  descriptor, and process every subsequent descriptor until there
50  *  are none left with data (NAPI will stop after a set number of
51  *  packets to give time to other tasks, but will eventually
52  *  process all the packets).  The data arrives inside a
53  *  pre-allocated skb, and so after the skb is passed up to the
54  *  stack, a new skb must be allocated, and the address field in
55  *  the buffer descriptor must be updated to indicate this new
56  *  skb.
57  *
58  *  When the kernel requests that a packet be transmitted, the
59  *  driver starts where it left off last time, and points the
60  *  descriptor at the buffer which was passed in.  The driver
61  *  then informs the DMA engine that there are packets ready to
62  *  be transmitted.  Once the controller is finished transmitting
63  *  the packet, an interrupt may be triggered (under the same
64  *  conditions as for reception, but depending on the TXF bit).
65  *  The driver then cleans up the buffer.
66  */
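
/*
 *  Illustrative sketch (not driver code): how a ring terminated by a
 *  wrap bit is walked.  The helper name is hypothetical; the field and
 *  flag names match the rxbd8 usage later in this file.
 *
 *      static struct rxbd8 *gfar_next_rxbd(struct rxbd8 *bdp,
 *                                          struct rxbd8 *base)
 *      {
 *              return (bdp->status & RXBD_WRAP) ? base : bdp + 1;
 *      }
 *
 *  Both the rx and tx cleanup loops below advance exactly this way.
 */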
67
68 #include <linux/config.h>
69 #include <linux/kernel.h>
70 #include <linux/sched.h>
71 #include <linux/string.h>
72 #include <linux/errno.h>
73 #include <linux/unistd.h>
74 #include <linux/slab.h>
75 #include <linux/interrupt.h>
76 #include <linux/init.h>
77 #include <linux/delay.h>
78 #include <linux/netdevice.h>
79 #include <linux/etherdevice.h>
80 #include <linux/skbuff.h>
81 #include <linux/if_vlan.h>
82 #include <linux/spinlock.h>
83 #include <linux/mm.h>
84 #include <linux/platform_device.h>
85 #include <linux/ip.h>
86 #include <linux/tcp.h>
87 #include <linux/udp.h>
88
89 #include <asm/io.h>
90 #include <asm/irq.h>
91 #include <asm/uaccess.h>
92 #include <linux/module.h>
93 #include <linux/version.h>
94 #include <linux/dma-mapping.h>
95 #include <linux/crc32.h>
96 #include <linux/mii.h>
97 #include <linux/phy.h>
98
99 #include "gianfar.h"
100 #include "gianfar_mii.h"
101
102 #define TX_TIMEOUT      (1*HZ)
103 #define SKB_ALLOC_TIMEOUT 1000000
104 #undef BRIEF_GFAR_ERRORS
105 #undef VERBOSE_GFAR_ERRORS
106
107 #ifdef CONFIG_GFAR_NAPI
108 #define RECEIVE(x) netif_receive_skb(x)
109 #else
110 #define RECEIVE(x) netif_rx(x)
111 #endif
112
113 const char gfar_driver_name[] = "Gianfar Ethernet";
114 const char gfar_driver_version[] = "1.2";
115
116 static int gfar_enet_open(struct net_device *dev);
117 static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
118 static void gfar_timeout(struct net_device *dev);
119 static int gfar_close(struct net_device *dev);
120 struct sk_buff *gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp);
121 static struct net_device_stats *gfar_get_stats(struct net_device *dev);
122 static int gfar_set_mac_address(struct net_device *dev);
123 static int gfar_change_mtu(struct net_device *dev, int new_mtu);
124 static irqreturn_t gfar_error(int irq, void *dev_id, struct pt_regs *regs);
125 static irqreturn_t gfar_transmit(int irq, void *dev_id, struct pt_regs *regs);
126 static irqreturn_t gfar_interrupt(int irq, void *dev_id, struct pt_regs *regs);
127 static void adjust_link(struct net_device *dev);
128 static void init_registers(struct net_device *dev);
129 static int init_phy(struct net_device *dev);
130 static int gfar_probe(struct device *device);
131 static int gfar_remove(struct device *device);
132 static void free_skb_resources(struct gfar_private *priv);
133 static void gfar_set_multi(struct net_device *dev);
134 static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
135 #ifdef CONFIG_GFAR_NAPI
136 static int gfar_poll(struct net_device *dev, int *budget);
137 #endif
138 int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit);
139 static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, int length);
140 static void gfar_vlan_rx_register(struct net_device *netdev,
141                                 struct vlan_group *grp);
142 static void gfar_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid);
143
144 extern struct ethtool_ops gfar_ethtool_ops;
145
146 MODULE_AUTHOR("Freescale Semiconductor, Inc");
147 MODULE_DESCRIPTION("Gianfar Ethernet Driver");
148 MODULE_LICENSE("GPL");
149
150 int gfar_uses_fcb(struct gfar_private *priv)
151 {
152         if (priv->vlan_enable || priv->rx_csum_enable)
153                 return 1;
154         else
155                 return 0;
156 }
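
/* Note: the "FCB" is the GMAC_FCB_LEN-byte frame control block that
 * sits in front of a frame whenever VLAN tagging or checksum offload
 * is in use; the driver prepends it on tx (gfar_add_fcb) and strips
 * it on rx (gfar_get_fcb). */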
157
158 /* Set up the ethernet device structure, private data,
159  * and anything else we need before we start */
160 static int gfar_probe(struct device *device)
161 {
162         u32 tempval;
163         struct net_device *dev = NULL;
164         struct gfar_private *priv = NULL;
165         struct platform_device *pdev = to_platform_device(device);
166         struct gianfar_platform_data *einfo;
167         struct resource *r;
168         int idx;
169         int err = 0;
170
171         einfo = (struct gianfar_platform_data *) pdev->dev.platform_data;
172
173         if (NULL == einfo) {
174                 printk(KERN_ERR "gfar %d: Missing additional data!\n",
175                        pdev->id);
176
177                 return -ENODEV;
178         }
179
180         /* Create an ethernet device instance */
181         dev = alloc_etherdev(sizeof (*priv));
182
183         if (NULL == dev)
184                 return -ENOMEM;
185
186         priv = netdev_priv(dev);
187
188         /* Set the info in the priv to the current info */
189         priv->einfo = einfo;
190
191         /* fill out IRQ fields */
192         if (einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
193                 priv->interruptTransmit = platform_get_irq_byname(pdev, "tx");
194                 priv->interruptReceive = platform_get_irq_byname(pdev, "rx");
195                 priv->interruptError = platform_get_irq_byname(pdev, "error");
196         } else {
197                 priv->interruptTransmit = platform_get_irq(pdev, 0);
198         }
199
200         /* get a pointer to the register memory */
201         r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
202         priv->regs = (struct gfar *)
203                 ioremap(r->start, sizeof (struct gfar));
204
205         if (NULL == priv->regs) {
206                 err = -ENOMEM;
207                 goto regs_fail;
208         }
209
210         spin_lock_init(&priv->lock);
211
212         dev_set_drvdata(device, dev);
213
214         /* Stop the DMA engine now, in case it was running before */
215         /* (The firmware could have used it, and left it running). */
216         /* To do this, we write Graceful Receive Stop and Graceful */
217         /* Transmit Stop, and then wait until the corresponding bits */
218         /* in IEVENT indicate the stops have completed. */
219         tempval = gfar_read(&priv->regs->dmactrl);
220         tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
221         gfar_write(&priv->regs->dmactrl, tempval);
222
223         tempval = gfar_read(&priv->regs->dmactrl);
224         tempval |= (DMACTRL_GRS | DMACTRL_GTS);
225         gfar_write(&priv->regs->dmactrl, tempval);
226
227         while (!(gfar_read(&priv->regs->ievent) & (IEVENT_GRSC | IEVENT_GTSC)))
228                 cpu_relax();
229
230         /* Reset MAC layer */
231         gfar_write(&priv->regs->maccfg1, MACCFG1_SOFT_RESET);
232
233         tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
234         gfar_write(&priv->regs->maccfg1, tempval);
235
236         /* Initialize MACCFG2. */
237         gfar_write(&priv->regs->maccfg2, MACCFG2_INIT_SETTINGS);
238
239         /* Initialize ECNTRL */
240         gfar_write(&priv->regs->ecntrl, ECNTRL_INIT_SETTINGS);
241
242         /* Copy the station address into the dev structure, */
243         memcpy(dev->dev_addr, einfo->mac_addr, MAC_ADDR_LEN);
244
245         /* Set the dev->base_addr to the gfar reg region */
246         dev->base_addr = (unsigned long) (priv->regs);
247
248         SET_MODULE_OWNER(dev);
249         SET_NETDEV_DEV(dev, device);
250
251         /* Fill in the dev structure */
252         dev->open = gfar_enet_open;
253         dev->hard_start_xmit = gfar_start_xmit;
254         dev->tx_timeout = gfar_timeout;
255         dev->watchdog_timeo = TX_TIMEOUT;
256 #ifdef CONFIG_GFAR_NAPI
257         dev->poll = gfar_poll;
258         dev->weight = GFAR_DEV_WEIGHT;
259 #endif
260         dev->stop = gfar_close;
261         dev->get_stats = gfar_get_stats;
262         dev->change_mtu = gfar_change_mtu;
263         dev->mtu = 1500;
264         dev->set_multicast_list = gfar_set_multi;
265
266         dev->ethtool_ops = &gfar_ethtool_ops;
267
268         if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
269                 priv->rx_csum_enable = 1;
270                 dev->features |= NETIF_F_IP_CSUM;
271         } else
272                 priv->rx_csum_enable = 0;
273
274         priv->vlgrp = NULL;
275
276         if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
277                 dev->vlan_rx_register = gfar_vlan_rx_register;
278                 dev->vlan_rx_kill_vid = gfar_vlan_rx_kill_vid;
279
280                 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
281
282                 priv->vlan_enable = 1;
283         }
284
285         if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
286                 priv->extended_hash = 1;
287                 priv->hash_width = 9;
288
289                 priv->hash_regs[0] = &priv->regs->igaddr0;
290                 priv->hash_regs[1] = &priv->regs->igaddr1;
291                 priv->hash_regs[2] = &priv->regs->igaddr2;
292                 priv->hash_regs[3] = &priv->regs->igaddr3;
293                 priv->hash_regs[4] = &priv->regs->igaddr4;
294                 priv->hash_regs[5] = &priv->regs->igaddr5;
295                 priv->hash_regs[6] = &priv->regs->igaddr6;
296                 priv->hash_regs[7] = &priv->regs->igaddr7;
297                 priv->hash_regs[8] = &priv->regs->gaddr0;
298                 priv->hash_regs[9] = &priv->regs->gaddr1;
299                 priv->hash_regs[10] = &priv->regs->gaddr2;
300                 priv->hash_regs[11] = &priv->regs->gaddr3;
301                 priv->hash_regs[12] = &priv->regs->gaddr4;
302                 priv->hash_regs[13] = &priv->regs->gaddr5;
303                 priv->hash_regs[14] = &priv->regs->gaddr6;
304                 priv->hash_regs[15] = &priv->regs->gaddr7;
305
306         } else {
307                 priv->extended_hash = 0;
308                 priv->hash_width = 8;
309
310                 priv->hash_regs[0] = &priv->regs->gaddr0;
311                 priv->hash_regs[1] = &priv->regs->gaddr1;
312                 priv->hash_regs[2] = &priv->regs->gaddr2;
313                 priv->hash_regs[3] = &priv->regs->gaddr3;
314                 priv->hash_regs[4] = &priv->regs->gaddr4;
315                 priv->hash_regs[5] = &priv->regs->gaddr5;
316                 priv->hash_regs[6] = &priv->regs->gaddr6;
317                 priv->hash_regs[7] = &priv->regs->gaddr7;
318         }
319
320         if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_PADDING)
321                 priv->padding = DEFAULT_PADDING;
322         else
323                 priv->padding = 0;
324
325         dev->hard_header_len += priv->padding;
326
327         if (dev->features & NETIF_F_IP_CSUM)
328                 dev->hard_header_len += GMAC_FCB_LEN;
329
330         priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
331 #ifdef CONFIG_GFAR_BUFSTASH
332         priv->rx_stash_size = STASH_LENGTH;
333 #endif
334         priv->tx_ring_size = DEFAULT_TX_RING_SIZE;
335         priv->rx_ring_size = DEFAULT_RX_RING_SIZE;
336
337         priv->txcoalescing = DEFAULT_TX_COALESCE;
338         priv->txcount = DEFAULT_TXCOUNT;
339         priv->txtime = DEFAULT_TXTIME;
340         priv->rxcoalescing = DEFAULT_RX_COALESCE;
341         priv->rxcount = DEFAULT_RXCOUNT;
342         priv->rxtime = DEFAULT_RXTIME;
343
344         /* Enable most messages by default */
345         priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;
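        /* Worked example: with NETIF_MSG_IFUP = 0x0020 (its value in
         * netdevice.h of this era), (0x0020 << 1) - 1 = 0x003f, enabling
         * every message class from NETIF_MSG_DRV through NETIF_MSG_IFUP. */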
346
347         err = register_netdev(dev);
348
349         if (err) {
350                 printk(KERN_ERR "%s: Cannot register net device, aborting.\n",
351                                 dev->name);
352                 goto register_fail;
353         }
354
355         /* Print out the device info */
356         printk(KERN_INFO DEVICE_NAME, dev->name);
357         for (idx = 0; idx < 6; idx++)
358                 printk("%2.2x%c", dev->dev_addr[idx], idx == 5 ? ' ' : ':');
359         printk("\n");
360
361         /* Even more device info helps when determining which kernel */
362         /* provided which set of benchmarks.  The NAPI setting is a */
363         /* compile-time option, so it is the same for every device */
364 #ifdef CONFIG_GFAR_NAPI
365         printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name);
366 #else
367         printk(KERN_INFO "%s: Running with NAPI disabled\n", dev->name);
368 #endif
369         printk(KERN_INFO "%s: %d/%d RX/TX BD ring size\n",
370                dev->name, priv->rx_ring_size, priv->tx_ring_size);
371
372         return 0;
373
374 register_fail:
375         iounmap((void *) priv->regs);
376 regs_fail:
377         free_netdev(dev);
378         return err;
379 }
380
381 static int gfar_remove(struct device *device)
382 {
383         struct net_device *dev = dev_get_drvdata(device);
384         struct gfar_private *priv = netdev_priv(dev);
385
386         dev_set_drvdata(device, NULL);
387
388         iounmap((void *) priv->regs);
389         free_netdev(dev);
390
391         return 0;
392 }
393
394
395 /* Initializes driver's PHY state, and attaches to the PHY.
396  * Returns 0 on success.
397  */
398 static int init_phy(struct net_device *dev)
399 {
400         struct gfar_private *priv = netdev_priv(dev);
401         uint gigabit_support =
402                 priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
403                 SUPPORTED_1000baseT_Full : 0;
404         struct phy_device *phydev;
405
406         priv->oldlink = 0;
407         priv->oldspeed = 0;
408         priv->oldduplex = -1;
409
410         phydev = phy_connect(dev, priv->einfo->bus_id, &adjust_link, 0);
411
412         if (IS_ERR(phydev)) {
413                 printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
414                 return PTR_ERR(phydev);
415         }
416
417         /* Remove any features not supported by the controller */
418         phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
419         phydev->advertising = phydev->supported;
420
421         priv->phydev = phydev;
422
423         return 0;
424 }
425
426 static void init_registers(struct net_device *dev)
427 {
428         struct gfar_private *priv = netdev_priv(dev);
429
430         /* Clear IEVENT */
431         gfar_write(&priv->regs->ievent, IEVENT_INIT_CLEAR);
432
433         /* Initialize IMASK */
434         gfar_write(&priv->regs->imask, IMASK_INIT_CLEAR);
435
436         /* Init hash registers to zero */
437         gfar_write(&priv->regs->igaddr0, 0);
438         gfar_write(&priv->regs->igaddr1, 0);
439         gfar_write(&priv->regs->igaddr2, 0);
440         gfar_write(&priv->regs->igaddr3, 0);
441         gfar_write(&priv->regs->igaddr4, 0);
442         gfar_write(&priv->regs->igaddr5, 0);
443         gfar_write(&priv->regs->igaddr6, 0);
444         gfar_write(&priv->regs->igaddr7, 0);
445
446         gfar_write(&priv->regs->gaddr0, 0);
447         gfar_write(&priv->regs->gaddr1, 0);
448         gfar_write(&priv->regs->gaddr2, 0);
449         gfar_write(&priv->regs->gaddr3, 0);
450         gfar_write(&priv->regs->gaddr4, 0);
451         gfar_write(&priv->regs->gaddr5, 0);
452         gfar_write(&priv->regs->gaddr6, 0);
453         gfar_write(&priv->regs->gaddr7, 0);
454
455         /* Zero out the rmon mib registers if it has them */
456         if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
457                 memset((void *) &(priv->regs->rmon), 0,
458                        sizeof (struct rmon_mib));
459
460                 /* Mask off the CAM interrupts */
461                 gfar_write(&priv->regs->rmon.cam1, 0xffffffff);
462                 gfar_write(&priv->regs->rmon.cam2, 0xffffffff);
463         }
464
465         /* Initialize the max receive buffer length */
466         gfar_write(&priv->regs->mrblr, priv->rx_buffer_size);
467
468 #ifdef CONFIG_GFAR_BUFSTASH
469         /* If we are stashing buffers, we need to set the
470          * extraction length to the size of the buffer */
471         gfar_write(&priv->regs->attreli, priv->rx_stash_size << 16);
472 #endif
473
474         /* Initialize the Minimum Frame Length Register */
475         gfar_write(&priv->regs->minflr, MINFLR_INIT_SETTINGS);
476
477         /* Setup Attributes so that snooping is on for rx */
478         gfar_write(&priv->regs->attr, ATTR_INIT_SETTINGS);
479         gfar_write(&priv->regs->attreli, ATTRELI_INIT_SETTINGS);
480
481         /* Assign the TBI an address which won't conflict with the PHYs */
482         gfar_write(&priv->regs->tbipa, TBIPA_VALUE);
483 }
484
485
486 /* Halt the receive and transmit queues */
487 void gfar_halt(struct net_device *dev)
488 {
489         struct gfar_private *priv = netdev_priv(dev);
490         struct gfar *regs = priv->regs;
491         u32 tempval;
492
493         /* Mask all interrupts */
494         gfar_write(&regs->imask, IMASK_INIT_CLEAR);
495
496         /* Clear all interrupts */
497         gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
498
499         /* Stop the DMA, and wait for it to stop */
500         tempval = gfar_read(&priv->regs->dmactrl);
501         if ((tempval & (DMACTRL_GRS | DMACTRL_GTS))
502             != (DMACTRL_GRS | DMACTRL_GTS)) {
503                 tempval |= (DMACTRL_GRS | DMACTRL_GTS);
504                 gfar_write(&priv->regs->dmactrl, tempval);
505
506                 while (!(gfar_read(&priv->regs->ievent) &
507                          (IEVENT_GRSC | IEVENT_GTSC)))
508                         cpu_relax();
509         }
510
511         /* Disable Rx and Tx */
512         tempval = gfar_read(&regs->maccfg1);
513         tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
514         gfar_write(&regs->maccfg1, tempval);
515 }
516
517 void stop_gfar(struct net_device *dev)
518 {
519         struct gfar_private *priv = netdev_priv(dev);
520         struct gfar *regs = priv->regs;
521         unsigned long flags;
522
523         phy_stop(priv->phydev);
524
525         /* Lock it down */
526         spin_lock_irqsave(&priv->lock, flags);
527
528         gfar_halt(dev);
529
530         spin_unlock_irqrestore(&priv->lock, flags);
531
532         /* Free the IRQs */
533         if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
534                 free_irq(priv->interruptError, dev);
535                 free_irq(priv->interruptTransmit, dev);
536                 free_irq(priv->interruptReceive, dev);
537         } else {
538                 free_irq(priv->interruptTransmit, dev);
539         }
540
541         free_skb_resources(priv);
542
543         dma_free_coherent(NULL,
544                         sizeof(struct txbd8)*priv->tx_ring_size
545                         + sizeof(struct rxbd8)*priv->rx_ring_size,
546                         priv->tx_bd_base,
547                         gfar_read(&regs->tbase0));
548 }
549
550 /* If there are any tx skbs or rx skbs still around, free them.
551  * Then free tx_skbuff and rx_skbuff */
552 static void free_skb_resources(struct gfar_private *priv)
553 {
554         struct rxbd8 *rxbdp;
555         struct txbd8 *txbdp;
556         int i;
557
558         /* Go through all the buffer descriptors and free their data buffers */
559         txbdp = priv->tx_bd_base;
560
561         for (i = 0; i < priv->tx_ring_size; i++) {
562
563                 if (priv->tx_skbuff[i]) {
564                         dma_unmap_single(NULL, txbdp->bufPtr,
565                                         txbdp->length,
566                                         DMA_TO_DEVICE);
567                         dev_kfree_skb_any(priv->tx_skbuff[i]);
568                         priv->tx_skbuff[i] = NULL;
569                 }
570         }
571
572         kfree(priv->tx_skbuff);
573
574         rxbdp = priv->rx_bd_base;
575
576         /* rx_skbuff is not guaranteed to be allocated, so only
577          * free it and its contents if it is allocated */
578         if (priv->rx_skbuff != NULL) {
579                 for (i = 0; i < priv->rx_ring_size; i++) {
580                         if (priv->rx_skbuff[i]) {
581                                 dma_unmap_single(NULL, rxbdp->bufPtr,
582                                                 priv->rx_buffer_size
583                                                 + RXBUF_ALIGNMENT,
584                                                 DMA_FROM_DEVICE);
585
586                                 dev_kfree_skb_any(priv->rx_skbuff[i]);
587                                 priv->rx_skbuff[i] = NULL;
588                         }
589
590                         rxbdp->status = 0;
591                         rxbdp->length = 0;
592                         rxbdp->bufPtr = 0;
593
594                         rxbdp++;
595                 }
596
597                 kfree(priv->rx_skbuff);
598         }
599 }
600
601 void gfar_start(struct net_device *dev)
602 {
603         struct gfar_private *priv = netdev_priv(dev);
604         struct gfar *regs = priv->regs;
605         u32 tempval;
606
607         /* Enable Rx and Tx in MACCFG1 */
608         tempval = gfar_read(&regs->maccfg1);
609         tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
610         gfar_write(&regs->maccfg1, tempval);
611
612         /* Initialize DMACTRL to have WWR and WOP */
613         tempval = gfar_read(&priv->regs->dmactrl);
614         tempval |= DMACTRL_INIT_SETTINGS;
615         gfar_write(&priv->regs->dmactrl, tempval);
616
617         /* Clear THLT, so that the DMA starts polling now */
618         gfar_write(&regs->tstat, TSTAT_CLEAR_THALT);
619
620         /* Make sure we aren't stopped */
621         tempval = gfar_read(&priv->regs->dmactrl);
622         tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
623         gfar_write(&priv->regs->dmactrl, tempval);
624
625         /* Unmask the interrupts we look for */
626         gfar_write(&regs->imask, IMASK_DEFAULT);
627 }
628
629 /* Bring the controller up and running */
630 int startup_gfar(struct net_device *dev)
631 {
632         struct txbd8 *txbdp;
633         struct rxbd8 *rxbdp;
634         dma_addr_t addr;
635         unsigned long vaddr;
636         int i;
637         struct gfar_private *priv = netdev_priv(dev);
638         struct gfar *regs = priv->regs;
639         int err = 0;
640         u32 rctrl = 0;
641
642         gfar_write(&regs->imask, IMASK_INIT_CLEAR);
643
644         /* Allocate memory for the buffer descriptors */
645         vaddr = (unsigned long) dma_alloc_coherent(NULL,
646                         sizeof (struct txbd8) * priv->tx_ring_size +
647                         sizeof (struct rxbd8) * priv->rx_ring_size,
648                         &addr, GFP_KERNEL);
649
650         if (vaddr == 0) {
651                 if (netif_msg_ifup(priv))
652                         printk(KERN_ERR "%s: Could not allocate buffer descriptors!\n",
653                                         dev->name);
654                 return -ENOMEM;
655         }
656
657         priv->tx_bd_base = (struct txbd8 *) vaddr;
658
659         /* enet DMA only understands physical addresses */
660         gfar_write(&regs->tbase0, addr);
661
662         /* Start the rx descriptor ring where the tx ring leaves off */
663         addr = addr + sizeof (struct txbd8) * priv->tx_ring_size;
664         vaddr = vaddr + sizeof (struct txbd8) * priv->tx_ring_size;
665         priv->rx_bd_base = (struct rxbd8 *) vaddr;
666         gfar_write(&regs->rbase0, addr);
667
668         /* Setup the skbuff rings */
669         priv->tx_skbuff =
670             (struct sk_buff **) kmalloc(sizeof (struct sk_buff *) *
671                                         priv->tx_ring_size, GFP_KERNEL);
672
673         if (NULL == priv->tx_skbuff) {
674                 if (netif_msg_ifup(priv))
675                         printk(KERN_ERR "%s: Could not allocate tx_skbuff\n",
676                                         dev->name);
677                 err = -ENOMEM;
678                 goto tx_skb_fail;
679         }
680
681         for (i = 0; i < priv->tx_ring_size; i++)
682                 priv->tx_skbuff[i] = NULL;
683
684         priv->rx_skbuff =
685             (struct sk_buff **) kmalloc(sizeof (struct sk_buff *) *
686                                         priv->rx_ring_size, GFP_KERNEL);
687
688         if (NULL == priv->rx_skbuff) {
689                 if (netif_msg_ifup(priv))
690                         printk(KERN_ERR "%s: Could not allocate rx_skbuff\n",
691                                         dev->name);
692                 err = -ENOMEM;
693                 goto rx_skb_fail;
694         }
695
696         for (i = 0; i < priv->rx_ring_size; i++)
697                 priv->rx_skbuff[i] = NULL;
698
699         /* Initialize some variables in our dev structure */
700         priv->dirty_tx = priv->cur_tx = priv->tx_bd_base;
701         priv->cur_rx = priv->rx_bd_base;
702         priv->skb_curtx = priv->skb_dirtytx = 0;
703         priv->skb_currx = 0;
704
705         /* Initialize Transmit Descriptor Ring */
706         txbdp = priv->tx_bd_base;
707         for (i = 0; i < priv->tx_ring_size; i++) {
708                 txbdp->status = 0;
709                 txbdp->length = 0;
710                 txbdp->bufPtr = 0;
711                 txbdp++;
712         }
713
714         /* Set the last descriptor in the ring to indicate wrap */
715         txbdp--;
716         txbdp->status |= TXBD_WRAP;
717
718         rxbdp = priv->rx_bd_base;
719         for (i = 0; i < priv->rx_ring_size; i++) {
720                 struct sk_buff *skb = NULL;
721
722                 rxbdp->status = 0;
723
724                 skb = gfar_new_skb(dev, rxbdp);
725
726                 priv->rx_skbuff[i] = skb;
727
728                 rxbdp++;
729         }
730
731         /* Set the last descriptor in the ring to wrap */
732         rxbdp--;
733         rxbdp->status |= RXBD_WRAP;
734
735         /* If the device has multiple interrupts, register for
736          * them.  Otherwise, only register for the one */
737         if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
738                 /* Install our interrupt handlers for Error,
739                  * Transmit, and Receive */
740                 if (request_irq(priv->interruptError, gfar_error,
741                                 0, "enet_error", dev) < 0) {
742                         if (netif_msg_intr(priv))
743                                 printk(KERN_ERR "%s: Can't get IRQ %d\n",
744                                         dev->name, priv->interruptError);
745
746                         err = -1;
747                         goto err_irq_fail;
748                 }
749
750                 if (request_irq(priv->interruptTransmit, gfar_transmit,
751                                 0, "enet_tx", dev) < 0) {
752                         if (netif_msg_intr(priv))
753                                 printk(KERN_ERR "%s: Can't get IRQ %d\n",
754                                         dev->name, priv->interruptTransmit);
755
756                         err = -1;
757
758                         goto tx_irq_fail;
759                 }
760
761                 if (request_irq(priv->interruptReceive, gfar_receive,
762                                 0, "enet_rx", dev) < 0) {
763                         if (netif_msg_intr(priv))
764                                 printk(KERN_ERR "%s: Can't get IRQ %d (receive0)\n",
765                                                 dev->name, priv->interruptReceive);
766
767                         err = -1;
768                         goto rx_irq_fail;
769                 }
770         } else {
771                 if (request_irq(priv->interruptTransmit, gfar_interrupt,
772                                 0, "gfar_interrupt", dev) < 0) {
773                         if (netif_msg_intr(priv))
774                                 printk(KERN_ERR "%s: Can't get IRQ %d\n",
775                                         dev->name, priv->interruptTransmit);
776
777                         err = -1;
778                         goto err_irq_fail;
779                 }
780         }
781
782         phy_start(priv->phydev);
783
784         /* Configure the coalescing support */
785         if (priv->txcoalescing)
786                 gfar_write(&regs->txic,
787                            mk_ic_value(priv->txcount, priv->txtime));
788         else
789                 gfar_write(&regs->txic, 0);
790
791         if (priv->rxcoalescing)
792                 gfar_write(&regs->rxic,
793                            mk_ic_value(priv->rxcount, priv->rxtime));
794         else
795                 gfar_write(&regs->rxic, 0);
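        /* As the header comment explains, a coalesced interrupt fires only
         * once the frame-count or timer threshold is reached; mk_ic_value()
         * (see gianfar.h) packs the two thresholds, plus an enable bit,
         * into a single register value. */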
796
797         if (priv->rx_csum_enable)
798                 rctrl |= RCTRL_CHECKSUMMING;
799
800         if (priv->extended_hash)
801                 rctrl |= RCTRL_EXTHASH;
802
803         if (priv->vlan_enable)
804                 rctrl |= RCTRL_VLAN;
805
806         /* Init rctrl based on our settings */
807         gfar_write(&priv->regs->rctrl, rctrl);
808
809         if (dev->features & NETIF_F_IP_CSUM)
810                 gfar_write(&priv->regs->tctrl, TCTRL_INIT_CSUM);
811
812         gfar_start(dev);
813
814         return 0;
815
816 rx_irq_fail:
817         free_irq(priv->interruptTransmit, dev);
818 tx_irq_fail:
819         free_irq(priv->interruptError, dev);
820 err_irq_fail:
821 rx_skb_fail:
822         free_skb_resources(priv);
823 tx_skb_fail:
824         dma_free_coherent(NULL,
825                         sizeof(struct txbd8)*priv->tx_ring_size
826                         + sizeof(struct rxbd8)*priv->rx_ring_size,
827                         priv->tx_bd_base,
828                         gfar_read(&regs->tbase0));
829
830         return err;
831 }
832
833 /* Called when something needs to use the ethernet device */
834 /* Returns 0 for success. */
835 static int gfar_enet_open(struct net_device *dev)
836 {
837         int err;
838
839         /* Initialize a bunch of registers */
840         init_registers(dev);
841
842         gfar_set_mac_address(dev);
843
844         err = init_phy(dev);
845
846         if (err)
847                 return err;
848
849         err = startup_gfar(dev);
850
851         netif_start_queue(dev);
852
853         return err;
854 }
855
856 static struct txfcb *gfar_add_fcb(struct sk_buff *skb, struct txbd8 *bdp)
857 {
858         struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);
859
860         memset(fcb, 0, GMAC_FCB_LEN);
861
862         /* Flag the bd so the controller looks for the FCB */
863         bdp->status |= TXBD_TOE;
864
865         return fcb;
866 }
867
868 static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb)
869 {
870         int len;
871
872         /* If we're here, it's an IP packet with a TCP or UDP
873          * payload.  We set it to checksum, using a pseudo-header
874          * we provide
875          */
876         fcb->ip = 1;
877         fcb->tup = 1;
878         fcb->ctu = 1;
879         fcb->nph = 1;
880
881         /* Notify the controller what the protocol is */
882         if (skb->nh.iph->protocol == IPPROTO_UDP)
883                 fcb->udp = 1;
884
885         /* l3os is the distance between the start of the
886          * frame (skb->data) and the start of the IP hdr.
887          * l4os is the distance between the start of the
888          * l3 hdr and the l4 hdr */
889         fcb->l3os = (u16)(skb->nh.raw - skb->data - GMAC_FCB_LEN);
890         fcb->l4os = (u16)(skb->h.raw - skb->nh.raw);
891
892         len = skb->nh.iph->tot_len - fcb->l4os;
893
894         /* Provide the pseudoheader csum */
895         fcb->phcs = ~csum_tcpudp_magic(skb->nh.iph->saddr,
896                         skb->nh.iph->daddr, len,
897                         skb->nh.iph->protocol, 0);
898 }
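
/* Worked example for gfar_tx_checksum() above: for an untagged
 * IPv4/TCP frame, skb->data points at the FCB once gfar_add_fcb() has
 * run, so l3os works out to 14 (the Ethernet header) and l4os to 20
 * (an option-less IP header). */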
899
900 void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
901 {
902         fcb->vln = 1;
903         fcb->vlctl = vlan_tx_tag_get(skb);
904 }
905
906 /* This is called by the kernel when a frame is ready for transmission. */
907 /* It is pointed to by the dev->hard_start_xmit function pointer */
908 static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
909 {
910         struct gfar_private *priv = netdev_priv(dev);
911         struct txfcb *fcb = NULL;
912         struct txbd8 *txbdp;
913
914         /* Update transmit stats */
915         priv->stats.tx_bytes += skb->len;
916
917         /* Lock priv now */
918         spin_lock_irq(&priv->lock);
919
920         /* Point at the first free tx descriptor */
921         txbdp = priv->cur_tx;
922
923         /* Clear all but the WRAP status flags */
924         txbdp->status &= TXBD_WRAP;
925
926         /* Set up checksumming */
927         if ((dev->features & NETIF_F_IP_CSUM)
928                         && (CHECKSUM_HW == skb->ip_summed)) {
929                 fcb = gfar_add_fcb(skb, txbdp);
930                 gfar_tx_checksum(skb, fcb);
931         }
932
933         if (priv->vlan_enable &&
934                         unlikely(priv->vlgrp && vlan_tx_tag_present(skb))) {
935                 if (NULL == fcb)
936                         fcb = gfar_add_fcb(skb, txbdp);
937
938                 gfar_tx_vlan(skb, fcb);
939         }
940
941         /* Set buffer length and pointer */
942         txbdp->length = skb->len;
943         txbdp->bufPtr = dma_map_single(NULL, skb->data,
944                         skb->len, DMA_TO_DEVICE);
945
946         /* Save the skb pointer so we can free it later */
947         priv->tx_skbuff[priv->skb_curtx] = skb;
948
949         /* Update the current skb pointer (wrapping if this was the last) */
950         priv->skb_curtx =
951             (priv->skb_curtx + 1) & TX_RING_MOD_MASK(priv->tx_ring_size);
952
953         /* Flag the BD as interrupt-causing */
954         txbdp->status |= TXBD_INTERRUPT;
955
956         /* Flag the BD as ready to go, last in frame, and  */
957         /* in need of CRC */
958         txbdp->status |= (TXBD_READY | TXBD_LAST | TXBD_CRC);
959
960         dev->trans_start = jiffies;
961
962         /* If this was the last BD in the ring, the next one */
963         /* is at the beginning of the ring */
964         if (txbdp->status & TXBD_WRAP)
965                 txbdp = priv->tx_bd_base;
966         else
967                 txbdp++;
968
969         /* If the next BD still needs to be cleaned up, then the bds
970            are full.  We need to tell the kernel to stop sending us stuff. */
971         if (txbdp == priv->dirty_tx) {
972                 netif_stop_queue(dev);
973
974                 priv->stats.tx_fifo_errors++;
975         }
976
977         /* Update the current txbd to the next one */
978         priv->cur_tx = txbdp;
979
980         /* Tell the DMA to go go go */
981         gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);
982
983         /* Unlock priv */
984         spin_unlock_irq(&priv->lock);
985
986         return 0;
987 }
988
989 /* Stops the kernel queue, and halts the controller */
990 static int gfar_close(struct net_device *dev)
991 {
992         struct gfar_private *priv = netdev_priv(dev);
993         stop_gfar(dev);
994
995         /* Disconnect from the PHY */
996         phy_disconnect(priv->phydev);
997         priv->phydev = NULL;
998
999         netif_stop_queue(dev);
1000
1001         return 0;
1002 }
1003
1004 /* returns a net_device_stats structure pointer */
1005 static struct net_device_stats * gfar_get_stats(struct net_device *dev)
1006 {
1007         struct gfar_private *priv = netdev_priv(dev);
1008
1009         return &(priv->stats);
1010 }
1011
1012 /* Changes the mac address if the controller is not running. */
1013 int gfar_set_mac_address(struct net_device *dev)
1014 {
1015         struct gfar_private *priv = netdev_priv(dev);
1016         int i;
1017         char tmpbuf[MAC_ADDR_LEN];
1018         u32 tempval;
1019
1020         /* Now copy it into the mac registers backwards, cuz */
1021         /* little endian is silly */
1022         for (i = 0; i < MAC_ADDR_LEN; i++)
1023                 tmpbuf[MAC_ADDR_LEN - 1 - i] = dev->dev_addr[i];
1024
1025         gfar_write(&priv->regs->macstnaddr1, *((u32 *) (tmpbuf)));
1026
1027         tempval = *((u32 *) (tmpbuf + 4));
1028
1029         gfar_write(&priv->regs->macstnaddr2, tempval);
1030
1031         return 0;
1032 }
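
/* Worked example (assuming the big-endian byte order of the 85xx parts
 * this driver targets): for dev_addr 00:04:9f:01:02:03, tmpbuf becomes
 * {03,02,01,9f,04,00}, so macstnaddr1 is written as 0x0302019f and the
 * top half of macstnaddr2 as 0x0400. */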
1033
1034
1035 /* Enables and disables VLAN insertion/extraction */
1036 static void gfar_vlan_rx_register(struct net_device *dev,
1037                 struct vlan_group *grp)
1038 {
1039         struct gfar_private *priv = netdev_priv(dev);
1040         unsigned long flags;
1041         u32 tempval;
1042
1043         spin_lock_irqsave(&priv->lock, flags);
1044
1045         priv->vlgrp = grp;
1046
1047         if (grp) {
1048                 /* Enable VLAN tag insertion */
1049                 tempval = gfar_read(&priv->regs->tctrl);
1050                 tempval |= TCTRL_VLINS;
1051
1052                 gfar_write(&priv->regs->tctrl, tempval);
1053
1054                 /* Enable VLAN tag extraction */
1055                 tempval = gfar_read(&priv->regs->rctrl);
1056                 tempval |= RCTRL_VLEX;
1057                 gfar_write(&priv->regs->rctrl, tempval);
1058         } else {
1059                 /* Disable VLAN tag insertion */
1060                 tempval = gfar_read(&priv->regs->tctrl);
1061                 tempval &= ~TCTRL_VLINS;
1062                 gfar_write(&priv->regs->tctrl, tempval);
1063
1064                 /* Disable VLAN tag extraction */
1065                 tempval = gfar_read(&priv->regs->rctrl);
1066                 tempval &= ~RCTRL_VLEX;
1067                 gfar_write(&priv->regs->rctrl, tempval);
1068         }
1069
1070         spin_unlock_irqrestore(&priv->lock, flags);
1071 }
1072
1073
1074 static void gfar_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
1075 {
1076         struct gfar_private *priv = netdev_priv(dev);
1077         unsigned long flags;
1078
1079         spin_lock_irqsave(&priv->lock, flags);
1080
1081         if (priv->vlgrp)
1082                 priv->vlgrp->vlan_devices[vid] = NULL;
1083
1084         spin_unlock_irqrestore(&priv->lock, flags);
1085 }
1086
1087
1088 static int gfar_change_mtu(struct net_device *dev, int new_mtu)
1089 {
1090         int tempsize, tempval;
1091         struct gfar_private *priv = netdev_priv(dev);
1092         int oldsize = priv->rx_buffer_size;
1093         int frame_size = new_mtu + ETH_HLEN;
1094
1095         if (priv->vlan_enable)
1096                 frame_size += VLAN_ETH_HLEN;
1097
1098         if (gfar_uses_fcb(priv))
1099                 frame_size += GMAC_FCB_LEN;
1100
1101         frame_size += priv->padding;
1102
1103         if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
1104                 if (netif_msg_drv(priv))
1105                         printk(KERN_ERR "%s: Invalid MTU setting\n",
1106                                         dev->name);
1107                 return -EINVAL;
1108         }
1109
1110         tempsize =
1111             (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
1112             INCREMENTAL_BUFFER_SIZE;
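        /* Worked example: with new_mtu = 1500 and no VLAN, FCB, or padding,
         * frame_size = 1514, so tempsize = (1514 & ~511) + 512 = 1536,
         * matching DEFAULT_RX_BUFFER_SIZE (this assumes the usual
         * INCREMENTAL_BUFFER_SIZE of 512; note the expression always rounds
         * up to the next multiple, even for exact multiples). */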
1113
1114         /* Only stop and start the controller if the device is up
1115          * and the buffer size is actually changing */
1116         if ((oldsize != tempsize) && (dev->flags & IFF_UP))
1117                 stop_gfar(dev);
1118
1119         priv->rx_buffer_size = tempsize;
1120
1121         dev->mtu = new_mtu;
1122
1123         gfar_write(&priv->regs->mrblr, priv->rx_buffer_size);
1124         gfar_write(&priv->regs->maxfrm, priv->rx_buffer_size);
1125
1126         /* If the mtu is larger than the max size for standard
1127          * ethernet frames (i.e., a jumbo frame), then set maccfg2
1128          * to allow huge frames, and to check the length */
1129         tempval = gfar_read(&priv->regs->maccfg2);
1130
1131         if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE)
1132                 tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
1133         else
1134                 tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
1135
1136         gfar_write(&priv->regs->maccfg2, tempval);
1137
1138         if ((oldsize != tempsize) && (dev->flags & IFF_UP))
1139                 startup_gfar(dev);
1140
1141         return 0;
1142 }
1143
1144 /* gfar_timeout gets called when a packet has not been
1145  * transmitted after a set amount of time.
1146  * For now, assume that clearing out all the structures and
1147  * starting over will fix the problem. */
1148 static void gfar_timeout(struct net_device *dev)
1149 {
1150         struct gfar_private *priv = netdev_priv(dev);
1151
1152         priv->stats.tx_errors++;
1153
1154         if (dev->flags & IFF_UP) {
1155                 stop_gfar(dev);
1156                 startup_gfar(dev);
1157         }
1158
1159         netif_schedule(dev);
1160 }
1161
1162 /* Interrupt Handler for Transmit complete */
1163 static irqreturn_t gfar_transmit(int irq, void *dev_id, struct pt_regs *regs)
1164 {
1165         struct net_device *dev = (struct net_device *) dev_id;
1166         struct gfar_private *priv = netdev_priv(dev);
1167         struct txbd8 *bdp;
1168
1169         /* Clear IEVENT */
1170         gfar_write(&priv->regs->ievent, IEVENT_TX_MASK);
1171
1172         /* Lock priv */
1173         spin_lock(&priv->lock);
1174         bdp = priv->dirty_tx;
1175         while ((bdp->status & TXBD_READY) == 0) {
1176                 /* If dirty_tx and cur_tx are the same, then either the */
1177                 /* ring is empty or full now (it could only be full in the beginning, */
1178                 /* obviously).  If it is empty, we are done. */
1179                 if ((bdp == priv->cur_tx) && (netif_queue_stopped(dev) == 0))
1180                         break;
1181
1182                 priv->stats.tx_packets++;
1183
1184                 /* Deferred means some collisions occurred during transmit, */
1185                 /* but we eventually sent the packet. */
1186                 if (bdp->status & TXBD_DEF)
1187                         priv->stats.collisions++;
1188
1189                 /* Free the sk buffer associated with this TxBD */
1190                 dev_kfree_skb_irq(priv->tx_skbuff[priv->skb_dirtytx]);
1191                 priv->tx_skbuff[priv->skb_dirtytx] = NULL;
1192                 priv->skb_dirtytx =
1193                     (priv->skb_dirtytx +
1194                      1) & TX_RING_MOD_MASK(priv->tx_ring_size);
1195
1196                 /* update bdp to point at next bd in the ring (wrapping if necessary) */
1197                 if (bdp->status & TXBD_WRAP)
1198                         bdp = priv->tx_bd_base;
1199                 else
1200                         bdp++;
1201
1202                 /* Move dirty_tx to be the next bd */
1203                 priv->dirty_tx = bdp;
1204
1205                 /* We freed a buffer, so now we can restart transmission */
1206                 if (netif_queue_stopped(dev))
1207                         netif_wake_queue(dev);
1208         } /* while ((bdp->status & TXBD_READY) == 0) */
1209
1210         /* If we are coalescing the interrupts, reset the timer */
1211         /* Otherwise, clear it */
1212         if (priv->txcoalescing)
1213                 gfar_write(&priv->regs->txic,
1214                            mk_ic_value(priv->txcount, priv->txtime));
1215         else
1216                 gfar_write(&priv->regs->txic, 0);
1217
1218         spin_unlock(&priv->lock);
1219
1220         return IRQ_HANDLED;
1221 }
1222
1223 struct sk_buff * gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp)
1224 {
1225         struct gfar_private *priv = netdev_priv(dev);
1226         struct sk_buff *skb = NULL;
1227         unsigned int timeout = SKB_ALLOC_TIMEOUT;
1228
1229         /* We have to allocate the skb, so keep trying till we succeed */
1230         while ((!skb) && timeout--)
1231                 skb = dev_alloc_skb(priv->rx_buffer_size + RXBUF_ALIGNMENT);
1232
1233         if (NULL == skb)
1234                 return NULL;
1235
1236         /* We need the data buffer to be aligned properly.  We will reserve
1237          * as many bytes as needed to align the data properly
1238          */
1239         skb_reserve(skb,
1240                     RXBUF_ALIGNMENT -
1241                     (((unsigned) skb->data) & (RXBUF_ALIGNMENT - 1)));
1242
1243         skb->dev = dev;
1244
1245         bdp->bufPtr = dma_map_single(NULL, skb->data,
1246                         priv->rx_buffer_size + RXBUF_ALIGNMENT,
1247                         DMA_FROM_DEVICE);
1248
1249         bdp->length = 0;
1250
1251         /* Mark the buffer empty */
1252         bdp->status |= (RXBD_EMPTY | RXBD_INTERRUPT);
1253
1254         return skb;
1255 }
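
/* Note on gfar_new_skb() above: the skb_reserve() advances skb->data to
 * the next RXBUF_ALIGNMENT boundary; e.g. assuming RXBUF_ALIGNMENT is
 * 64, a buffer whose data pointer ends in 0x24 (= 36) is advanced by
 * 64 - 36 = 28 bytes.  The extra RXBUF_ALIGNMENT bytes in the
 * allocation pay for this shift. */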
1256
1257 static inline void count_errors(unsigned short status, struct gfar_private *priv)
1258 {
1259         struct net_device_stats *stats = &priv->stats;
1260         struct gfar_extra_stats *estats = &priv->extra_stats;
1261
1262         /* If the packet was truncated, none of the other errors
1263          * matter */
1264         if (status & RXBD_TRUNCATED) {
1265                 stats->rx_length_errors++;
1266
1267                 estats->rx_trunc++;
1268
1269                 return;
1270         }
1271         /* Count the errors, if there were any */
1272         if (status & (RXBD_LARGE | RXBD_SHORT)) {
1273                 stats->rx_length_errors++;
1274
1275                 if (status & RXBD_LARGE)
1276                         estats->rx_large++;
1277                 else
1278                         estats->rx_short++;
1279         }
1280         if (status & RXBD_NONOCTET) {
1281                 stats->rx_frame_errors++;
1282                 estats->rx_nonoctet++;
1283         }
1284         if (status & RXBD_CRCERR) {
1285                 estats->rx_crcerr++;
1286                 stats->rx_crc_errors++;
1287         }
1288         if (status & RXBD_OVERRUN) {
1289                 estats->rx_overrun++;
1290                 stats->rx_crc_errors++;
1291         }
1292 }
1293
1294 irqreturn_t gfar_receive(int irq, void *dev_id, struct pt_regs *regs)
1295 {
1296         struct net_device *dev = (struct net_device *) dev_id;
1297         struct gfar_private *priv = netdev_priv(dev);
1298
1299 #ifdef CONFIG_GFAR_NAPI
1300         u32 tempval;
1301 #endif
1302
1303         /* Clear IEVENT, so rx interrupt isn't called again
1304          * because of this interrupt */
1305         gfar_write(&priv->regs->ievent, IEVENT_RX_MASK);
1306
1307         /* support NAPI */
1308 #ifdef CONFIG_GFAR_NAPI
1309         if (netif_rx_schedule_prep(dev)) {
1310                 tempval = gfar_read(&priv->regs->imask);
1311                 tempval &= IMASK_RX_DISABLED;
1312                 gfar_write(&priv->regs->imask, tempval);
1313
1314                 __netif_rx_schedule(dev);
1315         } else {
1316                 if (netif_msg_rx_err(priv))
1317                         printk(KERN_DEBUG "%s: receive called twice (%x)[%x]\n",
1318                                 dev->name, gfar_read(&priv->regs->ievent),
1319                                 gfar_read(&priv->regs->imask));
1320         }
1321 #else
1322
1323         spin_lock(&priv->lock);
1324         gfar_clean_rx_ring(dev, priv->rx_ring_size);
1325
1326         /* If we are coalescing interrupts, update the timer */
1327         /* Otherwise, clear it */
1328         if (priv->rxcoalescing)
1329                 gfar_write(&priv->regs->rxic,
1330                            mk_ic_value(priv->rxcount, priv->rxtime));
1331         else
1332                 gfar_write(&priv->regs->rxic, 0);
1333
1334         spin_unlock(&priv->lock);
1335 #endif
1336
1337         return IRQ_HANDLED;
1338 }
1339
1340 static inline int gfar_rx_vlan(struct sk_buff *skb,
1341                 struct vlan_group *vlgrp, unsigned short vlctl)
1342 {
1343 #ifdef CONFIG_GFAR_NAPI
1344         return vlan_hwaccel_receive_skb(skb, vlgrp, vlctl);
1345 #else
1346         return vlan_hwaccel_rx(skb, vlgrp, vlctl);
1347 #endif
1348 }
1349
1350 static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
1351 {
1352         /* If valid headers were found, and valid sums
1353          * were verified, then we tell the kernel that no
1354          * checksumming is necessary.  Otherwise, it is */
1355         if (fcb->cip && !fcb->eip && fcb->ctu && !fcb->etu)
1356                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1357         else
1358                 skb->ip_summed = CHECKSUM_NONE;
1359 }
1360
1361
1362 static inline struct rxfcb *gfar_get_fcb(struct sk_buff *skb)
1363 {
1364         struct rxfcb *fcb = (struct rxfcb *)skb->data;
1365
1366         /* Remove the FCB from the skb */
1367         skb_pull(skb, GMAC_FCB_LEN);
1368
1369         return fcb;
1370 }
1371
1372 /* gfar_process_frame() -- handle one incoming packet if skb
1373  * isn't NULL.  */
1374 static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
1375                 int length)
1376 {
1377         struct gfar_private *priv = netdev_priv(dev);
1378         struct rxfcb *fcb = NULL;
1379
1380         if (NULL == skb) {
1381                 if (netif_msg_rx_err(priv))
1382                         printk(KERN_WARNING "%s: Missing skb!\n", dev->name);
1383                 priv->stats.rx_dropped++;
1384                 priv->extra_stats.rx_skbmissing++;
1385         } else {
1386                 int ret;
1387
1388                 /* Prep the skb for the packet */
1389                 skb_put(skb, length);
1390
1391                 /* Grab the FCB if there is one */
1392                 if (gfar_uses_fcb(priv))
1393                         fcb = gfar_get_fcb(skb);
1394
1395                 /* Remove the padded bytes, if there are any */
1396                 if (priv->padding)
1397                         skb_pull(skb, priv->padding);
1398
1399                 if (priv->rx_csum_enable)
1400                         gfar_rx_checksum(skb, fcb);
1401
1402                 /* Tell the skb what kind of packet this is */
1403                 skb->protocol = eth_type_trans(skb, dev);
1404
1405                 /* Send the packet up the stack */
1406                 if (unlikely(priv->vlgrp && fcb->vln))
1407                         ret = gfar_rx_vlan(skb, priv->vlgrp, fcb->vlctl);
1408                 else
1409                         ret = RECEIVE(skb);
1410
1411                 if (NET_RX_DROP == ret)
1412                         priv->extra_stats.kernel_dropped++;
1413         }
1414
1415         return 0;
1416 }
1417
1418 /* gfar_clean_rx_ring() -- Processes each frame in the rx ring
1419  *   until the budget/quota has been reached. Returns the number
1420  *   of frames handled
1421  */
1422 int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
1423 {
1424         struct rxbd8 *bdp;
1425         struct sk_buff *skb;
1426         u16 pkt_len;
1427         int howmany = 0;
1428         struct gfar_private *priv = netdev_priv(dev);
1429
1430         /* Get the first full descriptor */
1431         bdp = priv->cur_rx;
1432
1433         while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
1434                 skb = priv->rx_skbuff[priv->skb_currx];
1435
1436                 if (!(bdp->status &
1437                       (RXBD_LARGE | RXBD_SHORT | RXBD_NONOCTET
1438                        | RXBD_CRCERR | RXBD_OVERRUN | RXBD_TRUNCATED))) {
1439                         /* Increment the number of packets */
1440                         priv->stats.rx_packets++;
1441                         howmany++;
1442
1443                         /* Remove the FCS from the packet length */
1444                         pkt_len = bdp->length - 4;
1445
1446                         gfar_process_frame(dev, skb, pkt_len);
1447
1448                         priv->stats.rx_bytes += pkt_len;
1449                 } else {
1450                         count_errors(bdp->status, priv);
1451
1452                         if (skb)
1453                                 dev_kfree_skb_any(skb);
1454
1455                         priv->rx_skbuff[priv->skb_currx] = NULL;
1456                 }
1457
1458                 dev->last_rx = jiffies;
1459
1460                 /* Clear the status flags for this buffer */
1461                 bdp->status &= ~RXBD_STATS;
1462
1463                 /* Add another skb for the future */
1464                 skb = gfar_new_skb(dev, bdp);
1465                 priv->rx_skbuff[priv->skb_currx] = skb;
1466
1467                 /* Update to the next pointer */
1468                 if (bdp->status & RXBD_WRAP)
1469                         bdp = priv->rx_bd_base;
1470                 else
1471                         bdp++;
1472
1473                 /* update to point at the next skb */
1474                 priv->skb_currx =
1475                     (priv->skb_currx +
1476                      1) & RX_RING_MOD_MASK(priv->rx_ring_size);
1477
1478         }
1479
1480         /* Update the current rxbd pointer to be the next one */
1481         priv->cur_rx = bdp;
1482
1483         /* If no packets have arrived since the
1484          * last one we processed, clear the IEVENT RX and
1485          * BSY bits so that another interrupt won't be
1486          * generated when we set IMASK */
1487         if (bdp->status & RXBD_EMPTY)
1488                 gfar_write(&priv->regs->ievent, IEVENT_RX_MASK);
1489
1490         return howmany;
1491 }
1492
1493 #ifdef CONFIG_GFAR_NAPI
static int gfar_poll(struct net_device *dev, int *budget)
{
        int howmany;
        struct gfar_private *priv = netdev_priv(dev);
        int rx_work_limit = *budget;

        if (rx_work_limit > dev->quota)
                rx_work_limit = dev->quota;

        howmany = gfar_clean_rx_ring(dev, rx_work_limit);

        dev->quota -= howmany;
        rx_work_limit -= howmany;
        *budget -= howmany;

        if (rx_work_limit >= 0) {
                netif_rx_complete(dev);

                /* Clear the halt bit in RSTAT */
                gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT);

                gfar_write(&priv->regs->imask, IMASK_DEFAULT);

                /* If we are coalescing interrupts, update the timer;
                 * otherwise, clear it */
                if (priv->rxcoalescing)
                        gfar_write(&priv->regs->rxic,
                                   mk_ic_value(priv->rxcount, priv->rxtime));
                else
                        gfar_write(&priv->regs->rxic, 0);
        }

        return (rx_work_limit < 0) ? 1 : 0;
}
#endif

/* The interrupt handler for devices with one interrupt */
static irqreturn_t gfar_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
        struct net_device *dev = dev_id;
        struct gfar_private *priv = netdev_priv(dev);

        /* Save ievent for future reference */
        u32 events = gfar_read(&priv->regs->ievent);

        /* Clear IEVENT */
        gfar_write(&priv->regs->ievent, events);

        /* Check for reception */
        if ((events & IEVENT_RXF0) || (events & IEVENT_RXB0))
                gfar_receive(irq, dev_id, regs);

        /* Check for transmit completion */
        if ((events & IEVENT_TXF) || (events & IEVENT_TXB))
                gfar_transmit(irq, dev_id, regs);

        /* Update error statistics */
        if (events & IEVENT_TXE) {
                priv->stats.tx_errors++;

                if (events & IEVENT_LC)
                        priv->stats.tx_window_errors++;
                if (events & IEVENT_CRL)
                        priv->stats.tx_aborted_errors++;
                if (events & IEVENT_XFUN) {
                        if (netif_msg_tx_err(priv))
                                printk(KERN_WARNING "%s: tx underrun, packet dropped\n",
                                                dev->name);
                        priv->stats.tx_dropped++;
                        priv->extra_stats.tx_underrun++;

                        /* Reactivate the Tx Queues */
                        gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);
                }
        }
        if (events & IEVENT_BSY) {
                priv->stats.rx_errors++;
                priv->extra_stats.rx_bsy++;

                gfar_receive(irq, dev_id, regs);

#ifndef CONFIG_GFAR_NAPI
                /* Clear the halt bit in RSTAT */
                gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT);
#endif

                if (netif_msg_rx_err(priv))
                        printk(KERN_DEBUG "%s: busy error (rhalt: %x)\n",
                                        dev->name,
                                        gfar_read(&priv->regs->rstat));
        }
        if (events & IEVENT_BABR) {
                priv->stats.rx_errors++;
                priv->extra_stats.rx_babr++;

                if (netif_msg_rx_err(priv))
                        printk(KERN_DEBUG "%s: babbling error\n", dev->name);
        }
        if (events & IEVENT_EBERR) {
                priv->extra_stats.eberr++;
                if (netif_msg_rx_err(priv))
                        printk(KERN_DEBUG "%s: EBERR\n", dev->name);
        }
        if ((events & IEVENT_RXC) && netif_msg_rx_err(priv))
                printk(KERN_DEBUG "%s: control frame\n", dev->name);

        if (events & IEVENT_BABT) {
                priv->extra_stats.tx_babt++;
                if (netif_msg_tx_err(priv))
                        printk(KERN_DEBUG "%s: babt error\n", dev->name);
        }

        return IRQ_HANDLED;
}
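
/* Controllers wired with separate transmit, receive, and error
 * interrupt lines register gfar_transmit(), gfar_receive(), and
 * gfar_error() individually; the combined handler above simply
 * demultiplexes IEVENT onto those same paths. */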

/* Called every time the controller might need to be made
 * aware of new link state.  The PHY code conveys this
 * information through variables in the phydev structure, and this
 * function converts those variables into the appropriate
 * register values, and can bring down the device if needed.
 */
static void adjust_link(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);
        struct gfar *regs = priv->regs;
        unsigned long flags;
        struct phy_device *phydev = priv->phydev;
        int new_state = 0;

        spin_lock_irqsave(&priv->lock, flags);
        if (phydev->link) {
                u32 tempval = gfar_read(&regs->maccfg2);

                /* Now we make sure that we can be in full duplex mode.
                 * If not, we operate in half-duplex mode. */
                if (phydev->duplex != priv->oldduplex) {
                        new_state = 1;
                        if (!(phydev->duplex))
                                tempval &= ~(MACCFG2_FULL_DUPLEX);
                        else
                                tempval |= MACCFG2_FULL_DUPLEX;

                        priv->oldduplex = phydev->duplex;
                }

                if (phydev->speed != priv->oldspeed) {
                        new_state = 1;
                        switch (phydev->speed) {
                        case 1000:
                                tempval =
                                    ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
                                break;
                        case 100:
                        case 10:
                                tempval =
                                    ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
                                break;
                        default:
                                if (netif_msg_link(priv))
                                        printk(KERN_WARNING
                                                "%s: Ack!  Speed (%d) is not 10/100/1000!\n",
                                                dev->name, phydev->speed);
                                break;
                        }

                        priv->oldspeed = phydev->speed;
                }

                gfar_write(&regs->maccfg2, tempval);

                if (!priv->oldlink) {
                        new_state = 1;
                        priv->oldlink = 1;
                        netif_schedule(dev);
                }
        } else if (priv->oldlink) {
                new_state = 1;
                priv->oldlink = 0;
                priv->oldspeed = 0;
                priv->oldduplex = -1;
        }

        if (new_state && netif_msg_link(priv))
                phy_print_status(phydev);

        spin_unlock_irqrestore(&priv->lock, flags);
}

/* Update the hash table based on the current list of multicast
 * addresses we subscribe to.  Also, change the promiscuity of
 * the device based on the flags (this function is called
 * whenever dev->flags is changed) */
static void gfar_set_multi(struct net_device *dev)
{
        struct dev_mc_list *mc_ptr;
        struct gfar_private *priv = netdev_priv(dev);
        struct gfar *regs = priv->regs;
        u32 tempval;

        if (dev->flags & IFF_PROMISC) {
                if (netif_msg_drv(priv))
                        printk(KERN_INFO "%s: Entering promiscuous mode.\n",
                                        dev->name);
                /* Set RCTRL to PROM */
                tempval = gfar_read(&regs->rctrl);
                tempval |= RCTRL_PROM;
                gfar_write(&regs->rctrl, tempval);
        } else {
                /* Set RCTRL to not PROM */
                tempval = gfar_read(&regs->rctrl);
                tempval &= ~(RCTRL_PROM);
                gfar_write(&regs->rctrl, tempval);
        }

        if (dev->flags & IFF_ALLMULTI) {
                /* Set the hash to rx all multicast frames */
                gfar_write(&regs->igaddr0, 0xffffffff);
                gfar_write(&regs->igaddr1, 0xffffffff);
                gfar_write(&regs->igaddr2, 0xffffffff);
                gfar_write(&regs->igaddr3, 0xffffffff);
                gfar_write(&regs->igaddr4, 0xffffffff);
                gfar_write(&regs->igaddr5, 0xffffffff);
                gfar_write(&regs->igaddr6, 0xffffffff);
                gfar_write(&regs->igaddr7, 0xffffffff);
                gfar_write(&regs->gaddr0, 0xffffffff);
                gfar_write(&regs->gaddr1, 0xffffffff);
                gfar_write(&regs->gaddr2, 0xffffffff);
                gfar_write(&regs->gaddr3, 0xffffffff);
                gfar_write(&regs->gaddr4, 0xffffffff);
                gfar_write(&regs->gaddr5, 0xffffffff);
                gfar_write(&regs->gaddr6, 0xffffffff);
                gfar_write(&regs->gaddr7, 0xffffffff);
        } else {
                /* zero out the hash */
                gfar_write(&regs->igaddr0, 0x0);
                gfar_write(&regs->igaddr1, 0x0);
                gfar_write(&regs->igaddr2, 0x0);
                gfar_write(&regs->igaddr3, 0x0);
                gfar_write(&regs->igaddr4, 0x0);
                gfar_write(&regs->igaddr5, 0x0);
                gfar_write(&regs->igaddr6, 0x0);
                gfar_write(&regs->igaddr7, 0x0);
                gfar_write(&regs->gaddr0, 0x0);
                gfar_write(&regs->gaddr1, 0x0);
                gfar_write(&regs->gaddr2, 0x0);
                gfar_write(&regs->gaddr3, 0x0);
                gfar_write(&regs->gaddr4, 0x0);
                gfar_write(&regs->gaddr5, 0x0);
                gfar_write(&regs->gaddr6, 0x0);
                gfar_write(&regs->gaddr7, 0x0);

                if (dev->mc_count == 0)
                        return;

                /* Parse the list, and set the appropriate bits */
                for (mc_ptr = dev->mc_list; mc_ptr; mc_ptr = mc_ptr->next)
                        gfar_set_hash_for_addr(dev, mc_ptr->dmi_addr);
        }
}

/* Set the appropriate hash bit for the given addr */
/* The algorithm works like so:
 * 1) Take the Destination Address (ie the multicast address), and
 * do a CRC on it (little endian), and reverse the bits of the
 * result.
 * 2) Use the 8 most significant bits as a hash into a 256-entry
 * table.  The table is controlled through 8 32-bit registers:
 * gaddr0-7.  gaddr0's MSB is entry 0, and gaddr7's LSB is
 * entry 255.  This means that the 3 most significant bits in the
 * hash index which gaddr register to use, and the 5 other bits
 * indicate which bit (assuming an IBM numbering scheme, which
 * for PowerPC (tm) is usually the case) in the register holds
 * the entry. */
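/* Worked example, assuming hash_width == 8 (a 256-entry table): a CRC
 * result of 0xa7000000 keeps 0xa7 as its top byte, so whichreg =
 * 0xa7 >> 5 = 5 and whichbit = 0xa7 & 0x1f = 7.  Bit 7 in IBM
 * numbering is mask 1 << (31 - 7), which gets OR'd into hash
 * register 5. */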
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
{
        u32 tempval;
        struct gfar_private *priv = netdev_priv(dev);
        u32 result = ether_crc(MAC_ADDR_LEN, addr);
        int width = priv->hash_width;
        u8 whichbit = (result >> (32 - width)) & 0x1f;
        u8 whichreg = result >> (32 - width + 5);
        u32 value = (1 << (31 - whichbit));

        tempval = gfar_read(priv->hash_regs[whichreg]);
        tempval |= value;
        gfar_write(priv->hash_regs[whichreg], tempval);
}

/* GFAR error interrupt handler */
static irqreturn_t gfar_error(int irq, void *dev_id, struct pt_regs *regs)
{
        struct net_device *dev = dev_id;
        struct gfar_private *priv = netdev_priv(dev);

        /* Save ievent for future reference */
        u32 events = gfar_read(&priv->regs->ievent);

        /* Clear IEVENT */
        gfar_write(&priv->regs->ievent, IEVENT_ERR_MASK);

        /* Report the raw event and mask if error debugging is on */
        if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
                printk(KERN_DEBUG "%s: error interrupt (ievent=0x%08x imask=0x%08x)\n",
                                dev->name, events, gfar_read(&priv->regs->imask));

        /* Update the error counters */
        if (events & IEVENT_TXE) {
                priv->stats.tx_errors++;

                if (events & IEVENT_LC)
                        priv->stats.tx_window_errors++;
                if (events & IEVENT_CRL)
                        priv->stats.tx_aborted_errors++;
                if (events & IEVENT_XFUN) {
                        if (netif_msg_tx_err(priv))
                                printk(KERN_DEBUG "%s: underrun.  packet dropped.\n",
                                                dev->name);
                        priv->stats.tx_dropped++;
                        priv->extra_stats.tx_underrun++;

                        /* Reactivate the Tx Queues */
                        gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);
                }
                if (netif_msg_tx_err(priv))
                        printk(KERN_DEBUG "%s: Transmit Error\n", dev->name);
        }
        if (events & IEVENT_BSY) {
                priv->stats.rx_errors++;
                priv->extra_stats.rx_bsy++;

                gfar_receive(irq, dev_id, regs);

#ifndef CONFIG_GFAR_NAPI
                /* Clear the halt bit in RSTAT */
                gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT);
#endif

                if (netif_msg_rx_err(priv))
                        printk(KERN_DEBUG "%s: busy error (rhalt: %x)\n",
                                        dev->name,
                                        gfar_read(&priv->regs->rstat));
        }
        if (events & IEVENT_BABR) {
                priv->stats.rx_errors++;
                priv->extra_stats.rx_babr++;

                if (netif_msg_rx_err(priv))
                        printk(KERN_DEBUG "%s: babbling error\n", dev->name);
        }
        if (events & IEVENT_EBERR) {
                priv->extra_stats.eberr++;
                if (netif_msg_rx_err(priv))
                        printk(KERN_DEBUG "%s: EBERR\n", dev->name);
        }
        if ((events & IEVENT_RXC) && netif_msg_rx_status(priv))
                printk(KERN_DEBUG "%s: control frame\n", dev->name);

        if (events & IEVENT_BABT) {
                priv->extra_stats.tx_babt++;
                if (netif_msg_tx_err(priv))
                        printk(KERN_DEBUG "%s: babt error\n", dev->name);
        }
        return IRQ_HANDLED;
}

/* Structure for a device driver */
static struct device_driver gfar_driver = {
        .name = "fsl-gianfar",
        .bus = &platform_bus_type,
        .probe = gfar_probe,
        .remove = gfar_remove,
};

static int __init gfar_init(void)
{
        int err = gfar_mdio_init();

        if (err)
                return err;

        err = driver_register(&gfar_driver);

        if (err)
                gfar_mdio_exit();

        return err;
}

static void __exit gfar_exit(void)
{
        driver_unregister(&gfar_driver);
        gfar_mdio_exit();
}

module_init(gfar_init);
module_exit(gfar_exit);