/*
 * drivers/net/gianfar.c
 *
 * Gianfar Ethernet Driver
 * This driver is designed for the non-CPM ethernet controllers
 * on the 85xx and 83xx family of integrated processors
 * Based on 8260_io/fcc_enet.c
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala
 *
 * Copyright (c) 2002-2004 Freescale Semiconductor, Inc.
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 *
 *  Gianfar:  AKA Lambda Draconis, "Dragon"
 *  RA 11 31 24.2
 *  Dec +69 19 52
 *  V 3.84
 *  B-V +1.62
 *
 *  Theory of operation
 *
 *  The driver is initialized through platform_device.  The structures
 *  which define the configuration needed by the board are defined in
 *  a board structure in arch/ppc/platforms (though I do not
 *  discount the possibility that other architectures could one
 *  day be supported).
 *
 *  The Gianfar Ethernet Controller uses a ring of buffer
 *  descriptors.  The beginning is indicated by a register
 *  pointing to the physical address of the start of the ring.
 *  The end is determined by a "wrap" bit being set in the
 *  last descriptor of the ring.
 *
 *  When a packet is received, the RXF bit in the
 *  IEVENT register is set, triggering an interrupt when the
 *  corresponding bit in the IMASK register is also set (if
 *  interrupt coalescing is active, then the interrupt may not
 *  happen immediately, but will wait until either a set number
 *  of frames has arrived or a set amount of time has passed).  In NAPI, the
 *  interrupt handler will signal there is work to be done, and
 *  exit.  Without NAPI, the packet(s) will be handled
 *  immediately.  Both methods will start at the last known empty
 *  descriptor, and process every subsequent descriptor until there
 *  are none left with data (NAPI will stop after a set number of
 *  packets to give time to other tasks, but will eventually
 *  process all the packets).  The data arrives inside a
 *  pre-allocated skb, and so after the skb is passed up to the
 *  stack, a new skb must be allocated, and the address field in
 *  the buffer descriptor must be updated to indicate this new
 *  skb.
 *
 *  When the kernel requests that a packet be transmitted, the
 *  driver starts where it left off last time, and points the
 *  descriptor at the buffer which was passed in.  The driver
 *  then informs the DMA engine that there are packets ready to
 *  be transmitted.  Once the controller is finished transmitting
 *  the packet, an interrupt may be triggered (under the same
 *  conditions as for reception, but depending on the TXF bit).
 *  The driver then cleans up the buffer.
 */
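
/*
 * As a rough illustration only (not part of the driver): consuming
 * such a ring, with the "wrap" bit marking the last descriptor,
 * amounts to
 *
 *      struct rxbd8 *bdp = priv->rx_bd_base;
 *
 *      while (!(bdp->status & RXBD_EMPTY)) {
 *              // ... hand the filled buffer to the stack ...
 *              bdp->status &= ~RXBD_STATS;
 *              bdp = (bdp->status & RXBD_WRAP) ?
 *                      priv->rx_bd_base : bdp + 1;
 *      }
 *
 * The real receive path (gfar_clean_rx_ring() below) follows this
 * shape, with error accounting and skb replenishment added.
 */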

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/platform_device.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>

#include "gianfar.h"
#include "gianfar_mii.h"

#define TX_TIMEOUT      (1*HZ)
#define SKB_ALLOC_TIMEOUT 1000000
#undef BRIEF_GFAR_ERRORS
#undef VERBOSE_GFAR_ERRORS

#ifdef CONFIG_GFAR_NAPI
#define RECEIVE(x) netif_receive_skb(x)
#else
#define RECEIVE(x) netif_rx(x)
#endif
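
/*
 * The RECEIVE() wrapper hides the delivery-path difference:
 * netif_receive_skb() is the NAPI entry point and must be called
 * from poll (softirq) context, while netif_rx() queues the skb
 * from hard interrupt context when NAPI is compiled out.
 */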

const char gfar_driver_name[] = "Gianfar Ethernet";
const char gfar_driver_version[] = "1.3";

static int gfar_enet_open(struct net_device *dev);
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
struct sk_buff *gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp);
static struct net_device_stats *gfar_get_stats(struct net_device *dev);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id, struct pt_regs *regs);
static irqreturn_t gfar_transmit(int irq, void *dev_id, struct pt_regs *regs);
static irqreturn_t gfar_interrupt(int irq, void *dev_id, struct pt_regs *regs);
static void adjust_link(struct net_device *dev);
static void init_registers(struct net_device *dev);
static int init_phy(struct net_device *dev);
static int gfar_probe(struct platform_device *pdev);
static int gfar_remove(struct platform_device *pdev);
static void free_skb_resources(struct gfar_private *priv);
static void gfar_set_multi(struct net_device *dev);
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
#ifdef CONFIG_GFAR_NAPI
static int gfar_poll(struct net_device *dev, int *budget);
#endif
int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit);
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, int length);
static void gfar_vlan_rx_register(struct net_device *netdev,
                                struct vlan_group *grp);
static void gfar_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid);
void gfar_halt(struct net_device *dev);
void gfar_start(struct net_device *dev);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr);

extern struct ethtool_ops gfar_ethtool_ops;

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");

/* Returns 1 if incoming frames use an FCB */
static inline int gfar_uses_fcb(struct gfar_private *priv)
{
        return (priv->vlan_enable || priv->rx_csum_enable);
}
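
/*
 * (When either feature is enabled, the controller prepends a Frame
 * Control Block of GMAC_FCB_LEN bytes to each received frame;
 * gfar_get_fcb() below strips it off again before the skb is passed
 * up the stack.)
 */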

/* Set up the ethernet device structure, private data,
 * and anything else we need before we start */
static int gfar_probe(struct platform_device *pdev)
{
        u32 tempval;
        struct net_device *dev = NULL;
        struct gfar_private *priv = NULL;
        struct gianfar_platform_data *einfo;
        struct resource *r;
        int idx;
        int err = 0;

        einfo = (struct gianfar_platform_data *) pdev->dev.platform_data;

        if (NULL == einfo) {
                printk(KERN_ERR "gfar %d: Missing additional data!\n",
                       pdev->id);

                return -ENODEV;
        }

        /* Create an ethernet device instance */
        dev = alloc_etherdev(sizeof (*priv));

        if (NULL == dev)
                return -ENOMEM;

        priv = netdev_priv(dev);

        /* Set the info in the priv to the current info */
        priv->einfo = einfo;

        /* fill out IRQ fields */
        if (einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
                priv->interruptTransmit = platform_get_irq_byname(pdev, "tx");
                priv->interruptReceive = platform_get_irq_byname(pdev, "rx");
                priv->interruptError = platform_get_irq_byname(pdev, "error");
                if (priv->interruptTransmit < 0 || priv->interruptReceive < 0 || priv->interruptError < 0)
                        goto regs_fail;
        } else {
                priv->interruptTransmit = platform_get_irq(pdev, 0);
                if (priv->interruptTransmit < 0)
                        goto regs_fail;
        }

        /* get a pointer to the register memory */
        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        priv->regs = ioremap(r->start, sizeof (struct gfar));

        if (NULL == priv->regs) {
                err = -ENOMEM;
                goto regs_fail;
        }

        spin_lock_init(&priv->txlock);
        spin_lock_init(&priv->rxlock);

        platform_set_drvdata(pdev, dev);

        /* Stop the DMA engine now, in case it was running before */
        /* (The firmware could have used it, and left it running). */
        /* To do this, we write Graceful Receive Stop and Graceful */
        /* Transmit Stop, and then wait until the corresponding bits */
        /* in IEVENT indicate the stops have completed. */
        tempval = gfar_read(&priv->regs->dmactrl);
        tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
        gfar_write(&priv->regs->dmactrl, tempval);

        tempval = gfar_read(&priv->regs->dmactrl);
        tempval |= (DMACTRL_GRS | DMACTRL_GTS);
        gfar_write(&priv->regs->dmactrl, tempval);

        while (!(gfar_read(&priv->regs->ievent) & (IEVENT_GRSC | IEVENT_GTSC)))
                cpu_relax();

        /* Reset MAC layer */
        gfar_write(&priv->regs->maccfg1, MACCFG1_SOFT_RESET);

        tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
        gfar_write(&priv->regs->maccfg1, tempval);

        /* Initialize MACCFG2. */
        gfar_write(&priv->regs->maccfg2, MACCFG2_INIT_SETTINGS);

        /* Initialize ECNTRL */
        gfar_write(&priv->regs->ecntrl, ECNTRL_INIT_SETTINGS);

        /* Copy the station address into the dev structure, */
        memcpy(dev->dev_addr, einfo->mac_addr, MAC_ADDR_LEN);

        /* Set the dev->base_addr to the gfar reg region */
        dev->base_addr = (unsigned long) (priv->regs);

        SET_MODULE_OWNER(dev);
        SET_NETDEV_DEV(dev, &pdev->dev);

        /* Fill in the dev structure */
        dev->open = gfar_enet_open;
        dev->hard_start_xmit = gfar_start_xmit;
        dev->tx_timeout = gfar_timeout;
        dev->watchdog_timeo = TX_TIMEOUT;
#ifdef CONFIG_GFAR_NAPI
        dev->poll = gfar_poll;
        dev->weight = GFAR_DEV_WEIGHT;
#endif
        dev->stop = gfar_close;
        dev->get_stats = gfar_get_stats;
        dev->change_mtu = gfar_change_mtu;
        dev->mtu = 1500;
        dev->set_multicast_list = gfar_set_multi;

        dev->ethtool_ops = &gfar_ethtool_ops;

        if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
                priv->rx_csum_enable = 1;
                dev->features |= NETIF_F_IP_CSUM;
        } else
                priv->rx_csum_enable = 0;

        priv->vlgrp = NULL;

        if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
                dev->vlan_rx_register = gfar_vlan_rx_register;
                dev->vlan_rx_kill_vid = gfar_vlan_rx_kill_vid;

                dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

                priv->vlan_enable = 1;
        }

        if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
                priv->extended_hash = 1;
                priv->hash_width = 9;

                priv->hash_regs[0] = &priv->regs->igaddr0;
                priv->hash_regs[1] = &priv->regs->igaddr1;
                priv->hash_regs[2] = &priv->regs->igaddr2;
                priv->hash_regs[3] = &priv->regs->igaddr3;
                priv->hash_regs[4] = &priv->regs->igaddr4;
                priv->hash_regs[5] = &priv->regs->igaddr5;
                priv->hash_regs[6] = &priv->regs->igaddr6;
                priv->hash_regs[7] = &priv->regs->igaddr7;
                priv->hash_regs[8] = &priv->regs->gaddr0;
                priv->hash_regs[9] = &priv->regs->gaddr1;
                priv->hash_regs[10] = &priv->regs->gaddr2;
                priv->hash_regs[11] = &priv->regs->gaddr3;
                priv->hash_regs[12] = &priv->regs->gaddr4;
                priv->hash_regs[13] = &priv->regs->gaddr5;
                priv->hash_regs[14] = &priv->regs->gaddr6;
                priv->hash_regs[15] = &priv->regs->gaddr7;

        } else {
                priv->extended_hash = 0;
                priv->hash_width = 8;

                priv->hash_regs[0] = &priv->regs->gaddr0;
                priv->hash_regs[1] = &priv->regs->gaddr1;
                priv->hash_regs[2] = &priv->regs->gaddr2;
                priv->hash_regs[3] = &priv->regs->gaddr3;
                priv->hash_regs[4] = &priv->regs->gaddr4;
                priv->hash_regs[5] = &priv->regs->gaddr5;
                priv->hash_regs[6] = &priv->regs->gaddr6;
                priv->hash_regs[7] = &priv->regs->gaddr7;
        }
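
        /*
         * (With 16 hash registers there are 512 hash bins, so 9 bits
         * of the address CRC select a bin; with 8 registers there are
         * 256 bins and 8 bits are used, hence hash_width above.)
         */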

        if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_PADDING)
                priv->padding = DEFAULT_PADDING;
        else
                priv->padding = 0;

        if (dev->features & NETIF_F_IP_CSUM)
                dev->hard_header_len += GMAC_FCB_LEN;

        priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
        priv->tx_ring_size = DEFAULT_TX_RING_SIZE;
        priv->rx_ring_size = DEFAULT_RX_RING_SIZE;

        priv->txcoalescing = DEFAULT_TX_COALESCE;
        priv->txcount = DEFAULT_TXCOUNT;
        priv->txtime = DEFAULT_TXTIME;
        priv->rxcoalescing = DEFAULT_RX_COALESCE;
        priv->rxcount = DEFAULT_RXCOUNT;
        priv->rxtime = DEFAULT_RXTIME;

        /* Enable most messages by default */
        priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;

        err = register_netdev(dev);

        if (err) {
                printk(KERN_ERR "%s: Cannot register net device, aborting.\n",
                                dev->name);
                goto register_fail;
        }

        /* Create all the sysfs files */
        gfar_init_sysfs(dev);

        /* Print out the device info */
        printk(KERN_INFO DEVICE_NAME, dev->name);
        for (idx = 0; idx < 6; idx++)
                printk("%2.2x%c", dev->dev_addr[idx], idx == 5 ? ' ' : ':');
        printk("\n");

        /* Even more device info helps when determining which kernel */
        /* provided which set of benchmarks. */
#ifdef CONFIG_GFAR_NAPI
        printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name);
#else
        printk(KERN_INFO "%s: Running with NAPI disabled\n", dev->name);
#endif
        printk(KERN_INFO "%s: %d/%d RX/TX BD ring size\n",
               dev->name, priv->rx_ring_size, priv->tx_ring_size);

        return 0;

register_fail:
        iounmap(priv->regs);
regs_fail:
        free_netdev(dev);
        return err;
}

static int gfar_remove(struct platform_device *pdev)
{
        struct net_device *dev = platform_get_drvdata(pdev);
        struct gfar_private *priv = netdev_priv(dev);

        platform_set_drvdata(pdev, NULL);

        iounmap(priv->regs);
        free_netdev(dev);

        return 0;
}


/* Initializes driver's PHY state, and attaches to the PHY.
 * Returns 0 on success.
 */
static int init_phy(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);
        uint gigabit_support =
                priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
                SUPPORTED_1000baseT_Full : 0;
        struct phy_device *phydev;
        char phy_id[BUS_ID_SIZE];

        priv->oldlink = 0;
        priv->oldspeed = 0;
        priv->oldduplex = -1;

        snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT, priv->einfo->bus_id, priv->einfo->phy_id);

        phydev = phy_connect(dev, phy_id, &adjust_link, 0);

        if (IS_ERR(phydev)) {
                printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
                return PTR_ERR(phydev);
        }

        /* Remove any features not supported by the controller */
        phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
        phydev->advertising = phydev->supported;

        priv->phydev = phydev;

        return 0;
}

static void init_registers(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);

        /* Clear IEVENT */
        gfar_write(&priv->regs->ievent, IEVENT_INIT_CLEAR);

        /* Initialize IMASK */
        gfar_write(&priv->regs->imask, IMASK_INIT_CLEAR);

        /* Init hash registers to zero */
        gfar_write(&priv->regs->igaddr0, 0);
        gfar_write(&priv->regs->igaddr1, 0);
        gfar_write(&priv->regs->igaddr2, 0);
        gfar_write(&priv->regs->igaddr3, 0);
        gfar_write(&priv->regs->igaddr4, 0);
        gfar_write(&priv->regs->igaddr5, 0);
        gfar_write(&priv->regs->igaddr6, 0);
        gfar_write(&priv->regs->igaddr7, 0);

        gfar_write(&priv->regs->gaddr0, 0);
        gfar_write(&priv->regs->gaddr1, 0);
        gfar_write(&priv->regs->gaddr2, 0);
        gfar_write(&priv->regs->gaddr3, 0);
        gfar_write(&priv->regs->gaddr4, 0);
        gfar_write(&priv->regs->gaddr5, 0);
        gfar_write(&priv->regs->gaddr6, 0);
        gfar_write(&priv->regs->gaddr7, 0);

        /* Zero out the rmon mib registers if it has them */
        if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
                memset_io(&(priv->regs->rmon), 0, sizeof (struct rmon_mib));

                /* Mask off the CAM interrupts */
                gfar_write(&priv->regs->rmon.cam1, 0xffffffff);
                gfar_write(&priv->regs->rmon.cam2, 0xffffffff);
        }

        /* Initialize the max receive buffer length */
        gfar_write(&priv->regs->mrblr, priv->rx_buffer_size);

        /* Initialize the Minimum Frame Length Register */
        gfar_write(&priv->regs->minflr, MINFLR_INIT_SETTINGS);

        /* Assign the TBI an address which won't conflict with the PHYs */
        gfar_write(&priv->regs->tbipa, TBIPA_VALUE);
}


/* Halt the receive and transmit queues */
void gfar_halt(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);
        struct gfar __iomem *regs = priv->regs;
        u32 tempval;

        /* Mask all interrupts */
        gfar_write(&regs->imask, IMASK_INIT_CLEAR);

        /* Clear all interrupts */
        gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);

        /* Stop the DMA, and wait for it to stop */
        tempval = gfar_read(&priv->regs->dmactrl);
        if ((tempval & (DMACTRL_GRS | DMACTRL_GTS))
            != (DMACTRL_GRS | DMACTRL_GTS)) {
                tempval |= (DMACTRL_GRS | DMACTRL_GTS);
                gfar_write(&priv->regs->dmactrl, tempval);

                while (!(gfar_read(&priv->regs->ievent) &
                         (IEVENT_GRSC | IEVENT_GTSC)))
                        cpu_relax();
        }

        /* Disable Rx and Tx */
        tempval = gfar_read(&regs->maccfg1);
        tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
        gfar_write(&regs->maccfg1, tempval);
}

void stop_gfar(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);
        struct gfar __iomem *regs = priv->regs;
        unsigned long flags;

        phy_stop(priv->phydev);

        /* Lock it down */
        spin_lock_irqsave(&priv->txlock, flags);
        spin_lock(&priv->rxlock);

        gfar_halt(dev);

        spin_unlock(&priv->rxlock);
        spin_unlock_irqrestore(&priv->txlock, flags);

        /* Free the IRQs */
        if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
                free_irq(priv->interruptError, dev);
                free_irq(priv->interruptTransmit, dev);
                free_irq(priv->interruptReceive, dev);
        } else {
                free_irq(priv->interruptTransmit, dev);
        }

        free_skb_resources(priv);

        dma_free_coherent(NULL,
                        sizeof(struct txbd8)*priv->tx_ring_size
                        + sizeof(struct rxbd8)*priv->rx_ring_size,
                        priv->tx_bd_base,
                        gfar_read(&regs->tbase0));
}
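
/*
 * (Note: the dma handle that dma_free_coherent() needs above is
 * recovered by reading TBASE0 back, since startup_gfar() programs
 * that register with the bus address of the descriptor region.)
 */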

/* If there are any tx skbs or rx skbs still around, free them.
 * Then free tx_skbuff and rx_skbuff */
static void free_skb_resources(struct gfar_private *priv)
{
        struct rxbd8 *rxbdp;
        struct txbd8 *txbdp;
        int i;

        /* Go through all the buffer descriptors and free their data buffers */
        txbdp = priv->tx_bd_base;

        for (i = 0; i < priv->tx_ring_size; i++) {

                if (priv->tx_skbuff[i]) {
                        dma_unmap_single(NULL, txbdp->bufPtr,
                                        txbdp->length,
                                        DMA_TO_DEVICE);
                        dev_kfree_skb_any(priv->tx_skbuff[i]);
                        priv->tx_skbuff[i] = NULL;
                }

                /* Advance to the next descriptor, mirroring the rx loop
                 * below (otherwise every skb would be unmapped against
                 * the first descriptor's address and length) */
                txbdp++;
        }

        kfree(priv->tx_skbuff);

        rxbdp = priv->rx_bd_base;

        /* rx_skbuff is not guaranteed to be allocated, so only
         * free it and its contents if it is allocated */
        if (priv->rx_skbuff != NULL) {
                for (i = 0; i < priv->rx_ring_size; i++) {
                        if (priv->rx_skbuff[i]) {
                                dma_unmap_single(NULL, rxbdp->bufPtr,
                                                priv->rx_buffer_size,
                                                DMA_FROM_DEVICE);

                                dev_kfree_skb_any(priv->rx_skbuff[i]);
                                priv->rx_skbuff[i] = NULL;
                        }

                        rxbdp->status = 0;
                        rxbdp->length = 0;
                        rxbdp->bufPtr = 0;

                        rxbdp++;
                }

                kfree(priv->rx_skbuff);
        }
}

void gfar_start(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);
        struct gfar __iomem *regs = priv->regs;
        u32 tempval;

        /* Enable Rx and Tx in MACCFG1 */
        tempval = gfar_read(&regs->maccfg1);
        tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
        gfar_write(&regs->maccfg1, tempval);

        /* Initialize DMACTRL to have WWR and WOP */
        tempval = gfar_read(&priv->regs->dmactrl);
        tempval |= DMACTRL_INIT_SETTINGS;
        gfar_write(&priv->regs->dmactrl, tempval);

        /* Make sure we aren't stopped */
        tempval = gfar_read(&priv->regs->dmactrl);
        tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
        gfar_write(&priv->regs->dmactrl, tempval);

        /* Clear THLT/RHLT, so that the DMA starts polling now */
        gfar_write(&regs->tstat, TSTAT_CLEAR_THALT);
        gfar_write(&regs->rstat, RSTAT_CLEAR_RHALT);

        /* Unmask the interrupts we look for */
        gfar_write(&regs->imask, IMASK_DEFAULT);
}

/* Bring the controller up and running */
int startup_gfar(struct net_device *dev)
{
        struct txbd8 *txbdp;
        struct rxbd8 *rxbdp;
        dma_addr_t addr;
        unsigned long vaddr;
        int i;
        struct gfar_private *priv = netdev_priv(dev);
        struct gfar __iomem *regs = priv->regs;
        int err = 0;
        u32 rctrl = 0;
        u32 attrs = 0;

        gfar_write(&regs->imask, IMASK_INIT_CLEAR);

        /* Allocate memory for the buffer descriptors */
        vaddr = (unsigned long) dma_alloc_coherent(NULL,
                        sizeof (struct txbd8) * priv->tx_ring_size +
                        sizeof (struct rxbd8) * priv->rx_ring_size,
                        &addr, GFP_KERNEL);

        if (vaddr == 0) {
                if (netif_msg_ifup(priv))
                        printk(KERN_ERR "%s: Could not allocate buffer descriptors!\n",
                                        dev->name);
                return -ENOMEM;
        }

        priv->tx_bd_base = (struct txbd8 *) vaddr;

        /* enet DMA only understands physical addresses */
        gfar_write(&regs->tbase0, addr);

        /* Start the rx descriptor ring where the tx ring leaves off */
        addr = addr + sizeof (struct txbd8) * priv->tx_ring_size;
        vaddr = vaddr + sizeof (struct txbd8) * priv->tx_ring_size;
        priv->rx_bd_base = (struct rxbd8 *) vaddr;
        gfar_write(&regs->rbase0, addr);

        /* Setup the skbuff rings */
        priv->tx_skbuff =
            (struct sk_buff **) kmalloc(sizeof (struct sk_buff *) *
                                        priv->tx_ring_size, GFP_KERNEL);

        if (NULL == priv->tx_skbuff) {
                if (netif_msg_ifup(priv))
                        printk(KERN_ERR "%s: Could not allocate tx_skbuff\n",
                                        dev->name);
                err = -ENOMEM;
                goto tx_skb_fail;
        }

        for (i = 0; i < priv->tx_ring_size; i++)
                priv->tx_skbuff[i] = NULL;

        priv->rx_skbuff =
            (struct sk_buff **) kmalloc(sizeof (struct sk_buff *) *
                                        priv->rx_ring_size, GFP_KERNEL);

        if (NULL == priv->rx_skbuff) {
                if (netif_msg_ifup(priv))
                        printk(KERN_ERR "%s: Could not allocate rx_skbuff\n",
                                        dev->name);
                err = -ENOMEM;
                goto rx_skb_fail;
        }

        for (i = 0; i < priv->rx_ring_size; i++)
                priv->rx_skbuff[i] = NULL;

        /* Initialize some variables in our dev structure */
        priv->dirty_tx = priv->cur_tx = priv->tx_bd_base;
        priv->cur_rx = priv->rx_bd_base;
        priv->skb_curtx = priv->skb_dirtytx = 0;
        priv->skb_currx = 0;

        /* Initialize Transmit Descriptor Ring */
        txbdp = priv->tx_bd_base;
        for (i = 0; i < priv->tx_ring_size; i++) {
                txbdp->status = 0;
                txbdp->length = 0;
                txbdp->bufPtr = 0;
                txbdp++;
        }

        /* Set the last descriptor in the ring to indicate wrap */
        txbdp--;
        txbdp->status |= TXBD_WRAP;

        rxbdp = priv->rx_bd_base;
        for (i = 0; i < priv->rx_ring_size; i++) {
                struct sk_buff *skb = NULL;

                rxbdp->status = 0;

                skb = gfar_new_skb(dev, rxbdp);

                priv->rx_skbuff[i] = skb;

                rxbdp++;
        }

        /* Set the last descriptor in the ring to wrap */
        rxbdp--;
        rxbdp->status |= RXBD_WRAP;

        /* If the device has multiple interrupts, register for
         * them.  Otherwise, only register for the one */
        if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
                /* Install our interrupt handlers for Error,
                 * Transmit, and Receive */
                if (request_irq(priv->interruptError, gfar_error,
                                0, "enet_error", dev) < 0) {
                        if (netif_msg_intr(priv))
                                printk(KERN_ERR "%s: Can't get IRQ %d\n",
                                        dev->name, priv->interruptError);

                        err = -1;
                        goto err_irq_fail;
                }

                if (request_irq(priv->interruptTransmit, gfar_transmit,
                                0, "enet_tx", dev) < 0) {
                        if (netif_msg_intr(priv))
                                printk(KERN_ERR "%s: Can't get IRQ %d\n",
                                        dev->name, priv->interruptTransmit);

                        err = -1;

                        goto tx_irq_fail;
                }

                if (request_irq(priv->interruptReceive, gfar_receive,
                                0, "enet_rx", dev) < 0) {
                        if (netif_msg_intr(priv))
                                printk(KERN_ERR "%s: Can't get IRQ %d (receive0)\n",
                                                dev->name, priv->interruptReceive);

                        err = -1;
                        goto rx_irq_fail;
                }
        } else {
                if (request_irq(priv->interruptTransmit, gfar_interrupt,
                                0, "gfar_interrupt", dev) < 0) {
                        if (netif_msg_intr(priv))
                                printk(KERN_ERR "%s: Can't get IRQ %d\n",
                                        dev->name, priv->interruptTransmit);

                        err = -1;
                        goto err_irq_fail;
                }
        }

        phy_start(priv->phydev);

        /* Configure the coalescing support */
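        /* (mk_ic_value() presumably folds an enable bit together with
         * the frame-count and timer thresholds, so a write of 0, as in
         * the else branches below, leaves coalescing disabled.) */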
        if (priv->txcoalescing)
                gfar_write(&regs->txic,
                           mk_ic_value(priv->txcount, priv->txtime));
        else
                gfar_write(&regs->txic, 0);

        if (priv->rxcoalescing)
                gfar_write(&regs->rxic,
                           mk_ic_value(priv->rxcount, priv->rxtime));
        else
                gfar_write(&regs->rxic, 0);

        if (priv->rx_csum_enable)
                rctrl |= RCTRL_CHECKSUMMING;

        if (priv->extended_hash) {
                rctrl |= RCTRL_EXTHASH;

                gfar_clear_exact_match(dev);
                rctrl |= RCTRL_EMEN;
        }

        if (priv->vlan_enable)
                rctrl |= RCTRL_VLAN;

        if (priv->padding) {
                rctrl &= ~RCTRL_PAL_MASK;
                rctrl |= RCTRL_PADDING(priv->padding);
        }

        /* Init rctrl based on our settings */
        gfar_write(&priv->regs->rctrl, rctrl);

        if (dev->features & NETIF_F_IP_CSUM)
                gfar_write(&priv->regs->tctrl, TCTRL_INIT_CSUM);

        /* Set the extraction length and index */
        attrs = ATTRELI_EL(priv->rx_stash_size) |
                ATTRELI_EI(priv->rx_stash_index);

        gfar_write(&priv->regs->attreli, attrs);

        /* Start with defaults, and add stashing or locking
         * depending on the appropriate variables */
        attrs = ATTR_INIT_SETTINGS;

        if (priv->bd_stash_en)
                attrs |= ATTR_BDSTASH;

        if (priv->rx_stash_size != 0)
                attrs |= ATTR_BUFSTASH;

        gfar_write(&priv->regs->attr, attrs);

        gfar_write(&priv->regs->fifo_tx_thr, priv->fifo_threshold);
        gfar_write(&priv->regs->fifo_tx_starve, priv->fifo_starve);
        gfar_write(&priv->regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);

        /* Start the controller */
        gfar_start(dev);

        return 0;

rx_irq_fail:
        free_irq(priv->interruptTransmit, dev);
tx_irq_fail:
        free_irq(priv->interruptError, dev);
err_irq_fail:
rx_skb_fail:
        free_skb_resources(priv);
tx_skb_fail:
        dma_free_coherent(NULL,
                        sizeof(struct txbd8)*priv->tx_ring_size
                        + sizeof(struct rxbd8)*priv->rx_ring_size,
                        priv->tx_bd_base,
                        gfar_read(&regs->tbase0));

        return err;
}

/* Called when something needs to use the ethernet device */
/* Returns 0 for success. */
static int gfar_enet_open(struct net_device *dev)
{
        int err;

        /* Initialize a bunch of registers */
        init_registers(dev);

        gfar_set_mac_address(dev);

        err = init_phy(dev);

        if (err)
                return err;

        err = startup_gfar(dev);

        netif_start_queue(dev);

        return err;
}

static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb, struct txbd8 *bdp)
{
        struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);

        memset(fcb, 0, GMAC_FCB_LEN);

        return fcb;
}

static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb)
{
        u8 flags = 0;

        /* If we're here, it's an IP packet with a TCP or UDP
         * payload.  We ask the controller to checksum it, using the
         * pseudo-header sum we provide
         */
        flags = TXFCB_DEFAULT;

        /* Tell the controller what the protocol is */
        /* And provide the already calculated phcs */
        if (skb->nh.iph->protocol == IPPROTO_UDP) {
                flags |= TXFCB_UDP;
                fcb->phcs = skb->h.uh->check;
        } else
                fcb->phcs = skb->h.th->check;

        /* l3os is the distance between the start of the
         * frame (skb->data) and the start of the IP hdr.
         * l4os is the distance between the start of the
         * l3 hdr and the l4 hdr */
        fcb->l3os = (u16)(skb->nh.raw - skb->data - GMAC_FCB_LEN);
        fcb->l4os = (u16)(skb->h.raw - skb->nh.raw);

        fcb->flags = flags;
}

inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
{
        fcb->flags |= TXFCB_VLN;
        fcb->vlctl = vlan_tx_tag_get(skb);
}

/* This is called by the kernel when a frame is ready for transmission. */
/* It is pointed to by the dev->hard_start_xmit function pointer */
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);
        struct txfcb *fcb = NULL;
        struct txbd8 *txbdp;
        u16 status;
        unsigned long flags;

        /* Update transmit stats */
        priv->stats.tx_bytes += skb->len;

        /* Lock priv now */
        spin_lock_irqsave(&priv->txlock, flags);

        /* Point at the first free tx descriptor */
        txbdp = priv->cur_tx;

        /* Clear all but the WRAP status flags */
        status = txbdp->status & TXBD_WRAP;

        /* Set up checksumming */
        if (likely((dev->features & NETIF_F_IP_CSUM)
                        && (CHECKSUM_HW == skb->ip_summed))) {
                fcb = gfar_add_fcb(skb, txbdp);
                status |= TXBD_TOE;
                gfar_tx_checksum(skb, fcb);
        }

        if (priv->vlan_enable &&
                        unlikely(priv->vlgrp && vlan_tx_tag_present(skb))) {
                if (unlikely(NULL == fcb)) {
                        fcb = gfar_add_fcb(skb, txbdp);
                        status |= TXBD_TOE;
                }

                gfar_tx_vlan(skb, fcb);
        }

        /* Set buffer length and pointer */
        txbdp->length = skb->len;
        txbdp->bufPtr = dma_map_single(NULL, skb->data,
                        skb->len, DMA_TO_DEVICE);

        /* Save the skb pointer so we can free it later */
        priv->tx_skbuff[priv->skb_curtx] = skb;

        /* Update the current skb pointer (wrapping if this was the last) */
        priv->skb_curtx =
            (priv->skb_curtx + 1) & TX_RING_MOD_MASK(priv->tx_ring_size);
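
        /* (TX_RING_MOD_MASK is presumably just (size - 1); the masking
         * wrap trick relies on the ring size being a power of two.) */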

        /* Flag the BD as interrupt-causing */
        status |= TXBD_INTERRUPT;

        /* Flag the BD as ready to go, last in frame, and */
        /* in need of CRC */
        status |= (TXBD_READY | TXBD_LAST | TXBD_CRC);

        dev->trans_start = jiffies;

        txbdp->status = status;

        /* If this was the last BD in the ring, the next one */
        /* is at the beginning of the ring */
        if (txbdp->status & TXBD_WRAP)
                txbdp = priv->tx_bd_base;
        else
                txbdp++;

        /* If the next BD still needs to be cleaned up, then the bds
           are full.  We need to tell the kernel to stop sending us stuff. */
        if (txbdp == priv->dirty_tx) {
                netif_stop_queue(dev);

                priv->stats.tx_fifo_errors++;
        }

        /* Update the current txbd to the next one */
        priv->cur_tx = txbdp;

        /* Tell the DMA to go go go */
        gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);

        /* Unlock priv */
        spin_unlock_irqrestore(&priv->txlock, flags);

        return 0;
}

/* Stops the kernel queue, and halts the controller */
static int gfar_close(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);
        stop_gfar(dev);

        /* Disconnect from the PHY */
        phy_disconnect(priv->phydev);
        priv->phydev = NULL;

        netif_stop_queue(dev);

        return 0;
}

/* returns a net_device_stats structure pointer */
static struct net_device_stats *gfar_get_stats(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);

        return &(priv->stats);
}

/* Changes the mac address if the controller is not running. */
static int gfar_set_mac_address(struct net_device *dev)
{
        gfar_set_mac_for_addr(dev, 0, dev->dev_addr);

        return 0;
}


/* Enables and disables VLAN insertion/extraction */
static void gfar_vlan_rx_register(struct net_device *dev,
                struct vlan_group *grp)
{
        struct gfar_private *priv = netdev_priv(dev);
        unsigned long flags;
        u32 tempval;

        spin_lock_irqsave(&priv->rxlock, flags);

        priv->vlgrp = grp;

        if (grp) {
                /* Enable VLAN tag insertion */
                tempval = gfar_read(&priv->regs->tctrl);
                tempval |= TCTRL_VLINS;

                gfar_write(&priv->regs->tctrl, tempval);

                /* Enable VLAN tag extraction */
                tempval = gfar_read(&priv->regs->rctrl);
                tempval |= RCTRL_VLEX;
                gfar_write(&priv->regs->rctrl, tempval);
        } else {
                /* Disable VLAN tag insertion */
                tempval = gfar_read(&priv->regs->tctrl);
                tempval &= ~TCTRL_VLINS;
                gfar_write(&priv->regs->tctrl, tempval);

                /* Disable VLAN tag extraction */
                tempval = gfar_read(&priv->regs->rctrl);
                tempval &= ~RCTRL_VLEX;
                gfar_write(&priv->regs->rctrl, tempval);
        }

        spin_unlock_irqrestore(&priv->rxlock, flags);
}


static void gfar_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
{
        struct gfar_private *priv = netdev_priv(dev);
        unsigned long flags;

        spin_lock_irqsave(&priv->rxlock, flags);

        if (priv->vlgrp)
                priv->vlgrp->vlan_devices[vid] = NULL;

        spin_unlock_irqrestore(&priv->rxlock, flags);
}


static int gfar_change_mtu(struct net_device *dev, int new_mtu)
{
        int tempsize, tempval;
        struct gfar_private *priv = netdev_priv(dev);
        int oldsize = priv->rx_buffer_size;
        int frame_size = new_mtu + ETH_HLEN;

        if (priv->vlan_enable)
                frame_size += VLAN_ETH_HLEN;

        if (gfar_uses_fcb(priv))
                frame_size += GMAC_FCB_LEN;

        frame_size += priv->padding;

        if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
                if (netif_msg_drv(priv))
                        printk(KERN_ERR "%s: Invalid MTU setting\n",
                                        dev->name);
                return -EINVAL;
        }

        tempsize =
            (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
            INCREMENTAL_BUFFER_SIZE;
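
        /*
         * (I.e. round frame_size down to a multiple of
         * INCREMENTAL_BUFFER_SIZE, then add one full increment, so the
         * resulting buffer size is always strictly larger than the
         * frame.)
         */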

        /* Only stop and start the controller if it isn't already
         * stopped, and we changed something */
        if ((oldsize != tempsize) && (dev->flags & IFF_UP))
                stop_gfar(dev);

        priv->rx_buffer_size = tempsize;

        dev->mtu = new_mtu;

        gfar_write(&priv->regs->mrblr, priv->rx_buffer_size);
        gfar_write(&priv->regs->maxfrm, priv->rx_buffer_size);

        /* If the mtu is larger than the max size for standard
         * ethernet frames (ie, a jumbo frame), then set maccfg2
         * to allow huge frames, and to check the length */
        tempval = gfar_read(&priv->regs->maccfg2);

        if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE)
                tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
        else
                tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);

        gfar_write(&priv->regs->maccfg2, tempval);

        if ((oldsize != tempsize) && (dev->flags & IFF_UP))
                startup_gfar(dev);

        return 0;
}

/* gfar_timeout gets called when a packet has not been
 * transmitted after a set amount of time.
 * For now, assume that clearing out all the structures, and
 * starting over will fix the problem. */
static void gfar_timeout(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);

        priv->stats.tx_errors++;

        if (dev->flags & IFF_UP) {
                stop_gfar(dev);
                startup_gfar(dev);
        }

        netif_schedule(dev);
}

/* Interrupt Handler for Transmit complete */
static irqreturn_t gfar_transmit(int irq, void *dev_id, struct pt_regs *regs)
{
        struct net_device *dev = (struct net_device *) dev_id;
        struct gfar_private *priv = netdev_priv(dev);
        struct txbd8 *bdp;

        /* Clear IEVENT */
        gfar_write(&priv->regs->ievent, IEVENT_TX_MASK);

        /* Lock priv */
        spin_lock(&priv->txlock);
        bdp = priv->dirty_tx;
        while ((bdp->status & TXBD_READY) == 0) {
                /* If dirty_tx and cur_tx are the same, then either the */
                /* ring is empty or full now (it could only be full in the beginning, */
                /* obviously).  If it is empty, we are done. */
                if ((bdp == priv->cur_tx) && (netif_queue_stopped(dev) == 0))
                        break;

                priv->stats.tx_packets++;

                /* Deferred means some collisions occurred during transmit, */
                /* but we eventually sent the packet. */
                if (bdp->status & TXBD_DEF)
                        priv->stats.collisions++;

                /* Free the sk buffer associated with this TxBD */
                dev_kfree_skb_irq(priv->tx_skbuff[priv->skb_dirtytx]);
                priv->tx_skbuff[priv->skb_dirtytx] = NULL;
                priv->skb_dirtytx =
                    (priv->skb_dirtytx +
                     1) & TX_RING_MOD_MASK(priv->tx_ring_size);

                /* update bdp to point at next bd in the ring (wrapping if necessary) */
                if (bdp->status & TXBD_WRAP)
                        bdp = priv->tx_bd_base;
                else
                        bdp++;

                /* Move dirty_tx to be the next bd */
                priv->dirty_tx = bdp;

                /* We freed a buffer, so now we can restart transmission */
                if (netif_queue_stopped(dev))
                        netif_wake_queue(dev);
        } /* while ((bdp->status & TXBD_READY) == 0) */

        /* If we are coalescing the interrupts, reset the timer */
        /* Otherwise, clear it */
        if (priv->txcoalescing)
                gfar_write(&priv->regs->txic,
                           mk_ic_value(priv->txcount, priv->txtime));
        else
                gfar_write(&priv->regs->txic, 0);

        spin_unlock(&priv->txlock);

        return IRQ_HANDLED;
}

struct sk_buff *gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp)
{
        unsigned int alignamount;
        struct gfar_private *priv = netdev_priv(dev);
        struct sk_buff *skb = NULL;
        unsigned int timeout = SKB_ALLOC_TIMEOUT;

        /* We have to allocate the skb, so keep trying till we succeed */
        while ((!skb) && timeout--)
                skb = dev_alloc_skb(priv->rx_buffer_size + RXBUF_ALIGNMENT);

        if (NULL == skb)
                return NULL;

        alignamount = RXBUF_ALIGNMENT -
                (((unsigned) skb->data) & (RXBUF_ALIGNMENT - 1));

        /* We need the data buffer to be aligned properly.  We will reserve
         * as many bytes as needed to align the data properly
         */
        skb_reserve(skb, alignamount);

        skb->dev = dev;

        bdp->bufPtr = dma_map_single(NULL, skb->data,
                        priv->rx_buffer_size, DMA_FROM_DEVICE);

        bdp->length = 0;

        /* Mark the buffer empty */
        bdp->status |= (RXBD_EMPTY | RXBD_INTERRUPT);

        return skb;
}

static inline void count_errors(unsigned short status, struct gfar_private *priv)
{
        struct net_device_stats *stats = &priv->stats;
        struct gfar_extra_stats *estats = &priv->extra_stats;

        /* If the packet was truncated, none of the other errors
         * matter */
        if (status & RXBD_TRUNCATED) {
                stats->rx_length_errors++;

                estats->rx_trunc++;

                return;
        }
        /* Count the errors, if there were any */
        if (status & (RXBD_LARGE | RXBD_SHORT)) {
                stats->rx_length_errors++;

                if (status & RXBD_LARGE)
                        estats->rx_large++;
                else
                        estats->rx_short++;
        }
        if (status & RXBD_NONOCTET) {
                stats->rx_frame_errors++;
                estats->rx_nonoctet++;
        }
        if (status & RXBD_CRCERR) {
                estats->rx_crcerr++;
                stats->rx_crc_errors++;
        }
        if (status & RXBD_OVERRUN) {
                estats->rx_overrun++;
                stats->rx_over_errors++;
        }
}

irqreturn_t gfar_receive(int irq, void *dev_id, struct pt_regs *regs)
{
        struct net_device *dev = (struct net_device *) dev_id;
        struct gfar_private *priv = netdev_priv(dev);
#ifdef CONFIG_GFAR_NAPI
        u32 tempval;
#else
        unsigned long flags;
#endif

        /* Clear IEVENT, so rx interrupt isn't called again
         * because of this interrupt */
        gfar_write(&priv->regs->ievent, IEVENT_RX_MASK);

        /* support NAPI */
#ifdef CONFIG_GFAR_NAPI
        if (netif_rx_schedule_prep(dev)) {
                tempval = gfar_read(&priv->regs->imask);
                tempval &= IMASK_RX_DISABLED;
                gfar_write(&priv->regs->imask, tempval);

                __netif_rx_schedule(dev);
        } else {
                if (netif_msg_rx_err(priv))
                        printk(KERN_DEBUG "%s: receive called twice (%x)[%x]\n",
                                dev->name, gfar_read(&priv->regs->ievent),
                                gfar_read(&priv->regs->imask));
        }
#else

        spin_lock_irqsave(&priv->rxlock, flags);
        gfar_clean_rx_ring(dev, priv->rx_ring_size);

        /* If we are coalescing interrupts, update the timer */
        /* Otherwise, clear it */
        if (priv->rxcoalescing)
                gfar_write(&priv->regs->rxic,
                           mk_ic_value(priv->rxcount, priv->rxtime));
        else
                gfar_write(&priv->regs->rxic, 0);

        spin_unlock_irqrestore(&priv->rxlock, flags);
#endif

        return IRQ_HANDLED;
}

static inline int gfar_rx_vlan(struct sk_buff *skb,
                struct vlan_group *vlgrp, unsigned short vlctl)
{
#ifdef CONFIG_GFAR_NAPI
        return vlan_hwaccel_receive_skb(skb, vlgrp, vlctl);
#else
        return vlan_hwaccel_rx(skb, vlgrp, vlctl);
#endif
}

static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
{
        /* If valid headers were found, and valid sums
         * were verified, then we tell the kernel that no
         * checksumming is necessary.  Otherwise, the stack
         * must verify the checksum itself */
        if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        else
                skb->ip_summed = CHECKSUM_NONE;
}


static inline struct rxfcb *gfar_get_fcb(struct sk_buff *skb)
{
        struct rxfcb *fcb = (struct rxfcb *)skb->data;

        /* Remove the FCB from the skb */
        skb_pull(skb, GMAC_FCB_LEN);

        return fcb;
}

/* gfar_process_frame() -- handle one incoming packet if skb
 * isn't NULL.  */
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
                int length)
{
        struct gfar_private *priv = netdev_priv(dev);
        struct rxfcb *fcb = NULL;

        if (NULL == skb) {
                if (netif_msg_rx_err(priv))
                        printk(KERN_WARNING "%s: Missing skb!\n", dev->name);
                priv->stats.rx_dropped++;
                priv->extra_stats.rx_skbmissing++;
        } else {
                int ret;

                /* Prep the skb for the packet */
                skb_put(skb, length);

                /* Grab the FCB if there is one */
                if (gfar_uses_fcb(priv))
                        fcb = gfar_get_fcb(skb);

                /* Remove the padded bytes, if there are any */
                if (priv->padding)
                        skb_pull(skb, priv->padding);

                if (priv->rx_csum_enable)
                        gfar_rx_checksum(skb, fcb);

                /* Tell the skb what kind of packet this is */
                skb->protocol = eth_type_trans(skb, dev);

                /* Send the packet up the stack */
                if (unlikely(priv->vlgrp && (fcb->flags & RXFCB_VLN)))
                        ret = gfar_rx_vlan(skb, priv->vlgrp, fcb->vlctl);
                else
                        ret = RECEIVE(skb);

                if (NET_RX_DROP == ret)
                        priv->extra_stats.kernel_dropped++;
        }

        return 0;
}

/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
 *   until the budget/quota has been reached. Returns the number
 *   of frames handled
 */
int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
{
        struct rxbd8 *bdp;
        struct sk_buff *skb;
        u16 pkt_len;
        int howmany = 0;
        struct gfar_private *priv = netdev_priv(dev);

        /* Get the first full descriptor */
        bdp = priv->cur_rx;

        while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
                skb = priv->rx_skbuff[priv->skb_currx];

                if (!(bdp->status &
                      (RXBD_LARGE | RXBD_SHORT | RXBD_NONOCTET
                       | RXBD_CRCERR | RXBD_OVERRUN | RXBD_TRUNCATED))) {
                        /* Increment the number of packets */
                        priv->stats.rx_packets++;
                        howmany++;

                        /* Remove the FCS from the packet length */
                        pkt_len = bdp->length - 4;

                        gfar_process_frame(dev, skb, pkt_len);

                        priv->stats.rx_bytes += pkt_len;
                } else {
                        count_errors(bdp->status, priv);

                        if (skb)
                                dev_kfree_skb_any(skb);

                        priv->rx_skbuff[priv->skb_currx] = NULL;
                }

                dev->last_rx = jiffies;

                /* Clear the status flags for this buffer */
                bdp->status &= ~RXBD_STATS;

                /* Add another skb for the future */
                skb = gfar_new_skb(dev, bdp);
                priv->rx_skbuff[priv->skb_currx] = skb;

                /* Update to the next pointer */
                if (bdp->status & RXBD_WRAP)
                        bdp = priv->rx_bd_base;
                else
                        bdp++;

                /* update to point at the next skb */
                priv->skb_currx =
                    (priv->skb_currx +
                     1) & RX_RING_MOD_MASK(priv->rx_ring_size);

        }

        /* Update the current rxbd pointer to be the next one */
        priv->cur_rx = bdp;

        return howmany;
}
1501
1502 #ifdef CONFIG_GFAR_NAPI
1503 static int gfar_poll(struct net_device *dev, int *budget)
1504 {
1505         int howmany;
1506         struct gfar_private *priv = netdev_priv(dev);
1507         int rx_work_limit = *budget;
1508
1509         if (rx_work_limit > dev->quota)
1510                 rx_work_limit = dev->quota;
1511
1512         howmany = gfar_clean_rx_ring(dev, rx_work_limit);
1513
1514         dev->quota -= howmany;
1515         rx_work_limit -= howmany;
1516         *budget -= howmany;
1517
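        /* If the budget wasn't exhausted, the ring has been drained:
         * leave polling mode, restart the receiver, and re-enable the
         * receive interrupts */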
1518         if (rx_work_limit > 0) {
1519                 netif_rx_complete(dev);
1520
1521                 /* Clear the halt bit in RSTAT */
1522                 gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT);
1523
1524                 gfar_write(&priv->regs->imask, IMASK_DEFAULT);
1525
1526                 /* If we are coalescing interrupts, update the timer */
1527                 /* Otherwise, clear it */
1528                 if (priv->rxcoalescing)
1529                         gfar_write(&priv->regs->rxic,
1530                                    mk_ic_value(priv->rxcount, priv->rxtime));
1531                 else
1532                         gfar_write(&priv->regs->rxic, 0);
1533         }
1534
1535         /* Return 1 if there's more work to do */
1536         return (rx_work_limit > 0) ? 0 : 1;
1537 }
1538 #endif
1539
1540 /* The interrupt handler for devices with one interrupt */
1541 static irqreturn_t gfar_interrupt(int irq, void *dev_id, struct pt_regs *regs)
1542 {
1543         struct net_device *dev = dev_id;
1544         struct gfar_private *priv = netdev_priv(dev);
1545
1546         /* Save ievent for future reference */
1547         u32 events = gfar_read(&priv->regs->ievent);
1548
1549         /* Clear IEVENT by writing the latched events back (write-1-to-clear) */
1550         gfar_write(&priv->regs->ievent, events);
1551
1552         /* Check for reception */
1553         if ((events & IEVENT_RXF0) || (events & IEVENT_RXB0))
1554                 gfar_receive(irq, dev_id, regs);
1555
1556         /* Check for transmit completion */
1557         if ((events & IEVENT_TXF) || (events & IEVENT_TXB))
1558                 gfar_transmit(irq, dev_id, regs);
1559
1560         /* Update error statistics */
1561         if (events & IEVENT_TXE) {
1562                 priv->stats.tx_errors++;
1563
1564                 if (events & IEVENT_LC)
1565                         priv->stats.tx_window_errors++;
1566                 if (events & IEVENT_CRL)
1567                         priv->stats.tx_aborted_errors++;
1568                 if (events & IEVENT_XFUN) {
1569                         if (netif_msg_tx_err(priv))
1570                                 printk(KERN_WARNING "%s: tx underrun. dropped packet\n", dev->name);
1571                         priv->stats.tx_dropped++;
1572                         priv->extra_stats.tx_underrun++;
1573
1574                         /* Reactivate the Tx Queues */
1575                         gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);
1576                 }
1577         }
1578         if (events & IEVENT_BSY) {
1579                 priv->stats.rx_errors++;
1580                 priv->extra_stats.rx_bsy++;
1581
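                /* BSY means a frame was dropped because no empty
                 * descriptors were available, so drain the ring now to
                 * free some up */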
1582                 gfar_receive(irq, dev_id, regs);
1583
1584 #ifndef CONFIG_GFAR_NAPI
1585                 /* Clear the halt bit in RSTAT */
1586                 gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT);
1587 #endif
1588
1589                 if (netif_msg_rx_err(priv))
1590                         printk(KERN_DEBUG "%s: busy error (rhalt: %x)\n",
1591                                         dev->name,
1592                                         gfar_read(&priv->regs->rstat));
1593         }
1594         if (events & IEVENT_BABR) {
1595                 priv->stats.rx_errors++;
1596                 priv->extra_stats.rx_babr++;
1597
1598                 if (netif_msg_rx_err(priv))
1599                         printk(KERN_DEBUG "%s: babbling error\n", dev->name);
1600         }
1601         if (events & IEVENT_EBERR) {
1602                 priv->extra_stats.eberr++;
1603                 if (netif_msg_rx_err(priv))
1604                         printk(KERN_DEBUG "%s: EBERR\n", dev->name);
1605         }
1606         if ((events & IEVENT_RXC) && netif_msg_rx_err(priv))
1607                 printk(KERN_DEBUG "%s: control frame\n", dev->name);
1608
1609         if (events & IEVENT_BABT) {
1610                 priv->extra_stats.tx_babt++;
1611                 if (netif_msg_tx_err(priv))
1612                         printk(KERN_DEBUG "%s: babt error\n", dev->name);
1613         }
1614
1615         return IRQ_HANDLED;
1616 }
1617
1618 /* Called every time the controller might need to be made
1619  * aware of new link state.  The PHY code conveys this
1620  * information through variables in the phydev structure, and this
1621  * function converts those variables into the appropriate
1622  * register values, and can bring down the device if needed.
1623  */
1624 static void adjust_link(struct net_device *dev)
1625 {
1626         struct gfar_private *priv = netdev_priv(dev);
1627         struct gfar __iomem *regs = priv->regs;
1628         unsigned long flags;
1629         struct phy_device *phydev = priv->phydev;
1630         int new_state = 0;
1631
1632         spin_lock_irqsave(&priv->txlock, flags);
1633         if (phydev->link) {
1634                 u32 tempval = gfar_read(&regs->maccfg2);
1635                 u32 ecntrl = gfar_read(&regs->ecntrl);
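                /* MACCFG2 holds the duplex bit and the interface-mode
                 * field (MII-style nibble mode vs GMII byte mode), while
                 * ECNTRL's R100 bit selects 10 vs 100 Mbit/s in the
                 * reduced pin-count modes */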
1636
1637                 /* Now we make sure that we can be in full duplex mode.
1638                  * If not, we operate in half-duplex mode. */
1639                 if (phydev->duplex != priv->oldduplex) {
1640                         new_state = 1;
1641                         if (!(phydev->duplex))
1642                                 tempval &= ~(MACCFG2_FULL_DUPLEX);
1643                         else
1644                                 tempval |= MACCFG2_FULL_DUPLEX;
1645
1646                         priv->oldduplex = phydev->duplex;
1647                 }
1648
1649                 if (phydev->speed != priv->oldspeed) {
1650                         new_state = 1;
1651                         switch (phydev->speed) {
1652                         case 1000:
1653                                 tempval =
1654                                     ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
1655                                 break;
1656                         case 100:
1657                         case 10:
1658                                 tempval =
1659                                     ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
1660
1661                                 /* Reduced mode distinguishes
1662                                  * between 10 and 100 */
1663                                 if (phydev->speed == SPEED_100)
1664                                         ecntrl |= ECNTRL_R100;
1665                                 else
1666                                         ecntrl &= ~(ECNTRL_R100);
1667                                 break;
1668                         default:
1669                                 if (netif_msg_link(priv))
1670                                         printk(KERN_WARNING
1671                                                 "%s: Ack!  Speed (%d) is not 10/100/1000!\n",
1672                                                 dev->name, phydev->speed);
1673                                 break;
1674                         }
1675
1676                         priv->oldspeed = phydev->speed;
1677                 }
1678
1679                 gfar_write(&regs->maccfg2, tempval);
1680                 gfar_write(&regs->ecntrl, ecntrl);
1681
1682                 if (!priv->oldlink) {
1683                         new_state = 1;
1684                         priv->oldlink = 1;
1685                         netif_schedule(dev);
1686                 }
1687         } else if (priv->oldlink) {
1688                 new_state = 1;
1689                 priv->oldlink = 0;
1690                 priv->oldspeed = 0;
1691                 priv->oldduplex = -1;
1692         }
1693
1694         if (new_state && netif_msg_link(priv))
1695                 phy_print_status(phydev);
1696
1697         spin_unlock_irqrestore(&priv->txlock, flags);
1698 }
1699
1700 /* Update the hash table based on the current list of multicast
1701  * addresses we subscribe to.  Also, change the promiscuity of
1702  * the device based on the flags (this function is called
1703  * whenever dev->flags is changed) */
1704 static void gfar_set_multi(struct net_device *dev)
1705 {
1706         struct dev_mc_list *mc_ptr;
1707         struct gfar_private *priv = netdev_priv(dev);
1708         struct gfar __iomem *regs = priv->regs;
1709         u32 tempval;
1710
1711         if (dev->flags & IFF_PROMISC) {
1712                 if (netif_msg_drv(priv))
1713                         printk(KERN_INFO "%s: Entering promiscuous mode.\n",
1714                                         dev->name);
1715                 /* Set RCTRL to PROM */
1716                 tempval = gfar_read(&regs->rctrl);
1717                 tempval |= RCTRL_PROM;
1718                 gfar_write(&regs->rctrl, tempval);
1719         } else {
1720                 /* Set RCTRL to not PROM */
1721                 tempval = gfar_read(&regs->rctrl);
1722                 tempval &= ~(RCTRL_PROM);
1723                 gfar_write(&regs->rctrl, tempval);
1724         }
1725
1726         if (dev->flags & IFF_ALLMULTI) {
1727                 /* Set the hash to rx all multicast frames */
1728                 gfar_write(&regs->igaddr0, 0xffffffff);
1729                 gfar_write(&regs->igaddr1, 0xffffffff);
1730                 gfar_write(&regs->igaddr2, 0xffffffff);
1731                 gfar_write(&regs->igaddr3, 0xffffffff);
1732                 gfar_write(&regs->igaddr4, 0xffffffff);
1733                 gfar_write(&regs->igaddr5, 0xffffffff);
1734                 gfar_write(&regs->igaddr6, 0xffffffff);
1735                 gfar_write(&regs->igaddr7, 0xffffffff);
1736                 gfar_write(&regs->gaddr0, 0xffffffff);
1737                 gfar_write(&regs->gaddr1, 0xffffffff);
1738                 gfar_write(&regs->gaddr2, 0xffffffff);
1739                 gfar_write(&regs->gaddr3, 0xffffffff);
1740                 gfar_write(&regs->gaddr4, 0xffffffff);
1741                 gfar_write(&regs->gaddr5, 0xffffffff);
1742                 gfar_write(&regs->gaddr6, 0xffffffff);
1743                 gfar_write(&regs->gaddr7, 0xffffffff);
1744         } else {
1745                 int em_num;
1746                 int idx;
1747
1748                 /* zero out the hash */
1749                 gfar_write(&regs->igaddr0, 0x0);
1750                 gfar_write(&regs->igaddr1, 0x0);
1751                 gfar_write(&regs->igaddr2, 0x0);
1752                 gfar_write(&regs->igaddr3, 0x0);
1753                 gfar_write(&regs->igaddr4, 0x0);
1754                 gfar_write(&regs->igaddr5, 0x0);
1755                 gfar_write(&regs->igaddr6, 0x0);
1756                 gfar_write(&regs->igaddr7, 0x0);
1757                 gfar_write(&regs->gaddr0, 0x0);
1758                 gfar_write(&regs->gaddr1, 0x0);
1759                 gfar_write(&regs->gaddr2, 0x0);
1760                 gfar_write(&regs->gaddr3, 0x0);
1761                 gfar_write(&regs->gaddr4, 0x0);
1762                 gfar_write(&regs->gaddr5, 0x0);
1763                 gfar_write(&regs->gaddr6, 0x0);
1764                 gfar_write(&regs->gaddr7, 0x0);
1765
1766                 /* If we have extended hash tables, we need to
1767                  * clear the exact match registers to prepare for
1768                  * setting them */
1769                 if (priv->extended_hash) {
1770                         em_num = GFAR_EM_NUM + 1;
1771                         gfar_clear_exact_match(dev);
1772                         idx = 1;
1773                 } else {
1774                         idx = 0;
1775                         em_num = 0;
1776                 }
1777
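                /* Addresses are installed in the exact-match registers
                 * first (idx 1 through GFAR_EM_NUM); any that don't fit
                 * fall through to the group hash table below */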
1778                 if (dev->mc_count == 0)
1779                         return;
1780
1781                 /* Parse the list, and set the appropriate bits */
1782                 for (mc_ptr = dev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
1783                         if (idx < em_num) {
1784                                 gfar_set_mac_for_addr(dev, idx,
1785                                                 mc_ptr->dmi_addr);
1786                                 idx++;
1787                         } else
1788                                 gfar_set_hash_for_addr(dev, mc_ptr->dmi_addr);
1789                 }
1790         }
1793 }
1794
1795
1796 /* Clears each of the exact match registers to zero, so they
1797  * don't interfere with normal reception */
1798 static void gfar_clear_exact_match(struct net_device *dev)
1799 {
1800         int idx;
1801         u8 zero_arr[MAC_ADDR_LEN] = {0,0,0,0,0,0};
1802
1803         for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
1804                 gfar_set_mac_for_addr(dev, idx, (u8 *)zero_arr);
1805 }
1806
1807 /* Set the appropriate hash bit for the given addr */
1808 /* The algorithm works like so:
1809  * 1) Take the Destination Address (ie the multicast address), and
1810  * do a CRC on it (little endian), and reverse the bits of the
1811  * result.
1812  * 2) Use the 8 most significant bits as a hash into a 256-entry
1813  * table.  The table is controlled through 8 32-bit registers:
1814  * gaddr0-7.  gaddr0's MSB is entry 0, and gaddr7's LSB is
1815  * entry 255.  This means that the 3 most significant bits in the
1816  * hash index indicate which gaddr register to use, and the 5 other bits
1817  * indicate which bit (assuming an IBM numbering scheme, which
1818  * for PowerPC (tm) is usually the case) in the register holds
1819  * the entry. */
1820 static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
1821 {
1822         u32 tempval;
1823         struct gfar_private *priv = netdev_priv(dev);
1824         u32 result = ether_crc(MAC_ADDR_LEN, addr);
1825         int width = priv->hash_width;
1826         u8 whichbit = (result >> (32 - width)) & 0x1f;
1827         u8 whichreg = result >> (32 - width + 5);
1828         u32 value = (1 << (31-whichbit));
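        /* Worked example with a hypothetical CRC result: if result is
         * 0xd3000000 and hash_width is 8, the top byte is 0xd3
         * (11010011b), so whichreg = 110b = 6 (gaddr6), whichbit =
         * 10011b = 19, and bit (31 - 19) = 12 of that register is set */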
1829
1830         tempval = gfar_read(priv->hash_regs[whichreg]);
1831         tempval |= value;
1832         gfar_write(priv->hash_regs[whichreg], tempval);
1833
1836
1837
1838 /* There are multiple MAC Address register pairs on some controllers
1839  * This function sets the numth pair to a given address
1840  */
1841 static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr)
1842 {
1843         struct gfar_private *priv = netdev_priv(dev);
1844         int idx;
1845         char tmpbuf[8] = { 0 }; /* MAC_ADDR_LEN bytes + zero padding for the u32 read below */
1846         u32 tempval;
1847         u32 __iomem *macptr = &priv->regs->macstnaddr1;
1848
1849         macptr += num*2;
1850
1851         /* Copy the address into the MAC registers backwards, since
1852          * the hardware expects the bytes in reversed order */
1853         for (idx = 0; idx < MAC_ADDR_LEN; idx++)
1854                 tmpbuf[MAC_ADDR_LEN - 1 - idx] = addr[idx];
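        /* Example: address 00:04:9f:01:02:03 reverses to tmpbuf =
         * {03, 02, 01, 9f, 04, 00}, so on these big-endian parts the
         * first register gets 0x0302019f and the second 0x04000000
         * (the padding bytes read as zero) */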
1855
1856         gfar_write(macptr, *((u32 *) (tmpbuf)));
1857
1858         tempval = *((u32 *) (tmpbuf + 4));
1859
1860         gfar_write(macptr+1, tempval);
1861 }
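/* Note that pair 0 holds the station's own address; gfar_set_multi()
 * installs exact-match filter addresses starting at pair 1 */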
1862
1863 /* GFAR error interrupt handler */
1864 static irqreturn_t gfar_error(int irq, void *dev_id, struct pt_regs *regs)
1865 {
1866         struct net_device *dev = dev_id;
1867         struct gfar_private *priv = netdev_priv(dev);
1868
1869         /* Save ievent for future reference */
1870         u32 events = gfar_read(&priv->regs->ievent);
1871
1872         /* Clear the error events only; completion events are left for their own handlers */
1873         gfar_write(&priv->regs->ievent, IEVENT_ERR_MASK);
1874
1875         /* Log the raw event and mask state if error reporting is enabled */
1876         if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
1877                 printk(KERN_DEBUG "%s: error interrupt (ievent=0x%08x imask=0x%08x)\n",
1878                                 dev->name, events, gfar_read(&priv->regs->imask));
1879
1880         /* Update the error counters */
1881         if (events & IEVENT_TXE) {
1882                 priv->stats.tx_errors++;
1883
1884                 if (events & IEVENT_LC)
1885                         priv->stats.tx_window_errors++;
1886                 if (events & IEVENT_CRL)
1887                         priv->stats.tx_aborted_errors++;
1888                 if (events & IEVENT_XFUN) {
1889                         if (netif_msg_tx_err(priv))
1890                                 printk(KERN_DEBUG "%s: underrun.  packet dropped.\n",
1891                                                 dev->name);
1892                         priv->stats.tx_dropped++;
1893                         priv->extra_stats.tx_underrun++;
1894
1895                         /* Reactivate the Tx Queues */
1896                         gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);
1897                 }
1898                 if (netif_msg_tx_err(priv))
1899                         printk(KERN_DEBUG "%s: Transmit Error\n", dev->name);
1900         }
1901         if (events & IEVENT_BSY) {
1902                 priv->stats.rx_errors++;
1903                 priv->extra_stats.rx_bsy++;
1904
1905                 gfar_receive(irq, dev_id, regs);
1906
1907 #ifndef CONFIG_GFAR_NAPI
1908                 /* Clear the halt bit in RSTAT */
1909                 gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT);
1910 #endif
1911
1912                 if (netif_msg_rx_err(priv))
1913                         printk(KERN_DEBUG "%s: busy error (rhalt: %x)\n",
1914                                         dev->name,
1915                                         gfar_read(&priv->regs->rstat));
1916         }
1917         if (events & IEVENT_BABR) {
1918                 priv->stats.rx_errors++;
1919                 priv->extra_stats.rx_babr++;
1920
1921                 if (netif_msg_rx_err(priv))
1922                         printk(KERN_DEBUG "%s: babbling error\n", dev->name);
1923         }
1924         if (events & IEVENT_EBERR) {
1925                 priv->extra_stats.eberr++;
1926                 if (netif_msg_rx_err(priv))
1927                         printk(KERN_DEBUG "%s: EBERR\n", dev->name);
1928         }
1929         if ((events & IEVENT_RXC) && netif_msg_rx_status(priv))
1930                 printk(KERN_DEBUG "%s: control frame\n", dev->name);
1932
1933         if (events & IEVENT_BABT) {
1934                 priv->extra_stats.tx_babt++;
1935                 if (netif_msg_tx_err(priv))
1936                         printk(KERN_DEBUG "%s: babt error\n", dev->name);
1937         }
1938         return IRQ_HANDLED;
1939 }
1940
1941 /* Structure for a device driver */
1942 static struct platform_driver gfar_driver = {
1943         .probe = gfar_probe,
1944         .remove = gfar_remove,
1945         .driver = {
1946                 .name = "fsl-gianfar",
1947         },
1948 };
1949
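/* Register the MDIO bus driver first so the PHY devices exist before
 * any gianfar device probes and tries to attach to its PHY; unwind the
 * MDIO registration if the platform driver fails to register */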
1950 static int __init gfar_init(void)
1951 {
1952         int err = gfar_mdio_init();
1953
1954         if (err)
1955                 return err;
1956
1957         err = platform_driver_register(&gfar_driver);
1958
1959         if (err)
1960                 gfar_mdio_exit();
1961
1962         return err;
1963 }
1964
1965 static void __exit gfar_exit(void)
1966 {
1967         platform_driver_unregister(&gfar_driver);
1968         gfar_mdio_exit();
1969 }
1970
1971 module_init(gfar_init);
1972 module_exit(gfar_exit);
1973