1 /*
2  * drivers/net/gianfar.c
3  *
4  * Gianfar Ethernet Driver
5  * This driver is designed for the non-CPM ethernet controllers
6  * on the 85xx and 83xx family of integrated processors
7  * Based on 8260_io/fcc_enet.c
8  *
9  * Author: Andy Fleming
10  * Maintainer: Kumar Gala
11  *
12  * Copyright (c) 2002-2006 Freescale Semiconductor, Inc.
13  * Copyright (c) 2007 MontaVista Software, Inc.
14  *
15  * This program is free software; you can redistribute  it and/or modify it
16  * under  the terms of  the GNU General  Public License as published by the
17  * Free Software Foundation;  either version 2 of the  License, or (at your
18  * option) any later version.
19  *
20  *  Gianfar:  AKA Lambda Draconis, "Dragon"
21  *  RA 11 31 24.2
22  *  Dec +69 19 52
23  *  V 3.84
24  *  B-V +1.62
25  *
26  *  Theory of operation
27  *
28  *  The driver is initialized through platform_device.  Structures which
29  *  define the configuration needed by the board are defined in a
30  *  board structure in arch/ppc/platforms (though I do not
31  *  discount the possibility that other architectures could one
32  *  day be supported).
33  *
34  *  The Gianfar Ethernet Controller uses a ring of buffer
35  *  descriptors.  The beginning is indicated by a register
36  *  pointing to the physical address of the start of the ring.
37  *  The end is determined by a "wrap" bit being set in the
38  *  last descriptor of the ring.
39  *
40  *  When a packet is received, the RXF bit in the
41  *  IEVENT register is set, triggering an interrupt when the
42  *  corresponding bit in the IMASK register is also set (if
43  *  interrupt coalescing is active, then the interrupt may not
44  *  happen immediately, but will wait until either a set number
45  *  of frames or an amount of time has passed).  In NAPI, the
46  *  interrupt handler will signal there is work to be done, and
47  *  exit.  Without NAPI, the packet(s) will be handled
48  *  immediately.  Both methods will start at the last known empty
49  *  descriptor, and process every subsequent descriptor until there
50  *  are none left with data (NAPI will stop after a set number of
51  *  packets to give time to other tasks, but will eventually
52  *  process all the packets).  The data arrives inside a
53  *  pre-allocated skb, and so after the skb is passed up to the
54  *  stack, a new skb must be allocated, and the address field in
55  *  the buffer descriptor must be updated to indicate this new
56  *  skb.
57  *
58  *  When the kernel requests that a packet be transmitted, the
59  *  driver starts where it left off last time, and points the
60  *  descriptor at the buffer which was passed in.  The driver
61  *  then informs the DMA engine that there are packets ready to
62  *  be transmitted.  Once the controller is finished transmitting
63  *  the packet, an interrupt may be triggered (under the same
64  *  conditions as for reception, but depending on the TXF bit).
65  *  The driver then cleans up the buffer.
66  */
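/*
 *  Illustrative sketch only (not driver code): walking a descriptor
 *  ring as described above, assuming a hypothetical descriptor with
 *  EMPTY and WRAP status bits, amounts to:
 *
 *      bdp = ring_base;
 *      while (!(bdp->status & EMPTY)) {
 *              process(bdp);
 *              bdp = (bdp->status & WRAP) ? ring_base : bdp + 1;
 *      }
 *
 *  gfar_clean_rx_ring() and gfar_clean_tx_ring() below follow this
 *  pattern, with budget limits and skb bookkeeping added.
 */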
67
68 #include <linux/kernel.h>
69 #include <linux/string.h>
70 #include <linux/errno.h>
71 #include <linux/unistd.h>
72 #include <linux/slab.h>
73 #include <linux/interrupt.h>
74 #include <linux/init.h>
75 #include <linux/delay.h>
76 #include <linux/netdevice.h>
77 #include <linux/etherdevice.h>
78 #include <linux/skbuff.h>
79 #include <linux/if_vlan.h>
80 #include <linux/spinlock.h>
81 #include <linux/mm.h>
82 #include <linux/platform_device.h>
83 #include <linux/ip.h>
84 #include <linux/tcp.h>
85 #include <linux/udp.h>
86 #include <linux/in.h>
87
88 #include <asm/io.h>
89 #include <asm/irq.h>
90 #include <asm/uaccess.h>
91 #include <linux/module.h>
92 #include <linux/dma-mapping.h>
93 #include <linux/crc32.h>
94 #include <linux/mii.h>
95 #include <linux/phy.h>
96
97 #include "gianfar.h"
98 #include "gianfar_mii.h"
99
100 #define TX_TIMEOUT      (1*HZ)
101 #undef BRIEF_GFAR_ERRORS
102 #undef VERBOSE_GFAR_ERRORS
103
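/* Pick the packet-delivery call for this build: netif_receive_skb() is
 * for NAPI (softirq) context, while netif_rx() may be called directly
 * from the interrupt handler. */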
104 #ifdef CONFIG_GFAR_NAPI
105 #define RECEIVE(x) netif_receive_skb(x)
106 #else
107 #define RECEIVE(x) netif_rx(x)
108 #endif
109
110 const char gfar_driver_name[] = "Gianfar Ethernet";
111 const char gfar_driver_version[] = "1.3";
112
113 static int gfar_enet_open(struct net_device *dev);
114 static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
115 static void gfar_timeout(struct net_device *dev);
116 static int gfar_close(struct net_device *dev);
117 struct sk_buff *gfar_new_skb(struct net_device *dev);
118 static void gfar_new_rxbdp(struct net_device *dev, struct rxbd8 *bdp,
119                 struct sk_buff *skb);
120 static int gfar_set_mac_address(struct net_device *dev);
121 static int gfar_change_mtu(struct net_device *dev, int new_mtu);
122 static irqreturn_t gfar_error(int irq, void *dev_id);
123 static irqreturn_t gfar_transmit(int irq, void *dev_id);
124 static irqreturn_t gfar_interrupt(int irq, void *dev_id);
125 static void adjust_link(struct net_device *dev);
126 static void init_registers(struct net_device *dev);
127 static int init_phy(struct net_device *dev);
128 static int gfar_probe(struct platform_device *pdev);
129 static int gfar_remove(struct platform_device *pdev);
130 static void free_skb_resources(struct gfar_private *priv);
131 static void gfar_set_multi(struct net_device *dev);
132 static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
133 static void gfar_configure_serdes(struct net_device *dev);
134 #ifdef CONFIG_GFAR_NAPI
135 static int gfar_poll(struct napi_struct *napi, int budget);
136 #endif
137 #ifdef CONFIG_NET_POLL_CONTROLLER
138 static void gfar_netpoll(struct net_device *dev);
139 #endif
140 int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit);
141 static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, int length);
142 static void gfar_vlan_rx_register(struct net_device *netdev,
143                                 struct vlan_group *grp);
144 void gfar_halt(struct net_device *dev);
145 void gfar_start(struct net_device *dev);
146 static void gfar_clear_exact_match(struct net_device *dev);
147 static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr);
148
149 extern const struct ethtool_ops gfar_ethtool_ops;
150
151 MODULE_AUTHOR("Freescale Semiconductor, Inc");
152 MODULE_DESCRIPTION("Gianfar Ethernet Driver");
153 MODULE_LICENSE("GPL");
154
155 /* Returns 1 if incoming frames use an FCB */
156 static inline int gfar_uses_fcb(struct gfar_private *priv)
157 {
158         return (priv->vlan_enable || priv->rx_csum_enable);
159 }
160
161 /* Set up the ethernet device structure, private data,
162  * and anything else we need before we start */
163 static int gfar_probe(struct platform_device *pdev)
164 {
165         u32 tempval;
166         struct net_device *dev = NULL;
167         struct gfar_private *priv = NULL;
168         struct gianfar_platform_data *einfo;
169         struct resource *r;
170         int err = 0;
171         DECLARE_MAC_BUF(mac);
172
173         einfo = (struct gianfar_platform_data *) pdev->dev.platform_data;
174
175         if (NULL == einfo) {
176                 printk(KERN_ERR "gfar %d: Missing additional data!\n",
177                        pdev->id);
178
179                 return -ENODEV;
180         }
181
182         /* Create an ethernet device instance */
183         dev = alloc_etherdev(sizeof (*priv));
184
185         if (NULL == dev)
186                 return -ENOMEM;
187
188         priv = netdev_priv(dev);
189         priv->dev = dev;
190
191         /* Set the info in the priv to the current info */
192         priv->einfo = einfo;
193
194         /* fill out IRQ fields */
195         if (einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
196                 priv->interruptTransmit = platform_get_irq_byname(pdev, "tx");
197                 priv->interruptReceive = platform_get_irq_byname(pdev, "rx");
198                 priv->interruptError = platform_get_irq_byname(pdev, "error");
199                 if (priv->interruptTransmit < 0 || priv->interruptReceive < 0 || priv->interruptError < 0)
200                         goto regs_fail;
201         } else {
202                 priv->interruptTransmit = platform_get_irq(pdev, 0);
203                 if (priv->interruptTransmit < 0)
204                         goto regs_fail;
205         }
206
207         /* get a pointer to the register memory */
208         r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
209         priv->regs = ioremap(r->start, sizeof (struct gfar));
210
211         if (NULL == priv->regs) {
212                 err = -ENOMEM;
213                 goto regs_fail;
214         }
215
216         spin_lock_init(&priv->txlock);
217         spin_lock_init(&priv->rxlock);
218
219         platform_set_drvdata(pdev, dev);
220
221         /* Stop the DMA engine now, in case it was running before */
222         /* (The firmware could have used it, and left it running). */
223         /* To do this, we write Graceful Receive Stop and Graceful */
224         /* Transmit Stop, and then wait until the corresponding bits */
225         /* in IEVENT indicate the stops have completed. */
226         tempval = gfar_read(&priv->regs->dmactrl);
227         tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
228         gfar_write(&priv->regs->dmactrl, tempval);
229
230         tempval = gfar_read(&priv->regs->dmactrl);
231         tempval |= (DMACTRL_GRS | DMACTRL_GTS);
232         gfar_write(&priv->regs->dmactrl, tempval);
233
234         while (!(gfar_read(&priv->regs->ievent) & (IEVENT_GRSC | IEVENT_GTSC)))
235                 cpu_relax();
236
237         /* Reset MAC layer */
238         gfar_write(&priv->regs->maccfg1, MACCFG1_SOFT_RESET);
239
240         tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
241         gfar_write(&priv->regs->maccfg1, tempval);
242
243         /* Initialize MACCFG2. */
244         gfar_write(&priv->regs->maccfg2, MACCFG2_INIT_SETTINGS);
245
246         /* Initialize ECNTRL */
247         gfar_write(&priv->regs->ecntrl, ECNTRL_INIT_SETTINGS);
248
249         /* Copy the station address into the dev structure, */
250         memcpy(dev->dev_addr, einfo->mac_addr, MAC_ADDR_LEN);
251
252         /* Set the dev->base_addr to the gfar reg region */
253         dev->base_addr = (unsigned long) (priv->regs);
254
255         SET_NETDEV_DEV(dev, &pdev->dev);
256
257         /* Fill in the dev structure */
258         dev->open = gfar_enet_open;
259         dev->hard_start_xmit = gfar_start_xmit;
260         dev->tx_timeout = gfar_timeout;
261         dev->watchdog_timeo = TX_TIMEOUT;
262 #ifdef CONFIG_GFAR_NAPI
263         netif_napi_add(dev, &priv->napi, gfar_poll, GFAR_DEV_WEIGHT);
264 #endif
265 #ifdef CONFIG_NET_POLL_CONTROLLER
266         dev->poll_controller = gfar_netpoll;
267 #endif
268         dev->stop = gfar_close;
269         dev->change_mtu = gfar_change_mtu;
270         dev->mtu = 1500;
271         dev->set_multicast_list = gfar_set_multi;
272
273         dev->ethtool_ops = &gfar_ethtool_ops;
274
275         if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
276                 priv->rx_csum_enable = 1;
277                 dev->features |= NETIF_F_IP_CSUM;
278         } else
279                 priv->rx_csum_enable = 0;
280
281         priv->vlgrp = NULL;
282
283         if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
284                 dev->vlan_rx_register = gfar_vlan_rx_register;
285
286                 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
287
288                 priv->vlan_enable = 1;
289         }
290
291         if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
292                 priv->extended_hash = 1;
293                 priv->hash_width = 9;
294
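                /* (a 9-bit hash selects one of 512 filter bins, spread
                 * across the sixteen 32-bit registers listed here) */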
295                 priv->hash_regs[0] = &priv->regs->igaddr0;
296                 priv->hash_regs[1] = &priv->regs->igaddr1;
297                 priv->hash_regs[2] = &priv->regs->igaddr2;
298                 priv->hash_regs[3] = &priv->regs->igaddr3;
299                 priv->hash_regs[4] = &priv->regs->igaddr4;
300                 priv->hash_regs[5] = &priv->regs->igaddr5;
301                 priv->hash_regs[6] = &priv->regs->igaddr6;
302                 priv->hash_regs[7] = &priv->regs->igaddr7;
303                 priv->hash_regs[8] = &priv->regs->gaddr0;
304                 priv->hash_regs[9] = &priv->regs->gaddr1;
305                 priv->hash_regs[10] = &priv->regs->gaddr2;
306                 priv->hash_regs[11] = &priv->regs->gaddr3;
307                 priv->hash_regs[12] = &priv->regs->gaddr4;
308                 priv->hash_regs[13] = &priv->regs->gaddr5;
309                 priv->hash_regs[14] = &priv->regs->gaddr6;
310                 priv->hash_regs[15] = &priv->regs->gaddr7;
311
312         } else {
313                 priv->extended_hash = 0;
314                 priv->hash_width = 8;
315
316                 priv->hash_regs[0] = &priv->regs->gaddr0;
317                 priv->hash_regs[1] = &priv->regs->gaddr1;
318                 priv->hash_regs[2] = &priv->regs->gaddr2;
319                 priv->hash_regs[3] = &priv->regs->gaddr3;
320                 priv->hash_regs[4] = &priv->regs->gaddr4;
321                 priv->hash_regs[5] = &priv->regs->gaddr5;
322                 priv->hash_regs[6] = &priv->regs->gaddr6;
323                 priv->hash_regs[7] = &priv->regs->gaddr7;
324         }
325
326         if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_PADDING)
327                 priv->padding = DEFAULT_PADDING;
328         else
329                 priv->padding = 0;
330
331         if (dev->features & NETIF_F_IP_CSUM)
332                 dev->hard_header_len += GMAC_FCB_LEN;
333
334         priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
335         priv->tx_ring_size = DEFAULT_TX_RING_SIZE;
336         priv->rx_ring_size = DEFAULT_RX_RING_SIZE;
337
338         priv->txcoalescing = DEFAULT_TX_COALESCE;
339         priv->txcount = DEFAULT_TXCOUNT;
340         priv->txtime = DEFAULT_TXTIME;
341         priv->rxcoalescing = DEFAULT_RX_COALESCE;
342         priv->rxcount = DEFAULT_RXCOUNT;
343         priv->rxtime = DEFAULT_RXTIME;
344
345         /* Enable most messages by default */
346         priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;
347
348         err = register_netdev(dev);
349
350         if (err) {
351                 printk(KERN_ERR "%s: Cannot register net device, aborting.\n",
352                                 dev->name);
353                 goto register_fail;
354         }
355
356         /* Create all the sysfs files */
357         gfar_init_sysfs(dev);
358
359         /* Print out the device info */
360         printk(KERN_INFO DEVICE_NAME "%s\n",
361                dev->name, print_mac(mac, dev->dev_addr));
362
363         /* Even more device info helps when determining which kernel */
364         /* provided which set of benchmarks. */
365 #ifdef CONFIG_GFAR_NAPI
366         printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name);
367 #else
368         printk(KERN_INFO "%s: Running with NAPI disabled\n", dev->name);
369 #endif
370         printk(KERN_INFO "%s: %d/%d RX/TX BD ring size\n",
371                dev->name, priv->rx_ring_size, priv->tx_ring_size);
372
373         return 0;
374
375 register_fail:
376         iounmap(priv->regs);
377 regs_fail:
378         free_netdev(dev);
379         return err;
380 }
381
382 static int gfar_remove(struct platform_device *pdev)
383 {
384         struct net_device *dev = platform_get_drvdata(pdev);
385         struct gfar_private *priv = netdev_priv(dev);
386
387         platform_set_drvdata(pdev, NULL);
388
389         iounmap(priv->regs);
390         free_netdev(dev);
391
392         return 0;
393 }
394
395
396 /* Reads the controller's registers to determine what interface
397  * connects it to the PHY.
398  */
399 static phy_interface_t gfar_get_interface(struct net_device *dev)
400 {
401         struct gfar_private *priv = netdev_priv(dev);
402         u32 ecntrl = gfar_read(&priv->regs->ecntrl);
403
404         if (ecntrl & ECNTRL_SGMII_MODE)
405                 return PHY_INTERFACE_MODE_SGMII;
406
407         if (ecntrl & ECNTRL_TBI_MODE) {
408                 if (ecntrl & ECNTRL_REDUCED_MODE)
409                         return PHY_INTERFACE_MODE_RTBI;
410                 else
411                         return PHY_INTERFACE_MODE_TBI;
412         }
413
414         if (ecntrl & ECNTRL_REDUCED_MODE) {
415                 if (ecntrl & ECNTRL_REDUCED_MII_MODE)
416                         return PHY_INTERFACE_MODE_RMII;
417                 else {
418                         phy_interface_t interface = priv->einfo->interface;
419
420                         /*
421                          * This isn't autodetected right now, so it must
422                          * be set by the device tree or platform code.
423                          */
424                         if (interface == PHY_INTERFACE_MODE_RGMII_ID)
425                                 return PHY_INTERFACE_MODE_RGMII_ID;
426
427                         return PHY_INTERFACE_MODE_RGMII;
428                 }
429         }
430
431         if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
432                 return PHY_INTERFACE_MODE_GMII;
433
434         return PHY_INTERFACE_MODE_MII;
435 }
436
437
438 /* Initializes driver's PHY state, and attaches to the PHY.
439  * Returns 0 on success.
440  */
441 static int init_phy(struct net_device *dev)
442 {
443         struct gfar_private *priv = netdev_priv(dev);
444         uint gigabit_support =
445                 priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
446                 SUPPORTED_1000baseT_Full : 0;
447         struct phy_device *phydev;
448         char phy_id[BUS_ID_SIZE];
449         phy_interface_t interface;
450
451         priv->oldlink = 0;
452         priv->oldspeed = 0;
453         priv->oldduplex = -1;
454
455         snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT, priv->einfo->bus_id, priv->einfo->phy_id);
456
457         interface = gfar_get_interface(dev);
458
459         phydev = phy_connect(dev, phy_id, &adjust_link, 0, interface);
460
461         if (interface == PHY_INTERFACE_MODE_SGMII)
462                 gfar_configure_serdes(dev);
463
464         if (IS_ERR(phydev)) {
465                 printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
466                 return PTR_ERR(phydev);
467         }
468
469         /* Remove any features not supported by the controller */
470         phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
471         phydev->advertising = phydev->supported;
472
473         priv->phydev = phydev;
474
475         return 0;
476 }
477
478 /*
479  * Initialize TBI PHY interface for communicating with the
480  * SERDES lynx PHY on the chip.  We communicate with this PHY
481  * through the MDIO bus on each controller, treating it as a
482  * "normal" PHY at the address found in the TBIPA register.  We assume
483  * that the TBIPA register is valid.  Either the MDIO bus code will set
484  * it to a value that doesn't conflict with other PHYs on the bus, or the
485  * value doesn't matter, as there are no other PHYs on the bus.
486  */
487 static void gfar_configure_serdes(struct net_device *dev)
488 {
489         struct gfar_private *priv = netdev_priv(dev);
490         struct gfar_mii __iomem *regs =
491                         (void __iomem *)&priv->regs->gfar_mii_regs;
492         int tbipa = gfar_read(&priv->regs->tbipa);
493
494         /* Single clk mode, mii mode off (for serdes communication) */
495         gfar_local_mdio_write(regs, tbipa, MII_TBICON, TBICON_CLK_SELECT);
496
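        /* Advertise 1000BASE-X full duplex and both pause types, then
         * restart autonegotiation at 1000/full */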
497         gfar_local_mdio_write(regs, tbipa, MII_ADVERTISE,
498                         ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
499                         ADVERTISE_1000XPSE_ASYM);
500
501         gfar_local_mdio_write(regs, tbipa, MII_BMCR, BMCR_ANENABLE |
502                         BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000);
503 }
504
505 static void init_registers(struct net_device *dev)
506 {
507         struct gfar_private *priv = netdev_priv(dev);
508
509         /* Clear IEVENT */
510         gfar_write(&priv->regs->ievent, IEVENT_INIT_CLEAR);
511
512         /* Initialize IMASK */
513         gfar_write(&priv->regs->imask, IMASK_INIT_CLEAR);
514
515         /* Init hash registers to zero */
516         gfar_write(&priv->regs->igaddr0, 0);
517         gfar_write(&priv->regs->igaddr1, 0);
518         gfar_write(&priv->regs->igaddr2, 0);
519         gfar_write(&priv->regs->igaddr3, 0);
520         gfar_write(&priv->regs->igaddr4, 0);
521         gfar_write(&priv->regs->igaddr5, 0);
522         gfar_write(&priv->regs->igaddr6, 0);
523         gfar_write(&priv->regs->igaddr7, 0);
524
525         gfar_write(&priv->regs->gaddr0, 0);
526         gfar_write(&priv->regs->gaddr1, 0);
527         gfar_write(&priv->regs->gaddr2, 0);
528         gfar_write(&priv->regs->gaddr3, 0);
529         gfar_write(&priv->regs->gaddr4, 0);
530         gfar_write(&priv->regs->gaddr5, 0);
531         gfar_write(&priv->regs->gaddr6, 0);
532         gfar_write(&priv->regs->gaddr7, 0);
533
534         /* Zero out the rmon mib registers if it has them */
535         if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
536                 memset_io(&(priv->regs->rmon), 0, sizeof (struct rmon_mib));
537
538                 /* Mask off the CAM interrupts */
539                 gfar_write(&priv->regs->rmon.cam1, 0xffffffff);
540                 gfar_write(&priv->regs->rmon.cam2, 0xffffffff);
541         }
542
543         /* Initialize the max receive buffer length */
544         gfar_write(&priv->regs->mrblr, priv->rx_buffer_size);
545
546         /* Initialize the Minimum Frame Length Register */
547         gfar_write(&priv->regs->minflr, MINFLR_INIT_SETTINGS);
548 }
549
550
551 /* Halt the receive and transmit queues */
552 void gfar_halt(struct net_device *dev)
553 {
554         struct gfar_private *priv = netdev_priv(dev);
555         struct gfar __iomem *regs = priv->regs;
556         u32 tempval;
557
558         /* Mask all interrupts */
559         gfar_write(&regs->imask, IMASK_INIT_CLEAR);
560
561         /* Clear all interrupts */
562         gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
563
564         /* Stop the DMA, and wait for it to stop */
565         tempval = gfar_read(&priv->regs->dmactrl);
566         if ((tempval & (DMACTRL_GRS | DMACTRL_GTS))
567             != (DMACTRL_GRS | DMACTRL_GTS)) {
568                 tempval |= (DMACTRL_GRS | DMACTRL_GTS);
569                 gfar_write(&priv->regs->dmactrl, tempval);
570
571                 while (!(gfar_read(&priv->regs->ievent) &
572                          (IEVENT_GRSC | IEVENT_GTSC)))
573                         cpu_relax();
574         }
575
576         /* Disable Rx and Tx */
577         tempval = gfar_read(&regs->maccfg1);
578         tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
579         gfar_write(&regs->maccfg1, tempval);
580 }
581
582 void stop_gfar(struct net_device *dev)
583 {
584         struct gfar_private *priv = netdev_priv(dev);
585         struct gfar __iomem *regs = priv->regs;
586         unsigned long flags;
587
588         phy_stop(priv->phydev);
589
590         /* Lock it down */
591         spin_lock_irqsave(&priv->txlock, flags);
592         spin_lock(&priv->rxlock);
593
594         gfar_halt(dev);
595
596         spin_unlock(&priv->rxlock);
597         spin_unlock_irqrestore(&priv->txlock, flags);
598
599         /* Free the IRQs */
600         if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
601                 free_irq(priv->interruptError, dev);
602                 free_irq(priv->interruptTransmit, dev);
603                 free_irq(priv->interruptReceive, dev);
604         } else {
605                 free_irq(priv->interruptTransmit, dev);
606         }
607
608         free_skb_resources(priv);
609
610         dma_free_coherent(&dev->dev,
611                         sizeof(struct txbd8)*priv->tx_ring_size
612                         + sizeof(struct rxbd8)*priv->rx_ring_size,
613                         priv->tx_bd_base,
614                         gfar_read(&regs->tbase0));
615 }
616
617 /* If there are any tx skbs or rx skbs still around, free them.
618  * Then free tx_skbuff and rx_skbuff */
619 static void free_skb_resources(struct gfar_private *priv)
620 {
621         struct rxbd8 *rxbdp;
622         struct txbd8 *txbdp;
623         int i;
624
625         /* Go through all the buffer descriptors and free their data buffers */
626         txbdp = priv->tx_bd_base;
627
628         for (i = 0; i < priv->tx_ring_size; i++) {
629
630                 if (priv->tx_skbuff[i]) {
631                         dma_unmap_single(&priv->dev->dev, txbdp->bufPtr,
632                                         txbdp->length,
633                                         DMA_TO_DEVICE);
634                         dev_kfree_skb_any(priv->tx_skbuff[i]);
635                         priv->tx_skbuff[i] = NULL;
636                 }
637         }
638
639         kfree(priv->tx_skbuff);
640
641         rxbdp = priv->rx_bd_base;
642
643         /* rx_skbuff is not guaranteed to be allocated, so only
644          * free it and its contents if it is allocated */
645         if (priv->rx_skbuff != NULL) {
646                 for (i = 0; i < priv->rx_ring_size; i++) {
647                         if (priv->rx_skbuff[i]) {
648                                 dma_unmap_single(&priv->dev->dev, rxbdp->bufPtr,
649                                                 priv->rx_buffer_size,
650                                                 DMA_FROM_DEVICE);
651
652                                 dev_kfree_skb_any(priv->rx_skbuff[i]);
653                                 priv->rx_skbuff[i] = NULL;
654                         }
655
656                         rxbdp->status = 0;
657                         rxbdp->length = 0;
658                         rxbdp->bufPtr = 0;
659
660                         rxbdp++;
661                 }
662
663                 kfree(priv->rx_skbuff);
664         }
665 }
666
667 void gfar_start(struct net_device *dev)
668 {
669         struct gfar_private *priv = netdev_priv(dev);
670         struct gfar __iomem *regs = priv->regs;
671         u32 tempval;
672
673         /* Enable Rx and Tx in MACCFG1 */
674         tempval = gfar_read(&regs->maccfg1);
675         tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
676         gfar_write(&regs->maccfg1, tempval);
677
678         /* Initialize DMACTRL to have WWR and WOP */
679         tempval = gfar_read(&priv->regs->dmactrl);
680         tempval |= DMACTRL_INIT_SETTINGS;
681         gfar_write(&priv->regs->dmactrl, tempval);
682
683         /* Make sure we aren't stopped */
684         tempval = gfar_read(&priv->regs->dmactrl);
685         tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
686         gfar_write(&priv->regs->dmactrl, tempval);
687
688         /* Clear THLT/RHLT, so that the DMA starts polling now */
689         gfar_write(&regs->tstat, TSTAT_CLEAR_THALT);
690         gfar_write(&regs->rstat, RSTAT_CLEAR_RHALT);
691
692         /* Unmask the interrupts we look for */
693         gfar_write(&regs->imask, IMASK_DEFAULT);
694 }
695
696 /* Bring the controller up and running */
697 int startup_gfar(struct net_device *dev)
698 {
699         struct txbd8 *txbdp;
700         struct rxbd8 *rxbdp;
701         dma_addr_t addr = 0;
702         unsigned long vaddr;
703         int i;
704         struct gfar_private *priv = netdev_priv(dev);
705         struct gfar __iomem *regs = priv->regs;
706         int err = 0;
707         u32 rctrl = 0;
708         u32 attrs = 0;
709
710         gfar_write(&regs->imask, IMASK_INIT_CLEAR);
711
712         /* Allocate memory for the buffer descriptors */
713         vaddr = (unsigned long) dma_alloc_coherent(&dev->dev,
714                         sizeof (struct txbd8) * priv->tx_ring_size +
715                         sizeof (struct rxbd8) * priv->rx_ring_size,
716                         &addr, GFP_KERNEL);
717
718         if (vaddr == 0) {
719                 if (netif_msg_ifup(priv))
720                         printk(KERN_ERR "%s: Could not allocate buffer descriptors!\n",
721                                         dev->name);
722                 return -ENOMEM;
723         }
724
725         priv->tx_bd_base = (struct txbd8 *) vaddr;
726
727         /* enet DMA only understands physical addresses */
728         gfar_write(&regs->tbase0, addr);
729
730         /* Start the rx descriptor ring where the tx ring leaves off */
731         addr = addr + sizeof (struct txbd8) * priv->tx_ring_size;
732         vaddr = vaddr + sizeof (struct txbd8) * priv->tx_ring_size;
733         priv->rx_bd_base = (struct rxbd8 *) vaddr;
734         gfar_write(&regs->rbase0, addr);
735
736         /* Setup the skbuff rings */
737         priv->tx_skbuff =
738             (struct sk_buff **) kmalloc(sizeof (struct sk_buff *) *
739                                         priv->tx_ring_size, GFP_KERNEL);
740
741         if (NULL == priv->tx_skbuff) {
742                 if (netif_msg_ifup(priv))
743                         printk(KERN_ERR "%s: Could not allocate tx_skbuff\n",
744                                         dev->name);
745                 err = -ENOMEM;
746                 goto tx_skb_fail;
747         }
748
749         for (i = 0; i < priv->tx_ring_size; i++)
750                 priv->tx_skbuff[i] = NULL;
751
752         priv->rx_skbuff =
753             (struct sk_buff **) kmalloc(sizeof (struct sk_buff *) *
754                                         priv->rx_ring_size, GFP_KERNEL);
755
756         if (NULL == priv->rx_skbuff) {
757                 if (netif_msg_ifup(priv))
758                         printk(KERN_ERR "%s: Could not allocate rx_skbuff\n",
759                                         dev->name);
760                 err = -ENOMEM;
761                 goto rx_skb_fail;
762         }
763
764         for (i = 0; i < priv->rx_ring_size; i++)
765                 priv->rx_skbuff[i] = NULL;
766
767         /* Initialize some variables in our dev structure */
768         priv->dirty_tx = priv->cur_tx = priv->tx_bd_base;
769         priv->cur_rx = priv->rx_bd_base;
770         priv->skb_curtx = priv->skb_dirtytx = 0;
771         priv->skb_currx = 0;
772
773         /* Initialize Transmit Descriptor Ring */
774         txbdp = priv->tx_bd_base;
775         for (i = 0; i < priv->tx_ring_size; i++) {
776                 txbdp->status = 0;
777                 txbdp->length = 0;
778                 txbdp->bufPtr = 0;
779                 txbdp++;
780         }
781
782         /* Set the last descriptor in the ring to indicate wrap */
783         txbdp--;
784         txbdp->status |= TXBD_WRAP;
785
786         rxbdp = priv->rx_bd_base;
787         for (i = 0; i < priv->rx_ring_size; i++) {
788                 struct sk_buff *skb;
789
790                 skb = gfar_new_skb(dev);
791
792                 if (!skb) {
793                         printk(KERN_ERR "%s: Can't allocate RX buffers\n",
794                                         dev->name);
795
796                         goto err_rxalloc_fail;
797                 }
798
799                 priv->rx_skbuff[i] = skb;
800
801                 gfar_new_rxbdp(dev, rxbdp, skb);
802
803                 rxbdp++;
804         }
805
806         /* Set the last descriptor in the ring to wrap */
807         rxbdp--;
808         rxbdp->status |= RXBD_WRAP;
809
810         /* If the device has multiple interrupts, register for
811          * them.  Otherwise, only register for the one */
812         if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
813                 /* Install our interrupt handlers for Error,
814                  * Transmit, and Receive */
815                 if (request_irq(priv->interruptError, gfar_error,
816                                 0, "enet_error", dev) < 0) {
817                         if (netif_msg_intr(priv))
818                                 printk(KERN_ERR "%s: Can't get IRQ %d\n",
819                                         dev->name, priv->interruptError);
820
821                         err = -1;
822                         goto err_irq_fail;
823                 }
824
825                 if (request_irq(priv->interruptTransmit, gfar_transmit,
826                                 0, "enet_tx", dev) < 0) {
827                         if (netif_msg_intr(priv))
828                                 printk(KERN_ERR "%s: Can't get IRQ %d\n",
829                                         dev->name, priv->interruptTransmit);
830
831                         err = -1;
832
833                         goto tx_irq_fail;
834                 }
835
836                 if (request_irq(priv->interruptReceive, gfar_receive,
837                                 0, "enet_rx", dev) < 0) {
838                         if (netif_msg_intr(priv))
839                                 printk(KERN_ERR "%s: Can't get IRQ %d (receive0)\n",
840                                                 dev->name, priv->interruptReceive);
841
842                         err = -1;
843                         goto rx_irq_fail;
844                 }
845         } else {
846                 if (request_irq(priv->interruptTransmit, gfar_interrupt,
847                                 0, "gfar_interrupt", dev) < 0) {
848                         if (netif_msg_intr(priv))
849                                 printk(KERN_ERR "%s: Can't get IRQ %d\n",
850                                         dev->name, priv->interruptError);
851
852                         err = -1;
853                         goto err_irq_fail;
854                 }
855         }
856
857         phy_start(priv->phydev);
858
859         /* Configure the coalescing support */
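        /* (mk_ic_value() packs the frame-count and timer thresholds into
         * a single interrupt-coalescing register value) */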
860         if (priv->txcoalescing)
861                 gfar_write(&regs->txic,
862                            mk_ic_value(priv->txcount, priv->txtime));
863         else
864                 gfar_write(&regs->txic, 0);
865
866         if (priv->rxcoalescing)
867                 gfar_write(&regs->rxic,
868                            mk_ic_value(priv->rxcount, priv->rxtime));
869         else
870                 gfar_write(&regs->rxic, 0);
871
872         if (priv->rx_csum_enable)
873                 rctrl |= RCTRL_CHECKSUMMING;
874
875         if (priv->extended_hash) {
876                 rctrl |= RCTRL_EXTHASH;
877
878                 gfar_clear_exact_match(dev);
879                 rctrl |= RCTRL_EMEN;
880         }
881
882         if (priv->vlan_enable)
883                 rctrl |= RCTRL_VLAN;
884
885         if (priv->padding) {
886                 rctrl &= ~RCTRL_PAL_MASK;
887                 rctrl |= RCTRL_PADDING(priv->padding);
888         }
889
890         /* Init rctrl based on our settings */
891         gfar_write(&priv->regs->rctrl, rctrl);
892
893         if (dev->features & NETIF_F_IP_CSUM)
894                 gfar_write(&priv->regs->tctrl, TCTRL_INIT_CSUM);
895
896         /* Set the extraction length and index */
897         attrs = ATTRELI_EL(priv->rx_stash_size) |
898                 ATTRELI_EI(priv->rx_stash_index);
899
900         gfar_write(&priv->regs->attreli, attrs);
901
902         /* Start with defaults, and add stashing or locking
903          * depending on the appropriate variables */
904         attrs = ATTR_INIT_SETTINGS;
905
906         if (priv->bd_stash_en)
907                 attrs |= ATTR_BDSTASH;
908
909         if (priv->rx_stash_size != 0)
910                 attrs |= ATTR_BUFSTASH;
911
912         gfar_write(&priv->regs->attr, attrs);
913
914         gfar_write(&priv->regs->fifo_tx_thr, priv->fifo_threshold);
915         gfar_write(&priv->regs->fifo_tx_starve, priv->fifo_starve);
916         gfar_write(&priv->regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);
917
918         /* Start the controller */
919         gfar_start(dev);
920
921         return 0;
922
923 rx_irq_fail:
924         free_irq(priv->interruptTransmit, dev);
925 tx_irq_fail:
926         free_irq(priv->interruptError, dev);
927 err_irq_fail:
928 err_rxalloc_fail:
929 rx_skb_fail:
930         free_skb_resources(priv);
931 tx_skb_fail:
932         dma_free_coherent(&dev->dev,
933                         sizeof(struct txbd8)*priv->tx_ring_size
934                         + sizeof(struct rxbd8)*priv->rx_ring_size,
935                         priv->tx_bd_base,
936                         gfar_read(&regs->tbase0));
937
938         return err;
939 }
940
941 /* Called when something needs to use the ethernet device */
942 /* Returns 0 for success. */
943 static int gfar_enet_open(struct net_device *dev)
944 {
945 #ifdef CONFIG_GFAR_NAPI
946         struct gfar_private *priv = netdev_priv(dev);
947 #endif
948         int err;
949
950 #ifdef CONFIG_GFAR_NAPI
951         napi_enable(&priv->napi);
952 #endif
953
954         /* Initialize a bunch of registers */
955         init_registers(dev);
956
957         gfar_set_mac_address(dev);
958
959         err = init_phy(dev);
960
961         if (err) {
962 #ifdef CONFIG_GFAR_NAPI
963                 napi_disable(&priv->napi);
964 #endif
965                 return err;
966         }
967
968         err = startup_gfar(dev);
969         if (err) {
970 #ifdef CONFIG_GFAR_NAPI
971                 napi_disable(&priv->napi);
972 #endif
973                 return err;
974         }
975
976         netif_start_queue(dev);
977
978         return err;
979 }
980
981 static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb, struct txbd8 *bdp)
982 {
983         struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);
984
985         memset(fcb, 0, GMAC_FCB_LEN);
986
987         return fcb;
988 }
989
990 static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb)
991 {
992         u8 flags = 0;
993
994         /* If we're here, it's an IP packet with a TCP or UDP
995          * payload.  We set it up for checksumming, using the
996          * pseudo-header checksum we provide
997          */
998         flags = TXFCB_DEFAULT;
999
1000         /* Tell the controller what the protocol is */
1001         /* And provide the already calculated phcs */
1002         if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
1003                 flags |= TXFCB_UDP;
1004                 fcb->phcs = udp_hdr(skb)->check;
1005         } else
1006                 fcb->phcs = tcp_hdr(skb)->check;
1007
1008         /* l3os is the distance between the start of the
1009          * frame (skb->data) and the start of the IP hdr.
1010          * l4os is the distance between the start of the
1011          * l3 hdr and the l4 hdr */
1012         fcb->l3os = (u16)(skb_network_offset(skb) - GMAC_FCB_LEN);
1013         fcb->l4os = skb_network_header_len(skb);
1014
1015         fcb->flags = flags;
1016 }
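/*
 * Worked example (illustrative): for an untagged IPv4 frame, the FCB
 * push in gfar_add_fcb() leaves skb_network_offset() at
 * GMAC_FCB_LEN + ETH_HLEN, so l3os works out to ETH_HLEN (14), and
 * l4os is the IP header length (20 when no options are present).
 */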
1017
1018 static inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
1019 {
1020         fcb->flags |= TXFCB_VLN;
1021         fcb->vlctl = vlan_tx_tag_get(skb);
1022 }
1023
1024 /* This is called by the kernel when a frame is ready for transmission. */
1025 /* It is pointed to by the dev->hard_start_xmit function pointer */
1026 static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
1027 {
1028         struct gfar_private *priv = netdev_priv(dev);
1029         struct txfcb *fcb = NULL;
1030         struct txbd8 *txbdp;
1031         u16 status;
1032         unsigned long flags;
1033
1034         /* Update transmit stats */
1035         dev->stats.tx_bytes += skb->len;
1036
1037         /* Lock priv now */
1038         spin_lock_irqsave(&priv->txlock, flags);
1039
1040         /* Point at the first free tx descriptor */
1041         txbdp = priv->cur_tx;
1042
1043         /* Clear all but the WRAP status flags */
1044         status = txbdp->status & TXBD_WRAP;
1045
1046         /* Set up checksumming */
1047         if (likely((dev->features & NETIF_F_IP_CSUM)
1048                         && (CHECKSUM_PARTIAL == skb->ip_summed))) {
1049                 fcb = gfar_add_fcb(skb, txbdp);
1050                 status |= TXBD_TOE;
1051                 gfar_tx_checksum(skb, fcb);
1052         }
1053
1054         if (priv->vlan_enable &&
1055                         unlikely(priv->vlgrp && vlan_tx_tag_present(skb))) {
1056                 if (unlikely(NULL == fcb)) {
1057                         fcb = gfar_add_fcb(skb, txbdp);
1058                         status |= TXBD_TOE;
1059                 }
1060
1061                 gfar_tx_vlan(skb, fcb);
1062         }
1063
1064         /* Set buffer length and pointer */
1065         txbdp->length = skb->len;
1066         txbdp->bufPtr = dma_map_single(&dev->dev, skb->data,
1067                         skb->len, DMA_TO_DEVICE);
1068
1069         /* Save the skb pointer so we can free it later */
1070         priv->tx_skbuff[priv->skb_curtx] = skb;
1071
1072         /* Update the current skb pointer (wrapping if this was the last) */
1073         priv->skb_curtx =
1074             (priv->skb_curtx + 1) & TX_RING_MOD_MASK(priv->tx_ring_size);
1075
1076         /* Flag the BD as interrupt-causing */
1077         status |= TXBD_INTERRUPT;
1078
1079         /* Flag the BD as ready to go, last in frame, and  */
1080         /* in need of CRC */
1081         status |= (TXBD_READY | TXBD_LAST | TXBD_CRC);
1082
1083         dev->trans_start = jiffies;
1084
1085         /* The powerpc-specific eieio() is used, as wmb() has too strong
1086          * semantics (it requires synchronization between cacheable and
1087          * uncacheable mappings, which eieio doesn't provide and which we
1088          * don't need), thus requiring a more expensive sync instruction.  At
1089          * some point, the set of architecture-independent barrier functions
1090          * should be expanded to include weaker barriers.
1091          */
1092
1093         eieio();
1094         txbdp->status = status;
1095
1096         /* If this was the last BD in the ring, the next one */
1097         /* is at the beginning of the ring */
1098         if (txbdp->status & TXBD_WRAP)
1099                 txbdp = priv->tx_bd_base;
1100         else
1101                 txbdp++;
1102
1103         /* If the next BD still needs to be cleaned up, then the bds
1104            are full.  We need to tell the kernel to stop sending us stuff. */
1105         if (txbdp == priv->dirty_tx) {
1106                 netif_stop_queue(dev);
1107
1108                 dev->stats.tx_fifo_errors++;
1109         }
1110
1111         /* Update the current txbd to the next one */
1112         priv->cur_tx = txbdp;
1113
1114         /* Tell the DMA to go go go */
1115         gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);
1116
1117         /* Unlock priv */
1118         spin_unlock_irqrestore(&priv->txlock, flags);
1119
1120         return 0;
1121 }
1122
1123 /* Stops the kernel queue, and halts the controller */
1124 static int gfar_close(struct net_device *dev)
1125 {
1126         struct gfar_private *priv = netdev_priv(dev);
1127
1128 #ifdef CONFIG_GFAR_NAPI
1129         napi_disable(&priv->napi);
1130 #endif
1131
1132         stop_gfar(dev);
1133
1134         /* Disconnect from the PHY */
1135         phy_disconnect(priv->phydev);
1136         priv->phydev = NULL;
1137
1138         netif_stop_queue(dev);
1139
1140         return 0;
1141 }
1142
1143 /* Changes the mac address if the controller is not running. */
1144 int gfar_set_mac_address(struct net_device *dev)
1145 {
1146         gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
1147
1148         return 0;
1149 }
1150
1151
1152 /* Enables and disables VLAN insertion/extraction */
1153 static void gfar_vlan_rx_register(struct net_device *dev,
1154                 struct vlan_group *grp)
1155 {
1156         struct gfar_private *priv = netdev_priv(dev);
1157         unsigned long flags;
1158         u32 tempval;
1159
1160         spin_lock_irqsave(&priv->rxlock, flags);
1161
1162         priv->vlgrp = grp;
1163
1164         if (grp) {
1165                 /* Enable VLAN tag insertion */
1166                 tempval = gfar_read(&priv->regs->tctrl);
1167                 tempval |= TCTRL_VLINS;
1168
1169                 gfar_write(&priv->regs->tctrl, tempval);
1170
1171                 /* Enable VLAN tag extraction */
1172                 tempval = gfar_read(&priv->regs->rctrl);
1173                 tempval |= RCTRL_VLEX;
1174                 gfar_write(&priv->regs->rctrl, tempval);
1175         } else {
1176                 /* Disable VLAN tag insertion */
1177                 tempval = gfar_read(&priv->regs->tctrl);
1178                 tempval &= ~TCTRL_VLINS;
1179                 gfar_write(&priv->regs->tctrl, tempval);
1180
1181                 /* Disable VLAN tag extraction */
1182                 tempval = gfar_read(&priv->regs->rctrl);
1183                 tempval &= ~RCTRL_VLEX;
1184                 gfar_write(&priv->regs->rctrl, tempval);
1185         }
1186
1187         spin_unlock_irqrestore(&priv->rxlock, flags);
1188 }
1189
1190 static int gfar_change_mtu(struct net_device *dev, int new_mtu)
1191 {
1192         int tempsize, tempval;
1193         struct gfar_private *priv = netdev_priv(dev);
1194         int oldsize = priv->rx_buffer_size;
1195         int frame_size = new_mtu + ETH_HLEN;
1196
1197         if (priv->vlan_enable)
1198                 frame_size += VLAN_HLEN;
1199
1200         if (gfar_uses_fcb(priv))
1201                 frame_size += GMAC_FCB_LEN;
1202
1203         frame_size += priv->padding;
1204
1205         if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
1206                 if (netif_msg_drv(priv))
1207                         printk(KERN_ERR "%s: Invalid MTU setting\n",
1208                                         dev->name);
1209                 return -EINVAL;
1210         }
1211
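        /* Round the buffer size up to the next INCREMENTAL_BUFFER_SIZE
         * boundary (an already-aligned frame_size still gains a full
         * increment of headroom) */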
1212         tempsize =
1213             (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
1214             INCREMENTAL_BUFFER_SIZE;
1215
1216         /* Only stop and start the controller if it isn't already
1217          * stopped, and we changed something */
1218         if ((oldsize != tempsize) && (dev->flags & IFF_UP))
1219                 stop_gfar(dev);
1220
1221         priv->rx_buffer_size = tempsize;
1222
1223         dev->mtu = new_mtu;
1224
1225         gfar_write(&priv->regs->mrblr, priv->rx_buffer_size);
1226         gfar_write(&priv->regs->maxfrm, priv->rx_buffer_size);
1227
1228         /* If the mtu is larger than the max size for standard
1229          * ethernet frames (ie, a jumbo frame), then set maccfg2
1230          * to allow huge frames, and to check the length */
1231         tempval = gfar_read(&priv->regs->maccfg2);
1232
1233         if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE)
1234                 tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
1235         else
1236                 tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
1237
1238         gfar_write(&priv->regs->maccfg2, tempval);
1239
1240         if ((oldsize != tempsize) && (dev->flags & IFF_UP))
1241                 startup_gfar(dev);
1242
1243         return 0;
1244 }
1245
1246 /* gfar_timeout gets called when a packet has not been
1247  * transmitted after a set amount of time.
1248  * For now, assume that clearing out all the structures and
1249  * starting over will fix the problem. */
1250 static void gfar_timeout(struct net_device *dev)
1251 {
1252         dev->stats.tx_errors++;
1253
1254         if (dev->flags & IFF_UP) {
1255                 stop_gfar(dev);
1256                 startup_gfar(dev);
1257         }
1258
1259         netif_schedule(dev);
1260 }
1261
1262 /* Interrupt Handler for Transmit complete */
1263 int gfar_clean_tx_ring(struct net_device *dev)
1264 {
1265         struct txbd8 *bdp;
1266         struct gfar_private *priv = netdev_priv(dev);
1267         int howmany = 0;
1268
1269         bdp = priv->dirty_tx;
1270         while ((bdp->status & TXBD_READY) == 0) {
1271                 /* If dirty_tx and cur_tx are the same, then either the */
1272                 /* ring is empty or full now (it could only be full in the beginning, */
1273                 /* obviously).  If it is empty, we are done. */
1274                 if ((bdp == priv->cur_tx) && (netif_queue_stopped(dev) == 0))
1275                         break;
1276
1277                 howmany++;
1278
1279                 /* Deferred means some collisions occurred during transmit, */
1280                 /* but we eventually sent the packet. */
1281                 if (bdp->status & TXBD_DEF)
1282                         dev->stats.collisions++;
1283
1284                 /* Free the sk buffer associated with this TxBD */
1285                 dev_kfree_skb_irq(priv->tx_skbuff[priv->skb_dirtytx]);
1286
1287                 priv->tx_skbuff[priv->skb_dirtytx] = NULL;
1288                 priv->skb_dirtytx =
1289                     (priv->skb_dirtytx +
1290                      1) & TX_RING_MOD_MASK(priv->tx_ring_size);
1291
1292                 /* Clean BD length for empty detection */
1293                 bdp->length = 0;
1294
1295                 /* update bdp to point at next bd in the ring (wrapping if necessary) */
1296                 if (bdp->status & TXBD_WRAP)
1297                         bdp = priv->tx_bd_base;
1298                 else
1299                         bdp++;
1300
1301                 /* Move dirty_tx to be the next bd */
1302                 priv->dirty_tx = bdp;
1303
1304                 /* We freed a buffer, so now we can restart transmission */
1305                 if (netif_queue_stopped(dev))
1306                         netif_wake_queue(dev);
1307         } /* while ((bdp->status & TXBD_READY) == 0) */
1308
1309         dev->stats.tx_packets += howmany;
1310
1311         return howmany;
1312 }
1313
1314 /* Interrupt Handler for Transmit complete */
1315 static irqreturn_t gfar_transmit(int irq, void *dev_id)
1316 {
1317         struct net_device *dev = (struct net_device *) dev_id;
1318         struct gfar_private *priv = netdev_priv(dev);
1319
1320         /* Clear IEVENT */
1321         gfar_write(&priv->regs->ievent, IEVENT_TX_MASK);
1322
1323         /* Lock priv */
1324         spin_lock(&priv->txlock);
1325
1326         gfar_clean_tx_ring(dev);
1327
1328         /* If we are coalescing the interrupts, reset the timer */
1329         /* Otherwise, clear it */
1330         if (likely(priv->txcoalescing)) {
1331                 gfar_write(&priv->regs->txic, 0);
1332                 gfar_write(&priv->regs->txic,
1333                            mk_ic_value(priv->txcount, priv->txtime));
1334         }
1335
1336         spin_unlock(&priv->txlock);
1337
1338         return IRQ_HANDLED;
1339 }
1340
1341 static void gfar_new_rxbdp(struct net_device *dev, struct rxbd8 *bdp,
1342                 struct sk_buff *skb)
1343 {
1344         struct gfar_private *priv = netdev_priv(dev);
1345         u32 *status_len = (u32 *)bdp;
1346         u16 flags;
1347
1348         bdp->bufPtr = dma_map_single(&dev->dev, skb->data,
1349                         priv->rx_buffer_size, DMA_FROM_DEVICE);
1350
1351         flags = RXBD_EMPTY | RXBD_INTERRUPT;
1352
1353         if (bdp == priv->rx_bd_base + priv->rx_ring_size - 1)
1354                 flags |= RXBD_WRAP;
1355
1356         eieio();
1357
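        /* One 32-bit store sets status to 'flags' and length to 0, so the
         * EMPTY bit reaches the hardware only after the eieio() above has
         * ordered the bufPtr write */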
1358         *status_len = (u32)flags << 16;
1359 }
1360
1361
1362 struct sk_buff *gfar_new_skb(struct net_device *dev)
1363 {
1364         unsigned int alignamount;
1365         struct gfar_private *priv = netdev_priv(dev);
1366         struct sk_buff *skb = NULL;
1367
1368         /* Allocate the skb; on failure, return NULL and let the caller recover */
1369         skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT);
1370
1371         if (!skb)
1372                 return NULL;
1373
1374         alignamount = RXBUF_ALIGNMENT -
1375                 (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1));
1376
1377         /* We need the data buffer to be aligned properly.  We will reserve
1378          * as many bytes as needed to align the data properly
1379          */
1380         skb_reserve(skb, alignamount);
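        /* (note an already-aligned buffer gets a full RXBUF_ALIGNMENT
         * reserved; the extra RXBUF_ALIGNMENT bytes requested from
         * netdev_alloc_skb() above guarantee the headroom exists) */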
1381
1382         return skb;
1383 }
1384
1385 static inline void count_errors(unsigned short status, struct net_device *dev)
1386 {
1387         struct gfar_private *priv = netdev_priv(dev);
1388         struct net_device_stats *stats = &dev->stats;
1389         struct gfar_extra_stats *estats = &priv->extra_stats;
1390
1391         /* If the packet was truncated, none of the other errors
1392          * matter */
1393         if (status & RXBD_TRUNCATED) {
1394                 stats->rx_length_errors++;
1395
1396                 estats->rx_trunc++;
1397
1398                 return;
1399         }
1400         /* Count the errors, if there were any */
1401         if (status & (RXBD_LARGE | RXBD_SHORT)) {
1402                 stats->rx_length_errors++;
1403
1404                 if (status & RXBD_LARGE)
1405                         estats->rx_large++;
1406                 else
1407                         estats->rx_short++;
1408         }
1409         if (status & RXBD_NONOCTET) {
1410                 stats->rx_frame_errors++;
1411                 estats->rx_nonoctet++;
1412         }
1413         if (status & RXBD_CRCERR) {
1414                 estats->rx_crcerr++;
1415                 stats->rx_crc_errors++;
1416         }
1417         if (status & RXBD_OVERRUN) {
1418                 estats->rx_overrun++;
1419                 stats->rx_crc_errors++;
1420         }
1421 }
1422
1423 irqreturn_t gfar_receive(int irq, void *dev_id)
1424 {
1425         struct net_device *dev = (struct net_device *) dev_id;
1426         struct gfar_private *priv = netdev_priv(dev);
1427 #ifdef CONFIG_GFAR_NAPI
1428         u32 tempval;
1429 #else
1430         unsigned long flags;
1431 #endif
1432
1433         /* support NAPI */
1434 #ifdef CONFIG_GFAR_NAPI
1435         /* Clear IEVENT, so interrupts aren't called again
1436          * because of the packets that have already arrived */
1437         gfar_write(&priv->regs->ievent, IEVENT_RTX_MASK);
1438
1439         if (netif_rx_schedule_prep(dev, &priv->napi)) {
1440                 tempval = gfar_read(&priv->regs->imask);
1441                 tempval &= IMASK_RTX_DISABLED;
1442                 gfar_write(&priv->regs->imask, tempval);
1443
1444                 __netif_rx_schedule(dev, &priv->napi);
1445         } else {
1446                 if (netif_msg_rx_err(priv))
1447                         printk(KERN_DEBUG "%s: receive called twice (%x)[%x]\n",
1448                                 dev->name, gfar_read(&priv->regs->ievent),
1449                                 gfar_read(&priv->regs->imask));
1450         }
1451 #else
1452         /* Clear IEVENT, so rx interrupt isn't called again
1453          * because of this interrupt */
1454         gfar_write(&priv->regs->ievent, IEVENT_RX_MASK);
1455
1456         spin_lock_irqsave(&priv->rxlock, flags);
1457         gfar_clean_rx_ring(dev, priv->rx_ring_size);
1458
1459         /* If we are coalescing interrupts, update the timer */
1460         /* Otherwise, clear it */
1461         if (likely(priv->rxcoalescing)) {
1462                 gfar_write(&priv->regs->rxic, 0);
1463                 gfar_write(&priv->regs->rxic,
1464                            mk_ic_value(priv->rxcount, priv->rxtime));
1465         }
1466
1467         spin_unlock_irqrestore(&priv->rxlock, flags);
1468 #endif
1469
1470         return IRQ_HANDLED;
1471 }
1472
1473 static inline int gfar_rx_vlan(struct sk_buff *skb,
1474                 struct vlan_group *vlgrp, unsigned short vlctl)
1475 {
1476 #ifdef CONFIG_GFAR_NAPI
1477         return vlan_hwaccel_receive_skb(skb, vlgrp, vlctl);
1478 #else
1479         return vlan_hwaccel_rx(skb, vlgrp, vlctl);
1480 #endif
1481 }
1482
1483 static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
1484 {
1485         /* If valid headers were found, and valid sums
1486          * were verified, then we tell the kernel that no
1487          * checksumming is necessary.  Otherwise, it is */
1488         if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
1489                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1490         else
1491                 skb->ip_summed = CHECKSUM_NONE;
1492 }
1493
1494
1495 static inline struct rxfcb *gfar_get_fcb(struct sk_buff *skb)
1496 {
1497         struct rxfcb *fcb = (struct rxfcb *)skb->data;
1498
1499         /* Remove the FCB from the skb */
1500         skb_pull(skb, GMAC_FCB_LEN);
1501
1502         return fcb;
1503 }
1504
1505 /* gfar_process_frame() -- handle one incoming packet if skb
1506  * isn't NULL.  */
1507 static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
1508                 int length)
1509 {
1510         struct gfar_private *priv = netdev_priv(dev);
1511         struct rxfcb *fcb = NULL;
1512
1513         if (!skb) {
1514                 if (netif_msg_rx_err(priv))
1515                         printk(KERN_WARNING "%s: Missing skb!\n", dev->name);
1516                 dev->stats.rx_dropped++;
1517                 priv->extra_stats.rx_skbmissing++;
1518         } else {
1519                 int ret;
1520
1521                 /* Prep the skb for the packet */
1522                 skb_put(skb, length);
1523
1524                 /* Grab the FCB if there is one */
1525                 if (gfar_uses_fcb(priv))
1526                         fcb = gfar_get_fcb(skb);
1527
1528                 /* Remove the padded bytes, if there are any */
1529                 if (priv->padding)
1530                         skb_pull(skb, priv->padding);
1531
1532                 if (priv->rx_csum_enable)
1533                         gfar_rx_checksum(skb, fcb);
1534
1535                 /* Tell the skb what kind of packet this is */
1536                 skb->protocol = eth_type_trans(skb, dev);
1537
1538                 /* Send the packet up the stack */
1539                 if (unlikely(priv->vlgrp && (fcb->flags & RXFCB_VLN)))
1540                         ret = gfar_rx_vlan(skb, priv->vlgrp, fcb->vlctl);
1541                 else
1542                         ret = RECEIVE(skb);
1543
1544                 if (ret == NET_RX_DROP)
1545                         priv->extra_stats.kernel_dropped++;
1546         }
1547
1548         return 0;
1549 }
1550
1551 /* gfar_clean_rx_ring() -- Processes each frame in the rx ring
1552  *   until the budget/quota has been reached. Returns the number
1553  *   of frames handled
1554  */
1555 int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
1556 {
1557         struct rxbd8 *bdp;
1558         struct sk_buff *skb;
1559         u16 pkt_len;
1560         int howmany = 0;
1561         struct gfar_private *priv = netdev_priv(dev);
1562
1563         /* Get the first full descriptor */
1564         bdp = priv->cur_rx;
1565
1566         while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
1567                 struct sk_buff *newskb;
1568                 rmb();  /* finish reading bdp->status before the rest of the bd */
1569
1570                 /* Allocate a replacement skb for this ring slot */
1571                 newskb = gfar_new_skb(dev);
1572
1573                 skb = priv->rx_skbuff[priv->skb_currx];
1574
1575                 /* Drop the frame on alloc failure, descriptor error, or split frame */
1576                 if (unlikely(!newskb || !(bdp->status & RXBD_LAST) ||
1577                                  bdp->status & RXBD_ERR)) {
1578                         count_errors(bdp->status, dev);
1579
1580                         if (unlikely(!newskb))
1581                                 newskb = skb;
1582
1583                         if (skb) {
1584                                 dma_unmap_single(&priv->dev->dev,
1585                                                 bdp->bufPtr,
1586                                                 priv->rx_buffer_size,
1587                                                 DMA_FROM_DEVICE);
1588
1589                                 dev_kfree_skb_any(skb);
1590                         }
1591                 } else {
1592                         /* Increment the number of packets */
1593                         dev->stats.rx_packets++;
1594                         howmany++;
1595
1596                         /* Remove the FCS from the packet length */
1597                         pkt_len = bdp->length - ETH_FCS_LEN;
1598
1599                         gfar_process_frame(dev, skb, pkt_len);
1600
1601                         dev->stats.rx_bytes += pkt_len;
1602                 }
1603
1604                 dev->last_rx = jiffies;
1605
1606                 priv->rx_skbuff[priv->skb_currx] = newskb;
1607
1608                 /* Set up the new bdp */
1609                 gfar_new_rxbdp(dev, bdp, newskb);
1610
1611                 /* Advance to the next descriptor, wrapping at the end of the ring */
1612                 if (bdp->status & RXBD_WRAP)
1613                         bdp = priv->rx_bd_base;
1614                 else
1615                         bdp++;
1616
1617                 /* update to point at the next skb */
1618                 priv->skb_currx =
1619                     (priv->skb_currx + 1) &
1620                     RX_RING_MOD_MASK(priv->rx_ring_size);
1621         }
1622
1623         /* Update the current rxbd pointer to be the next one */
1624         priv->cur_rx = bdp;
1625
1626         return howmany;
1627 }
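
/* Worked example of the index arithmetic above (a sketch, assuming
 * rx_ring_size is a power of two so RX_RING_MOD_MASK(size) is size - 1):
 * with rx_ring_size = 256 the mask is 255, so advancing from slot 255
 * gives (255 + 1) & 255 = 0, wrapping the skb index back to the start
 * just as the RXBD_WRAP bit wraps the descriptor pointer itself.
 */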
1628
1629 #ifdef CONFIG_GFAR_NAPI
1630 static int gfar_poll(struct napi_struct *napi, int budget)
1631 {
1632         struct gfar_private *priv = container_of(napi, struct gfar_private, napi);
1633         struct net_device *dev = priv->dev;
1634         int howmany;
1635         unsigned long flags;
1636
1637         /* If we fail to get the lock, don't bother with the TX BDs */
1638         if (spin_trylock_irqsave(&priv->txlock, flags)) {
1639                 gfar_clean_tx_ring(dev);
1640                 spin_unlock_irqrestore(&priv->txlock, flags);
1641         }
1642
1643         howmany = gfar_clean_rx_ring(dev, budget);
1644
1645         if (howmany < budget) {
1646                 netif_rx_complete(dev, napi);
1647
1648                 /* Clear the halt bit in RSTAT */
1649                 gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT);
1650
1651                 gfar_write(&priv->regs->imask, IMASK_DEFAULT);
1652
1653                 /* If we are coalescing interrupts, update the timer */
1654                 /* Otherwise, clear it */
1655                 if (likely(priv->rxcoalescing)) {
1656                         gfar_write(&priv->regs->rxic, 0);
1657                         gfar_write(&priv->regs->rxic,
1658                                    mk_ic_value(priv->rxcount, priv->rxtime));
1659                 }
1660         }
1661
1662         return howmany;
1663 }
1664 #endif
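
/* Note on the budget contract above (commentary only): returning fewer
 * frames than 'budget' signals that the rings are drained, so the poll
 * is completed and interrupts are re-enabled.  E.g. with budget = 64,
 * if only 10 frames are found, gfar_poll() returns 10 and restores
 * IMASK_DEFAULT; if it returns 64, the core simply polls again without
 * unmasking interrupts.
 */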
1665
1666 #ifdef CONFIG_NET_POLL_CONTROLLER
1667 /*
1668  * Polling 'interrupt' - used by things like netconsole to send skbs
1669  * without having to re-enable interrupts. It's not called while
1670  * the interrupt routine is executing.
1671  */
1672 static void gfar_netpoll(struct net_device *dev)
1673 {
1674         struct gfar_private *priv = netdev_priv(dev);
1675
1676         /* If the device has multiple interrupts, run tx/rx */
1677         if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
1678                 disable_irq(priv->interruptTransmit);
1679                 disable_irq(priv->interruptReceive);
1680                 disable_irq(priv->interruptError);
1681                 gfar_interrupt(priv->interruptTransmit, dev);
1682                 enable_irq(priv->interruptError);
1683                 enable_irq(priv->interruptReceive);
1684                 enable_irq(priv->interruptTransmit);
1685         } else {
1686                 disable_irq(priv->interruptTransmit);
1687                 gfar_interrupt(priv->interruptTransmit, dev);
1688                 enable_irq(priv->interruptTransmit);
1689         }
1690 }
1691 #endif
1692
1693 /* The interrupt handler for devices with one interrupt */
1694 static irqreturn_t gfar_interrupt(int irq, void *dev_id)
1695 {
1696         struct net_device *dev = dev_id;
1697         struct gfar_private *priv = netdev_priv(dev);
1698
1699         /* Save ievent for future reference */
1700         u32 events = gfar_read(&priv->regs->ievent);
1701
1702         /* Check for reception */
1703         if (events & IEVENT_RX_MASK)
1704                 gfar_receive(irq, dev_id);
1705
1706         /* Check for transmit completion */
1707         if (events & IEVENT_TX_MASK)
1708                 gfar_transmit(irq, dev_id);
1709
1710         /* Check for errors */
1711         if (events & IEVENT_ERR_MASK)
1712                 gfar_error(irq, dev_id);
1713
1714         return IRQ_HANDLED;
1715 }
1716
1717 /* Called every time the controller might need to be made
1718  * aware of a new link state.  The PHY code conveys this
1719  * information through variables in the phydev structure, and this
1720  * function converts those variables into the appropriate
1721  * register values, and can bring down the device if needed.
1722  */
1723 static void adjust_link(struct net_device *dev)
1724 {
1725         struct gfar_private *priv = netdev_priv(dev);
1726         struct gfar __iomem *regs = priv->regs;
1727         unsigned long flags;
1728         struct phy_device *phydev = priv->phydev;
1729         int new_state = 0;
1730
1731         spin_lock_irqsave(&priv->txlock, flags);
1732         if (phydev->link) {
1733                 u32 tempval = gfar_read(&regs->maccfg2);
1734                 u32 ecntrl = gfar_read(&regs->ecntrl);
1735
1736                 /* Now we make sure that we can be in full duplex mode.
1737                  * If not, we operate in half-duplex mode. */
1738                 if (phydev->duplex != priv->oldduplex) {
1739                         new_state = 1;
1740                         if (!(phydev->duplex))
1741                                 tempval &= ~(MACCFG2_FULL_DUPLEX);
1742                         else
1743                                 tempval |= MACCFG2_FULL_DUPLEX;
1744
1745                         priv->oldduplex = phydev->duplex;
1746                 }
1747
1748                 if (phydev->speed != priv->oldspeed) {
1749                         new_state = 1;
1750                         switch (phydev->speed) {
1751                         case 1000:
1752                                 tempval =
1753                                     ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
1754                                 break;
1755                         case 100:
1756                         case 10:
1757                                 tempval =
1758                                     ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
1759
1760                                 /* Reduced mode distinguishes
1761                                  * between 10 and 100 */
1762                                 if (phydev->speed == SPEED_100)
1763                                         ecntrl |= ECNTRL_R100;
1764                                 else
1765                                         ecntrl &= ~(ECNTRL_R100);
1766                                 break;
1767                         default:
1768                                 if (netif_msg_link(priv))
1769                                         printk(KERN_WARNING
1770                                                 "%s: Ack!  Speed (%d) is not 10/100/1000!\n",
1771                                                 dev->name, phydev->speed);
1772                                 break;
1773                         }
1774
1775                         priv->oldspeed = phydev->speed;
1776                 }
1777
1778                 gfar_write(&regs->maccfg2, tempval);
1779                 gfar_write(&regs->ecntrl, ecntrl);
1780
1781                 if (!priv->oldlink) {
1782                         new_state = 1;
1783                         priv->oldlink = 1;
1784                         netif_schedule(dev);
1785                 }
1786         } else if (priv->oldlink) {
1787                 new_state = 1;
1788                 priv->oldlink = 0;
1789                 priv->oldspeed = 0;
1790                 priv->oldduplex = -1;
1791         }
1792
1793         if (new_state && netif_msg_link(priv))
1794                 phy_print_status(phydev);
1795
1796         spin_unlock_irqrestore(&priv->txlock, flags);
1797 }
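
/* For example (a sketch of the mapping above): a PHY reporting
 * 1000/full leaves MACCFG2 in GMII mode with MACCFG2_FULL_DUPLEX set,
 * while 100/half selects MACCFG2_MII, clears the duplex bit, and sets
 * ECNTRL_R100 so reduced mode can distinguish 100 from 10.
 */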
1798
1799 /* Update the hash table based on the current list of multicast
1800  * addresses we subscribe to.  Also, change the promiscuity of
1801  * the device based on the flags (this function is called
1802  * whenever dev->flags is changed) */
1803 static void gfar_set_multi(struct net_device *dev)
1804 {
1805         struct dev_mc_list *mc_ptr;
1806         struct gfar_private *priv = netdev_priv(dev);
1807         struct gfar __iomem *regs = priv->regs;
1808         u32 tempval;
1809
1810         if (dev->flags & IFF_PROMISC) {
1811                 /* Set RCTRL to PROM */
1812                 tempval = gfar_read(&regs->rctrl);
1813                 tempval |= RCTRL_PROM;
1814                 gfar_write(&regs->rctrl, tempval);
1815         } else {
1816                 /* Set RCTRL to not PROM */
1817                 tempval = gfar_read(&regs->rctrl);
1818                 tempval &= ~(RCTRL_PROM);
1819                 gfar_write(&regs->rctrl, tempval);
1820         }
1821
1822         if (dev->flags & IFF_ALLMULTI) {
1823                 /* Set the hash to rx all multicast frames */
1824                 gfar_write(&regs->igaddr0, 0xffffffff);
1825                 gfar_write(&regs->igaddr1, 0xffffffff);
1826                 gfar_write(&regs->igaddr2, 0xffffffff);
1827                 gfar_write(&regs->igaddr3, 0xffffffff);
1828                 gfar_write(&regs->igaddr4, 0xffffffff);
1829                 gfar_write(&regs->igaddr5, 0xffffffff);
1830                 gfar_write(&regs->igaddr6, 0xffffffff);
1831                 gfar_write(&regs->igaddr7, 0xffffffff);
1832                 gfar_write(&regs->gaddr0, 0xffffffff);
1833                 gfar_write(&regs->gaddr1, 0xffffffff);
1834                 gfar_write(&regs->gaddr2, 0xffffffff);
1835                 gfar_write(&regs->gaddr3, 0xffffffff);
1836                 gfar_write(&regs->gaddr4, 0xffffffff);
1837                 gfar_write(&regs->gaddr5, 0xffffffff);
1838                 gfar_write(&regs->gaddr6, 0xffffffff);
1839                 gfar_write(&regs->gaddr7, 0xffffffff);
1840         } else {
1841                 int em_num;
1842                 int idx;
1843
1844                 /* zero out the hash */
1845                 gfar_write(&regs->igaddr0, 0x0);
1846                 gfar_write(&regs->igaddr1, 0x0);
1847                 gfar_write(&regs->igaddr2, 0x0);
1848                 gfar_write(&regs->igaddr3, 0x0);
1849                 gfar_write(&regs->igaddr4, 0x0);
1850                 gfar_write(&regs->igaddr5, 0x0);
1851                 gfar_write(&regs->igaddr6, 0x0);
1852                 gfar_write(&regs->igaddr7, 0x0);
1853                 gfar_write(&regs->gaddr0, 0x0);
1854                 gfar_write(&regs->gaddr1, 0x0);
1855                 gfar_write(&regs->gaddr2, 0x0);
1856                 gfar_write(&regs->gaddr3, 0x0);
1857                 gfar_write(&regs->gaddr4, 0x0);
1858                 gfar_write(&regs->gaddr5, 0x0);
1859                 gfar_write(&regs->gaddr6, 0x0);
1860                 gfar_write(&regs->gaddr7, 0x0);
1861
1862                 /* If we have extended hash tables, we need to
1863                  * clear the exact match registers to prepare for
1864                  * setting them */
1865                 if (priv->extended_hash) {
1866                         em_num = GFAR_EM_NUM + 1;
1867                         gfar_clear_exact_match(dev);
1868                         idx = 1;
1869                 } else {
1870                         idx = 0;
1871                         em_num = 0;
1872                 }
1873
1874                 if (dev->mc_count == 0)
1875                         return;
1876
1877                 /* Parse the list, and set the appropriate bits */
1878                 for (mc_ptr = dev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
1879                         if (idx < em_num) {
1880                                 gfar_set_mac_for_addr(dev, idx,
1881                                                 mc_ptr->dmi_addr);
1882                                 idx++;
1883                         } else
1884                                 gfar_set_hash_for_addr(dev, mc_ptr->dmi_addr);
1885                 }
1886         }
1889 }
1890
1891
1892 /* Clears each of the exact match registers to zero, so they
1893  * don't interfere with normal reception */
1894 static void gfar_clear_exact_match(struct net_device *dev)
1895 {
1896         int idx;
1897         u8 zero_arr[MAC_ADDR_LEN] = {0, 0, 0, 0, 0, 0};
1898 
1899         for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
1900                 gfar_set_mac_for_addr(dev, idx, (u8 *)zero_arr);
1901 }
1902
1903 /* Set the appropriate hash bit for the given addr */
1904 /* The algorithm works like so:
1905  * 1) Take the Destination Address (i.e. the multicast address),
1906  * compute a little-endian CRC over it, and reverse the bits of
1907  * the result.
1908  * 2) Use the 8 most significant bits as a hash into a 256-entry
1909  * table.  The table is controlled through 8 32-bit registers:
1910  * gaddr0-7.  gaddr0's MSB is entry 0, and gaddr7's LSB is
1911  * entry 255.  This means the 3 most significant bits of the
1912  * hash index select which gaddr register to use, and the other
1913  * 5 bits indicate which bit (assuming an IBM numbering scheme,
1914  * which for PowerPC (tm) is usually the case) in that register
1915  * holds the entry. */
1916 static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
1917 {
1918         u32 tempval;
1919         struct gfar_private *priv = netdev_priv(dev);
1920         u32 result = ether_crc(MAC_ADDR_LEN, addr);
1921         int width = priv->hash_width;
1922         u8 whichbit = (result >> (32 - width)) & 0x1f;
1923         u8 whichreg = result >> (32 - width + 5);
1924         u32 value = (1 << (31-whichbit));
1925
1926         tempval = gfar_read(priv->hash_regs[whichreg]);
1927         tempval |= value;
1928         gfar_write(priv->hash_regs[whichreg], tempval);
1931 }
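
/* Worked example (hypothetical values): assume hash_width is 8 and
 * ether_crc() returns result = 0xE3000000.  The top 8 bits are 0xE3
 * (binary 11100011), so:
 *
 *     whichreg = result >> 29          = 0b111   = 7  (gaddr7)
 *     whichbit = (result >> 24) & 0x1f = 0b00011 = 3
 *     value    = 1 << (31 - 3)         = 0x10000000
 *
 * i.e. IBM-numbered bit 3 of gaddr7 marks this address as present.
 */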
1932
1933
1934 /* There are multiple MAC address register pairs on some controllers.
1935  * This function sets the num'th pair to the given address
1936  */
1937 static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr)
1938 {
1939         struct gfar_private *priv = netdev_priv(dev);
1940         int idx;
1941         char tmpbuf[2 * sizeof(u32)] = { 0 };  /* padded so both u32 reads below stay in bounds */
1942         u32 tempval;
1943         u32 __iomem *macptr = &priv->regs->macstnaddr1;
1944
1945         macptr += num*2;
1946
1947         /* Copy the address into the MAC registers backwards, since
1948          * the controller expects it in byte-reversed order */
1949         for (idx = 0; idx < MAC_ADDR_LEN; idx++)
1950                 tmpbuf[MAC_ADDR_LEN - 1 - idx] = addr[idx];
1951
1952         gfar_write(macptr, *((u32 *) (tmpbuf)));
1953
1954         tempval = *((u32 *) (tmpbuf + 4));
1955
1956         gfar_write(macptr+1, tempval);
1957 }
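
/* Worked example (hypothetical address): for addr = 00:04:9f:00:01:02
 * the loop builds tmpbuf = { 02, 01, 00, 9f, 04, 00, 00, 00 }, so on a
 * big-endian CPU macstnaddr1 is written with 0x0201009f and macstnaddr2
 * with 0x04000000 -- the station address in the byte-reversed order the
 * controller expects.
 */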
1958
1959 /* GFAR error interrupt handler */
1960 static irqreturn_t gfar_error(int irq, void *dev_id)
1961 {
1962         struct net_device *dev = dev_id;
1963         struct gfar_private *priv = netdev_priv(dev);
1964
1965         /* Save ievent for future reference */
1966         u32 events = gfar_read(&priv->regs->ievent);
1967
1968         /* Clear IEVENT */
1969         gfar_write(&priv->regs->ievent, IEVENT_ERR_MASK);
1970
1971         /* Log what happened, for debugging */
1972         if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
1973                 printk(KERN_DEBUG "%s: error interrupt (ievent=0x%08x imask=0x%08x)\n",
1974                        dev->name, events, gfar_read(&priv->regs->imask));
1975
1976         /* Update the error counters */
1977         if (events & IEVENT_TXE) {
1978                 dev->stats.tx_errors++;
1979
1980                 if (events & IEVENT_LC)
1981                         dev->stats.tx_window_errors++;
1982                 if (events & IEVENT_CRL)
1983                         dev->stats.tx_aborted_errors++;
1984                 if (events & IEVENT_XFUN) {
1985                         if (netif_msg_tx_err(priv))
1986                                 printk(KERN_DEBUG "%s: TX FIFO underrun, "
1987                                        "packet dropped.\n", dev->name);
1988                         dev->stats.tx_dropped++;
1989                         priv->extra_stats.tx_underrun++;
1990
1991                         /* Reactivate the Tx Queues */
1992                         gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);
1993                 }
1994                 if (netif_msg_tx_err(priv))
1995                         printk(KERN_DEBUG "%s: Transmit Error\n", dev->name);
1996         }
1997         if (events & IEVENT_BSY) {
1998                 dev->stats.rx_errors++;
1999                 priv->extra_stats.rx_bsy++;
2000
2001                 gfar_receive(irq, dev_id);
2002
2003 #ifndef CONFIG_GFAR_NAPI
2004                 /* Clear the halt bit in RSTAT */
2005                 gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT);
2006 #endif
2007
2008                 if (netif_msg_rx_err(priv))
2009                         printk(KERN_DEBUG "%s: busy error (rstat: %x)\n",
2010                                dev->name, gfar_read(&priv->regs->rstat));
2011         }
2012         if (events & IEVENT_BABR) {
2013                 dev->stats.rx_errors++;
2014                 priv->extra_stats.rx_babr++;
2015
2016                 if (netif_msg_rx_err(priv))
2017                         printk(KERN_DEBUG "%s: babbling RX error\n", dev->name);
2018         }
2019         if (events & IEVENT_EBERR) {
2020                 priv->extra_stats.eberr++;
2021                 if (netif_msg_rx_err(priv))
2022                         printk(KERN_DEBUG "%s: bus error\n", dev->name);
2023         }
2024         if ((events & IEVENT_RXC) && netif_msg_rx_status(priv))
2025                 printk(KERN_DEBUG "%s: control frame\n", dev->name);
2026
2027         if (events & IEVENT_BABT) {
2028                 priv->extra_stats.tx_babt++;
2029                 if (netif_msg_tx_err(priv))
2030                         printk(KERN_DEBUG "%s: babbling TX error\n", dev->name);
2031         }
2032         return IRQ_HANDLED;
2033 }
2034
2035 /* work with hotplug and coldplug */
2036 MODULE_ALIAS("platform:fsl-gianfar");
2037
2038 /* Platform driver structure for this device */
2039 static struct platform_driver gfar_driver = {
2040         .probe = gfar_probe,
2041         .remove = gfar_remove,
2042         .driver = {
2043                 .name = "fsl-gianfar",
2044                 .owner = THIS_MODULE,
2045         },
2046 };
2047
2048 static int __init gfar_init(void)
2049 {
2050         int err = gfar_mdio_init();
2051
2052         if (err)
2053                 return err;
2054
2055         err = platform_driver_register(&gfar_driver);
2056
2057         if (err)
2058                 gfar_mdio_exit();
2059
2060         return err;
2061 }
2062
2063 static void __exit gfar_exit(void)
2064 {
2065         platform_driver_unregister(&gfar_driver);
2066         gfar_mdio_exit();
2067 }
2068
2069 module_init(gfar_init);
2070 module_exit(gfar_exit);
2071