/*
 * drivers/net/gianfar.c
 *
 * Gianfar Ethernet Driver
 * This driver is designed for the non-CPM ethernet controllers
 * on the 85xx and 83xx family of integrated processors
 * Based on 8260_io/fcc_enet.c
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala
 *
 * Copyright (c) 2002-2006 Freescale Semiconductor, Inc.
 * Copyright (c) 2007 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 *
 *  Gianfar:  AKA Lambda Draconis, "Dragon"
 *  RA 11 31 24.2
 *  Dec +69 19 52
 *  V 3.84
 *  B-V +1.62
 *
 *  Theory of operation
 *
 *  The driver is initialized through platform_device.  Structures which
 *  define the configuration needed by the board are defined in a
 *  board structure in arch/ppc/platforms (though I do not
 *  discount the possibility that other architectures could one
 *  day be supported).
 *
 *  The Gianfar Ethernet Controller uses a ring of buffer
 *  descriptors.  The beginning is indicated by a register
 *  pointing to the physical address of the start of the ring.
 *  The end is determined by a "wrap" bit being set in the
 *  last descriptor of the ring.
 *
 *  When a packet is received, the RXF bit in the
 *  IEVENT register is set, triggering an interrupt when the
 *  corresponding bit in the IMASK register is also set (if
 *  interrupt coalescing is active, then the interrupt may not
 *  happen immediately, but will wait until either a set number
 *  of frames or a set amount of time has passed).  In NAPI, the
 *  interrupt handler will signal there is work to be done, and
 *  exit.  Without NAPI, the packet(s) will be handled
 *  immediately.  Both methods will start at the last known empty
 *  descriptor, and process every subsequent descriptor until there
 *  are none left with data (NAPI will stop after a set number of
 *  packets to give time to other tasks, but will eventually
 *  process all the packets).  The data arrives inside a
 *  pre-allocated skb, and so after the skb is passed up to the
 *  stack, a new skb must be allocated, and the address field in
 *  the buffer descriptor must be updated to indicate this new
 *  skb.
 *
 *  When the kernel requests that a packet be transmitted, the
 *  driver starts where it left off last time, and points the
 *  descriptor at the buffer which was passed in.  The driver
 *  then informs the DMA engine that there are packets ready to
 *  be transmitted.  Once the controller is finished transmitting
 *  the packet, an interrupt may be triggered (under the same
 *  conditions as for reception, but depending on the TXF bit).
 *  The driver then cleans up the buffer.
 */
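
/*
 * A rough sketch of the descriptor ring described above (layout only;
 * field names follow the struct txbd8/rxbd8 usage later in this file):
 *
 *   TBASE0/RBASE0 --> +------------------+
 *                     | BD 0:   status   |  <- READY/EMPTY, WRAP, etc.
 *                     |         length   |
 *                     |         bufPtr   |  -> DMA address of an skb buffer
 *                     +------------------+
 *                     | BD 1:   ...      |
 *                     +------------------+
 *                     | BD N-1: status   |  <- WRAP bit set; the controller
 *                     |         ...      |     goes back to BD 0 from here
 *                     +------------------+
 */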

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/platform_device.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>

#include "gianfar.h"
#include "gianfar_mii.h"

#define TX_TIMEOUT      (1*HZ)
#undef BRIEF_GFAR_ERRORS
#undef VERBOSE_GFAR_ERRORS

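/* Under NAPI, frames are handed straight to the stack from the poll
 * routine; otherwise they go through the legacy netif_rx() backlog */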
#ifdef CONFIG_GFAR_NAPI
#define RECEIVE(x) netif_receive_skb(x)
#else
#define RECEIVE(x) netif_rx(x)
#endif

const char gfar_driver_name[] = "Gianfar Ethernet";
const char gfar_driver_version[] = "1.3";

static int gfar_enet_open(struct net_device *dev);
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
struct sk_buff *gfar_new_skb(struct net_device *dev);
static void gfar_new_rxbdp(struct net_device *dev, struct rxbd8 *bdp,
                struct sk_buff *skb);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id);
static irqreturn_t gfar_transmit(int irq, void *dev_id);
static irqreturn_t gfar_interrupt(int irq, void *dev_id);
static void adjust_link(struct net_device *dev);
static void init_registers(struct net_device *dev);
static int init_phy(struct net_device *dev);
static int gfar_probe(struct platform_device *pdev);
static int gfar_remove(struct platform_device *pdev);
static void free_skb_resources(struct gfar_private *priv);
static void gfar_set_multi(struct net_device *dev);
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
static void gfar_configure_serdes(struct net_device *dev);
extern int gfar_local_mdio_write(struct gfar_mii __iomem *regs, int mii_id, int regnum, u16 value);
extern int gfar_local_mdio_read(struct gfar_mii __iomem *regs, int mii_id, int regnum);
#ifdef CONFIG_GFAR_NAPI
static int gfar_poll(struct napi_struct *napi, int budget);
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
static void gfar_netpoll(struct net_device *dev);
#endif
int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit);
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, int length);
static void gfar_vlan_rx_register(struct net_device *netdev,
                                struct vlan_group *grp);
void gfar_halt(struct net_device *dev);
void gfar_start(struct net_device *dev);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr);

extern const struct ethtool_ops gfar_ethtool_ops;

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");

/* Returns 1 if incoming frames use an FCB */
static inline int gfar_uses_fcb(struct gfar_private *priv)
{
        return (priv->vlan_enable || priv->rx_csum_enable);
}

/* Set up the ethernet device structure, private data,
 * and anything else we need before we start */
static int gfar_probe(struct platform_device *pdev)
{
        u32 tempval;
        struct net_device *dev = NULL;
        struct gfar_private *priv = NULL;
        struct gianfar_platform_data *einfo;
        struct resource *r;
        int err = 0;
        DECLARE_MAC_BUF(mac);

        einfo = (struct gianfar_platform_data *) pdev->dev.platform_data;

        if (NULL == einfo) {
                printk(KERN_ERR "gfar %d: Missing additional data!\n",
                       pdev->id);

                return -ENODEV;
        }

        /* Create an ethernet device instance */
        dev = alloc_etherdev(sizeof (*priv));

        if (NULL == dev)
                return -ENOMEM;

        priv = netdev_priv(dev);
        priv->dev = dev;

        /* Set the info in the priv to the current info */
        priv->einfo = einfo;

        /* fill out IRQ fields */
        if (einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
                priv->interruptTransmit = platform_get_irq_byname(pdev, "tx");
                priv->interruptReceive = platform_get_irq_byname(pdev, "rx");
                priv->interruptError = platform_get_irq_byname(pdev, "error");
                if (priv->interruptTransmit < 0 || priv->interruptReceive < 0 || priv->interruptError < 0)
                        goto regs_fail;
        } else {
                priv->interruptTransmit = platform_get_irq(pdev, 0);
                if (priv->interruptTransmit < 0)
                        goto regs_fail;
        }

        /* get a pointer to the register memory */
        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        priv->regs = ioremap(r->start, sizeof (struct gfar));

        if (NULL == priv->regs) {
                err = -ENOMEM;
                goto regs_fail;
        }

        spin_lock_init(&priv->txlock);
        spin_lock_init(&priv->rxlock);

        platform_set_drvdata(pdev, dev);

        /* Stop the DMA engine now, in case it was running before */
        /* (The firmware could have used it, and left it running). */
        /* To do this, we write Graceful Receive Stop and Graceful */
        /* Transmit Stop, and then wait until the corresponding bits */
        /* in IEVENT indicate the stops have completed. */
        tempval = gfar_read(&priv->regs->dmactrl);
        tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
        gfar_write(&priv->regs->dmactrl, tempval);

        tempval = gfar_read(&priv->regs->dmactrl);
        tempval |= (DMACTRL_GRS | DMACTRL_GTS);
        gfar_write(&priv->regs->dmactrl, tempval);

        while (!(gfar_read(&priv->regs->ievent) & (IEVENT_GRSC | IEVENT_GTSC)))
                cpu_relax();

        /* Reset MAC layer */
        gfar_write(&priv->regs->maccfg1, MACCFG1_SOFT_RESET);

        tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
        gfar_write(&priv->regs->maccfg1, tempval);

        /* Initialize MACCFG2. */
        gfar_write(&priv->regs->maccfg2, MACCFG2_INIT_SETTINGS);

        /* Initialize ECNTRL */
        gfar_write(&priv->regs->ecntrl, ECNTRL_INIT_SETTINGS);

        /* Copy the station address into the dev structure */
        memcpy(dev->dev_addr, einfo->mac_addr, MAC_ADDR_LEN);

        /* Set the dev->base_addr to the gfar reg region */
        dev->base_addr = (unsigned long) (priv->regs);

        SET_NETDEV_DEV(dev, &pdev->dev);

        /* Fill in the dev structure */
        dev->open = gfar_enet_open;
        dev->hard_start_xmit = gfar_start_xmit;
        dev->tx_timeout = gfar_timeout;
        dev->watchdog_timeo = TX_TIMEOUT;
#ifdef CONFIG_GFAR_NAPI
        netif_napi_add(dev, &priv->napi, gfar_poll, GFAR_DEV_WEIGHT);
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
        dev->poll_controller = gfar_netpoll;
#endif
        dev->stop = gfar_close;
        dev->change_mtu = gfar_change_mtu;
        dev->mtu = 1500;
        dev->set_multicast_list = gfar_set_multi;

        dev->ethtool_ops = &gfar_ethtool_ops;

        if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
                priv->rx_csum_enable = 1;
                dev->features |= NETIF_F_IP_CSUM;
        } else
                priv->rx_csum_enable = 0;

        priv->vlgrp = NULL;

        if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
                dev->vlan_rx_register = gfar_vlan_rx_register;

                dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

                priv->vlan_enable = 1;
        }

        if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
                priv->extended_hash = 1;
                priv->hash_width = 9;

                priv->hash_regs[0] = &priv->regs->igaddr0;
                priv->hash_regs[1] = &priv->regs->igaddr1;
                priv->hash_regs[2] = &priv->regs->igaddr2;
                priv->hash_regs[3] = &priv->regs->igaddr3;
                priv->hash_regs[4] = &priv->regs->igaddr4;
                priv->hash_regs[5] = &priv->regs->igaddr5;
                priv->hash_regs[6] = &priv->regs->igaddr6;
                priv->hash_regs[7] = &priv->regs->igaddr7;
                priv->hash_regs[8] = &priv->regs->gaddr0;
                priv->hash_regs[9] = &priv->regs->gaddr1;
                priv->hash_regs[10] = &priv->regs->gaddr2;
                priv->hash_regs[11] = &priv->regs->gaddr3;
                priv->hash_regs[12] = &priv->regs->gaddr4;
                priv->hash_regs[13] = &priv->regs->gaddr5;
                priv->hash_regs[14] = &priv->regs->gaddr6;
                priv->hash_regs[15] = &priv->regs->gaddr7;

        } else {
                priv->extended_hash = 0;
                priv->hash_width = 8;

                priv->hash_regs[0] = &priv->regs->gaddr0;
                priv->hash_regs[1] = &priv->regs->gaddr1;
                priv->hash_regs[2] = &priv->regs->gaddr2;
                priv->hash_regs[3] = &priv->regs->gaddr3;
                priv->hash_regs[4] = &priv->regs->gaddr4;
                priv->hash_regs[5] = &priv->regs->gaddr5;
                priv->hash_regs[6] = &priv->regs->gaddr6;
                priv->hash_regs[7] = &priv->regs->gaddr7;
        }

        if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_PADDING)
                priv->padding = DEFAULT_PADDING;
        else
                priv->padding = 0;

        if (dev->features & NETIF_F_IP_CSUM)
                dev->hard_header_len += GMAC_FCB_LEN;

        priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
        priv->tx_ring_size = DEFAULT_TX_RING_SIZE;
        priv->rx_ring_size = DEFAULT_RX_RING_SIZE;

        priv->txcoalescing = DEFAULT_TX_COALESCE;
        priv->txcount = DEFAULT_TXCOUNT;
        priv->txtime = DEFAULT_TXTIME;
        priv->rxcoalescing = DEFAULT_RX_COALESCE;
        priv->rxcount = DEFAULT_RXCOUNT;
        priv->rxtime = DEFAULT_RXTIME;

        /* Enable most messages by default */
        priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;

        err = register_netdev(dev);

        if (err) {
                printk(KERN_ERR "%s: Cannot register net device, aborting.\n",
                                dev->name);
                goto register_fail;
        }

        /* Create all the sysfs files */
        gfar_init_sysfs(dev);

        /* Print out the device info */
        printk(KERN_INFO DEVICE_NAME "%s\n",
               dev->name, print_mac(mac, dev->dev_addr));

        /* Even more device info helps when determining which kernel */
        /* provided which set of benchmarks. */
#ifdef CONFIG_GFAR_NAPI
        printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name);
#else
        printk(KERN_INFO "%s: Running with NAPI disabled\n", dev->name);
#endif
        printk(KERN_INFO "%s: %d/%d RX/TX BD ring size\n",
               dev->name, priv->rx_ring_size, priv->tx_ring_size);

        return 0;

register_fail:
        iounmap(priv->regs);
regs_fail:
        free_netdev(dev);
        return err;
}

static int gfar_remove(struct platform_device *pdev)
{
        struct net_device *dev = platform_get_drvdata(pdev);
        struct gfar_private *priv = netdev_priv(dev);

        platform_set_drvdata(pdev, NULL);

        iounmap(priv->regs);
        free_netdev(dev);

        return 0;
}


/* Reads the controller's registers to determine what interface
 * connects it to the PHY.
 */
static phy_interface_t gfar_get_interface(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);
        u32 ecntrl = gfar_read(&priv->regs->ecntrl);

        if (ecntrl & ECNTRL_SGMII_MODE)
                return PHY_INTERFACE_MODE_SGMII;

        if (ecntrl & ECNTRL_TBI_MODE) {
                if (ecntrl & ECNTRL_REDUCED_MODE)
                        return PHY_INTERFACE_MODE_RTBI;
                else
                        return PHY_INTERFACE_MODE_TBI;
        }

        if (ecntrl & ECNTRL_REDUCED_MODE) {
                if (ecntrl & ECNTRL_REDUCED_MII_MODE)
                        return PHY_INTERFACE_MODE_RMII;
                else {
                        phy_interface_t interface = priv->einfo->interface;

                        /*
                         * This isn't autodetected right now, so it must
                         * be set by the device tree or platform code.
                         */
                        if (interface == PHY_INTERFACE_MODE_RGMII_ID)
                                return PHY_INTERFACE_MODE_RGMII_ID;

                        return PHY_INTERFACE_MODE_RGMII;
                }
        }

        if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
                return PHY_INTERFACE_MODE_GMII;

        return PHY_INTERFACE_MODE_MII;
}


/* Initializes driver's PHY state, and attaches to the PHY.
 * Returns 0 on success.
 */
static int init_phy(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);
        uint gigabit_support =
                priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
                SUPPORTED_1000baseT_Full : 0;
        struct phy_device *phydev;
        char phy_id[BUS_ID_SIZE];
        phy_interface_t interface;

        priv->oldlink = 0;
        priv->oldspeed = 0;
        priv->oldduplex = -1;

        snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT, priv->einfo->bus_id, priv->einfo->phy_id);

        interface = gfar_get_interface(dev);

        phydev = phy_connect(dev, phy_id, &adjust_link, 0, interface);

        if (interface == PHY_INTERFACE_MODE_SGMII)
                gfar_configure_serdes(dev);

        if (IS_ERR(phydev)) {
                printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
                return PTR_ERR(phydev);
        }

        /* Remove any features not supported by the controller */
        phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
        phydev->advertising = phydev->supported;

        priv->phydev = phydev;

        return 0;
}

static void gfar_configure_serdes(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);
        struct gfar_mii __iomem *regs =
                        (void __iomem *)&priv->regs->gfar_mii_regs;

        /* Initialise TBI i/f to communicate with serdes (lynx phy) */

        /* Single clk mode, MII mode off (for serdes communication) */
        gfar_local_mdio_write(regs, TBIPA_VALUE, MII_TBICON, TBICON_CLK_SELECT);

        /* Supported pause and full-duplex, no half-duplex */
        gfar_local_mdio_write(regs, TBIPA_VALUE, MII_ADVERTISE,
                        ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
                        ADVERTISE_1000XPSE_ASYM);

        /* ANEG enable, restart ANEG, full duplex mode, speed[1] set */
        gfar_local_mdio_write(regs, TBIPA_VALUE, MII_BMCR, BMCR_ANENABLE |
                        BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000);
}

static void init_registers(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);

        /* Clear IEVENT */
        gfar_write(&priv->regs->ievent, IEVENT_INIT_CLEAR);

        /* Initialize IMASK */
        gfar_write(&priv->regs->imask, IMASK_INIT_CLEAR);

        /* Init hash registers to zero */
        gfar_write(&priv->regs->igaddr0, 0);
        gfar_write(&priv->regs->igaddr1, 0);
        gfar_write(&priv->regs->igaddr2, 0);
        gfar_write(&priv->regs->igaddr3, 0);
        gfar_write(&priv->regs->igaddr4, 0);
        gfar_write(&priv->regs->igaddr5, 0);
        gfar_write(&priv->regs->igaddr6, 0);
        gfar_write(&priv->regs->igaddr7, 0);

        gfar_write(&priv->regs->gaddr0, 0);
        gfar_write(&priv->regs->gaddr1, 0);
        gfar_write(&priv->regs->gaddr2, 0);
        gfar_write(&priv->regs->gaddr3, 0);
        gfar_write(&priv->regs->gaddr4, 0);
        gfar_write(&priv->regs->gaddr5, 0);
        gfar_write(&priv->regs->gaddr6, 0);
        gfar_write(&priv->regs->gaddr7, 0);

        /* Zero out the rmon mib registers if it has them */
        if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
                memset_io(&(priv->regs->rmon), 0, sizeof (struct rmon_mib));

                /* Mask off the CAM interrupts */
                gfar_write(&priv->regs->rmon.cam1, 0xffffffff);
                gfar_write(&priv->regs->rmon.cam2, 0xffffffff);
        }

        /* Initialize the max receive buffer length */
        gfar_write(&priv->regs->mrblr, priv->rx_buffer_size);

        /* Initialize the Minimum Frame Length Register */
        gfar_write(&priv->regs->minflr, MINFLR_INIT_SETTINGS);

        /* Assign the TBI an address which won't conflict with the PHYs */
        gfar_write(&priv->regs->tbipa, TBIPA_VALUE);
}


/* Halt the receive and transmit queues */
void gfar_halt(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);
        struct gfar __iomem *regs = priv->regs;
        u32 tempval;

        /* Mask all interrupts */
        gfar_write(&regs->imask, IMASK_INIT_CLEAR);

        /* Clear all interrupts */
        gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);

        /* Stop the DMA, and wait for it to stop */
        tempval = gfar_read(&priv->regs->dmactrl);
        if ((tempval & (DMACTRL_GRS | DMACTRL_GTS))
            != (DMACTRL_GRS | DMACTRL_GTS)) {
                tempval |= (DMACTRL_GRS | DMACTRL_GTS);
                gfar_write(&priv->regs->dmactrl, tempval);

                while (!(gfar_read(&priv->regs->ievent) &
                         (IEVENT_GRSC | IEVENT_GTSC)))
                        cpu_relax();
        }

        /* Disable Rx and Tx */
        tempval = gfar_read(&regs->maccfg1);
        tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
        gfar_write(&regs->maccfg1, tempval);
}

void stop_gfar(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);
        struct gfar __iomem *regs = priv->regs;
        unsigned long flags;

        phy_stop(priv->phydev);

        /* Lock it down */
        spin_lock_irqsave(&priv->txlock, flags);
        spin_lock(&priv->rxlock);

        gfar_halt(dev);

        spin_unlock(&priv->rxlock);
        spin_unlock_irqrestore(&priv->txlock, flags);

        /* Free the IRQs */
        if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
                free_irq(priv->interruptError, dev);
                free_irq(priv->interruptTransmit, dev);
                free_irq(priv->interruptReceive, dev);
        } else {
                free_irq(priv->interruptTransmit, dev);
        }

        free_skb_resources(priv);

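        /* The Tx and Rx BD rings were allocated as a single coherent
         * block in startup_gfar(); tbase0 still holds its bus address,
         * so it is reused here as the dma_handle argument. */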
        dma_free_coherent(&dev->dev,
                        sizeof(struct txbd8)*priv->tx_ring_size
                        + sizeof(struct rxbd8)*priv->rx_ring_size,
                        priv->tx_bd_base,
                        gfar_read(&regs->tbase0));
}

/* If there are any tx skbs or rx skbs still around, free them.
 * Then free tx_skbuff and rx_skbuff */
static void free_skb_resources(struct gfar_private *priv)
{
        struct rxbd8 *rxbdp;
        struct txbd8 *txbdp;
        int i;

        /* Go through all the buffer descriptors and free their data buffers */
        txbdp = priv->tx_bd_base;

        for (i = 0; i < priv->tx_ring_size; i++) {

                if (priv->tx_skbuff[i]) {
                        dma_unmap_single(&priv->dev->dev, txbdp->bufPtr,
                                        txbdp->length,
                                        DMA_TO_DEVICE);
                        dev_kfree_skb_any(priv->tx_skbuff[i]);
                        priv->tx_skbuff[i] = NULL;
                }

                txbdp++;
        }

        kfree(priv->tx_skbuff);

        rxbdp = priv->rx_bd_base;

        /* rx_skbuff is not guaranteed to be allocated, so only
         * free it and its contents if it is allocated */
        if (priv->rx_skbuff != NULL) {
                for (i = 0; i < priv->rx_ring_size; i++) {
                        if (priv->rx_skbuff[i]) {
                                dma_unmap_single(&priv->dev->dev, rxbdp->bufPtr,
                                                priv->rx_buffer_size,
                                                DMA_FROM_DEVICE);

                                dev_kfree_skb_any(priv->rx_skbuff[i]);
                                priv->rx_skbuff[i] = NULL;
                        }

                        rxbdp->status = 0;
                        rxbdp->length = 0;
                        rxbdp->bufPtr = 0;

                        rxbdp++;
                }

                kfree(priv->rx_skbuff);
        }
}

void gfar_start(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);
        struct gfar __iomem *regs = priv->regs;
        u32 tempval;

        /* Enable Rx and Tx in MACCFG1 */
        tempval = gfar_read(&regs->maccfg1);
        tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
        gfar_write(&regs->maccfg1, tempval);

        /* Initialize DMACTRL to have WWR and WOP */
        tempval = gfar_read(&priv->regs->dmactrl);
        tempval |= DMACTRL_INIT_SETTINGS;
        gfar_write(&priv->regs->dmactrl, tempval);

        /* Make sure we aren't stopped */
        tempval = gfar_read(&priv->regs->dmactrl);
        tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
        gfar_write(&priv->regs->dmactrl, tempval);

        /* Clear THLT/RHLT, so that the DMA starts polling now */
        gfar_write(&regs->tstat, TSTAT_CLEAR_THALT);
        gfar_write(&regs->rstat, RSTAT_CLEAR_RHALT);

        /* Unmask the interrupts we look for */
        gfar_write(&regs->imask, IMASK_DEFAULT);
}

/* Bring the controller up and running */
int startup_gfar(struct net_device *dev)
{
        struct txbd8 *txbdp;
        struct rxbd8 *rxbdp;
        dma_addr_t addr = 0;
        unsigned long vaddr;
        int i;
        struct gfar_private *priv = netdev_priv(dev);
        struct gfar __iomem *regs = priv->regs;
        int err = 0;
        u32 rctrl = 0;
        u32 attrs = 0;

        gfar_write(&regs->imask, IMASK_INIT_CLEAR);

        /* Allocate memory for the buffer descriptors */
        vaddr = (unsigned long) dma_alloc_coherent(&dev->dev,
                        sizeof (struct txbd8) * priv->tx_ring_size +
                        sizeof (struct rxbd8) * priv->rx_ring_size,
                        &addr, GFP_KERNEL);

        if (vaddr == 0) {
                if (netif_msg_ifup(priv))
                        printk(KERN_ERR "%s: Could not allocate buffer descriptors!\n",
                                        dev->name);
                return -ENOMEM;
        }

        priv->tx_bd_base = (struct txbd8 *) vaddr;

        /* enet DMA only understands physical addresses */
        gfar_write(&regs->tbase0, addr);

        /* Start the rx descriptor ring where the tx ring leaves off */
        addr = addr + sizeof (struct txbd8) * priv->tx_ring_size;
        vaddr = vaddr + sizeof (struct txbd8) * priv->tx_ring_size;
        priv->rx_bd_base = (struct rxbd8 *) vaddr;
        gfar_write(&regs->rbase0, addr);

        /* Setup the skbuff rings */
        priv->tx_skbuff =
            (struct sk_buff **) kmalloc(sizeof (struct sk_buff *) *
                                        priv->tx_ring_size, GFP_KERNEL);

        if (NULL == priv->tx_skbuff) {
                if (netif_msg_ifup(priv))
                        printk(KERN_ERR "%s: Could not allocate tx_skbuff\n",
                                        dev->name);
                err = -ENOMEM;
                goto tx_skb_fail;
        }

        for (i = 0; i < priv->tx_ring_size; i++)
                priv->tx_skbuff[i] = NULL;

        priv->rx_skbuff =
            (struct sk_buff **) kmalloc(sizeof (struct sk_buff *) *
                                        priv->rx_ring_size, GFP_KERNEL);

        if (NULL == priv->rx_skbuff) {
                if (netif_msg_ifup(priv))
                        printk(KERN_ERR "%s: Could not allocate rx_skbuff\n",
                                        dev->name);
                err = -ENOMEM;
                goto rx_skb_fail;
        }

        for (i = 0; i < priv->rx_ring_size; i++)
                priv->rx_skbuff[i] = NULL;

        /* Initialize some variables in our dev structure */
        priv->dirty_tx = priv->cur_tx = priv->tx_bd_base;
        priv->cur_rx = priv->rx_bd_base;
        priv->skb_curtx = priv->skb_dirtytx = 0;
        priv->skb_currx = 0;

        /* Initialize Transmit Descriptor Ring */
        txbdp = priv->tx_bd_base;
        for (i = 0; i < priv->tx_ring_size; i++) {
                txbdp->status = 0;
                txbdp->length = 0;
                txbdp->bufPtr = 0;
                txbdp++;
        }

        /* Set the last descriptor in the ring to indicate wrap */
        txbdp--;
        txbdp->status |= TXBD_WRAP;

        rxbdp = priv->rx_bd_base;
        for (i = 0; i < priv->rx_ring_size; i++) {
                struct sk_buff *skb;

                skb = gfar_new_skb(dev);

                if (!skb) {
                        printk(KERN_ERR "%s: Can't allocate RX buffers\n",
                                        dev->name);

                        goto err_rxalloc_fail;
                }

                priv->rx_skbuff[i] = skb;

                gfar_new_rxbdp(dev, rxbdp, skb);

                rxbdp++;
        }

        /* Set the last descriptor in the ring to wrap */
        rxbdp--;
        rxbdp->status |= RXBD_WRAP;

        /* If the device has multiple interrupts, register for
         * them.  Otherwise, only register for the one */
        if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
                /* Install our interrupt handlers for Error,
                 * Transmit, and Receive */
                if (request_irq(priv->interruptError, gfar_error,
                                0, "enet_error", dev) < 0) {
                        if (netif_msg_intr(priv))
                                printk(KERN_ERR "%s: Can't get IRQ %d\n",
                                        dev->name, priv->interruptError);

                        err = -1;
                        goto err_irq_fail;
                }

                if (request_irq(priv->interruptTransmit, gfar_transmit,
                                0, "enet_tx", dev) < 0) {
                        if (netif_msg_intr(priv))
                                printk(KERN_ERR "%s: Can't get IRQ %d\n",
                                        dev->name, priv->interruptTransmit);

                        err = -1;

                        goto tx_irq_fail;
                }

                if (request_irq(priv->interruptReceive, gfar_receive,
                                0, "enet_rx", dev) < 0) {
                        if (netif_msg_intr(priv))
                                printk(KERN_ERR "%s: Can't get IRQ %d (receive0)\n",
                                                dev->name, priv->interruptReceive);

                        err = -1;
                        goto rx_irq_fail;
                }
        } else {
                if (request_irq(priv->interruptTransmit, gfar_interrupt,
                                0, "gfar_interrupt", dev) < 0) {
                        if (netif_msg_intr(priv))
                                printk(KERN_ERR "%s: Can't get IRQ %d\n",
                                        dev->name, priv->interruptTransmit);

                        err = -1;
                        goto err_irq_fail;
                }
        }

        phy_start(priv->phydev);

        /* Configure the coalescing support */
        if (priv->txcoalescing)
                gfar_write(&regs->txic,
                           mk_ic_value(priv->txcount, priv->txtime));
        else
                gfar_write(&regs->txic, 0);

        if (priv->rxcoalescing)
                gfar_write(&regs->rxic,
                           mk_ic_value(priv->rxcount, priv->rxtime));
        else
                gfar_write(&regs->rxic, 0);

        if (priv->rx_csum_enable)
                rctrl |= RCTRL_CHECKSUMMING;

        if (priv->extended_hash) {
                rctrl |= RCTRL_EXTHASH;

                gfar_clear_exact_match(dev);
                rctrl |= RCTRL_EMEN;
        }

        if (priv->vlan_enable)
                rctrl |= RCTRL_VLAN;

        if (priv->padding) {
                rctrl &= ~RCTRL_PAL_MASK;
                rctrl |= RCTRL_PADDING(priv->padding);
        }

        /* Init rctrl based on our settings */
        gfar_write(&priv->regs->rctrl, rctrl);

        if (dev->features & NETIF_F_IP_CSUM)
                gfar_write(&priv->regs->tctrl, TCTRL_INIT_CSUM);

        /* Set the extraction length and index */
        attrs = ATTRELI_EL(priv->rx_stash_size) |
                ATTRELI_EI(priv->rx_stash_index);

        gfar_write(&priv->regs->attreli, attrs);

        /* Start with defaults, and add stashing or locking
         * depending on the appropriate variables */
        attrs = ATTR_INIT_SETTINGS;

        if (priv->bd_stash_en)
                attrs |= ATTR_BDSTASH;

        if (priv->rx_stash_size != 0)
                attrs |= ATTR_BUFSTASH;

        gfar_write(&priv->regs->attr, attrs);

        gfar_write(&priv->regs->fifo_tx_thr, priv->fifo_threshold);
        gfar_write(&priv->regs->fifo_tx_starve, priv->fifo_starve);
        gfar_write(&priv->regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);

        /* Start the controller */
        gfar_start(dev);

        return 0;

rx_irq_fail:
        free_irq(priv->interruptTransmit, dev);
tx_irq_fail:
        free_irq(priv->interruptError, dev);
err_irq_fail:
err_rxalloc_fail:
rx_skb_fail:
        free_skb_resources(priv);
tx_skb_fail:
        dma_free_coherent(&dev->dev,
                        sizeof(struct txbd8)*priv->tx_ring_size
                        + sizeof(struct rxbd8)*priv->rx_ring_size,
                        priv->tx_bd_base,
                        gfar_read(&regs->tbase0));

        return err;
}

/* Called when something needs to use the ethernet device */
/* Returns 0 for success. */
static int gfar_enet_open(struct net_device *dev)
{
#ifdef CONFIG_GFAR_NAPI
        struct gfar_private *priv = netdev_priv(dev);
#endif
        int err;

#ifdef CONFIG_GFAR_NAPI
        napi_enable(&priv->napi);
#endif

        /* Initialize a bunch of registers */
        init_registers(dev);

        gfar_set_mac_address(dev);

        err = init_phy(dev);

        if (err) {
#ifdef CONFIG_GFAR_NAPI
                napi_disable(&priv->napi);
#endif
                return err;
        }

        err = startup_gfar(dev);
        if (err) {
#ifdef CONFIG_GFAR_NAPI
                napi_disable(&priv->napi);
#endif
                return err;
        }

        netif_start_queue(dev);

        return err;
}

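/* Push a frame control block onto the front of the skb and zero it;
 * the caller fills in the checksum and VLAN fields as needed */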
static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb, struct txbd8 *bdp)
{
        struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);

        memset(fcb, 0, GMAC_FCB_LEN);

        return fcb;
}

static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb)
{
        u8 flags = 0;

        /* If we're here, it's an IP packet with a TCP or UDP
         * payload.  We set it to checksum, using a pseudo-header
         * we provide
         */
        flags = TXFCB_DEFAULT;

        /* Tell the controller what the protocol is */
        /* And provide the already calculated phcs */
        if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
                flags |= TXFCB_UDP;
                fcb->phcs = udp_hdr(skb)->check;
        } else
                fcb->phcs = tcp_hdr(skb)->check;

        /* l3os is the distance between the start of the
         * frame (skb->data) and the start of the IP hdr.
         * l4os is the distance between the start of the
         * l3 hdr and the l4 hdr */
        fcb->l3os = (u16)(skb_network_offset(skb) - GMAC_FCB_LEN);
        fcb->l4os = skb_network_header_len(skb);

        fcb->flags = flags;
}

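/* Mark the FCB so the controller inserts the skb's VLAN tag into the
 * outgoing frame */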
inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
{
        fcb->flags |= TXFCB_VLN;
        fcb->vlctl = vlan_tx_tag_get(skb);
}

/* This is called by the kernel when a frame is ready for transmission. */
/* It is pointed to by the dev->hard_start_xmit function pointer */
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);
        struct txfcb *fcb = NULL;
        struct txbd8 *txbdp;
        u16 status;
        unsigned long flags;

        /* Update transmit stats */
        dev->stats.tx_bytes += skb->len;

        /* Lock priv now */
        spin_lock_irqsave(&priv->txlock, flags);

        /* Point at the first free tx descriptor */
        txbdp = priv->cur_tx;

        /* Clear all but the WRAP status flags */
        status = txbdp->status & TXBD_WRAP;

        /* Set up checksumming */
        if (likely((dev->features & NETIF_F_IP_CSUM)
                        && (CHECKSUM_PARTIAL == skb->ip_summed))) {
                fcb = gfar_add_fcb(skb, txbdp);
                status |= TXBD_TOE;
                gfar_tx_checksum(skb, fcb);
        }

        if (priv->vlan_enable &&
                        unlikely(priv->vlgrp && vlan_tx_tag_present(skb))) {
                if (unlikely(NULL == fcb)) {
                        fcb = gfar_add_fcb(skb, txbdp);
                        status |= TXBD_TOE;
                }

                gfar_tx_vlan(skb, fcb);
        }

        /* Set buffer length and pointer */
        txbdp->length = skb->len;
        txbdp->bufPtr = dma_map_single(&dev->dev, skb->data,
                        skb->len, DMA_TO_DEVICE);

        /* Save the skb pointer so we can free it later */
        priv->tx_skbuff[priv->skb_curtx] = skb;

        /* Update the current skb pointer (wrapping if this was the last) */
        priv->skb_curtx =
            (priv->skb_curtx + 1) & TX_RING_MOD_MASK(priv->tx_ring_size);

        /* Flag the BD as interrupt-causing */
        status |= TXBD_INTERRUPT;

        /* Flag the BD as ready to go, last in frame, and  */
        /* in need of CRC */
        status |= (TXBD_READY | TXBD_LAST | TXBD_CRC);

        dev->trans_start = jiffies;

        /* The powerpc-specific eieio() is used, as wmb() has too strong
         * semantics (it requires synchronization between cacheable and
         * uncacheable mappings, which eieio doesn't provide and which we
         * don't need), thus requiring a more expensive sync instruction.  At
         * some point, the set of architecture-independent barrier functions
         * should be expanded to include weaker barriers.
         */

        eieio();
        txbdp->status = status;

        /* If this was the last BD in the ring, the next one */
        /* is at the beginning of the ring */
        if (txbdp->status & TXBD_WRAP)
                txbdp = priv->tx_bd_base;
        else
                txbdp++;

        /* If the next BD still needs to be cleaned up, then the bds
           are full.  We need to tell the kernel to stop sending us stuff. */
        if (txbdp == priv->dirty_tx) {
                netif_stop_queue(dev);

                dev->stats.tx_fifo_errors++;
        }

        /* Update the current txbd to the next one */
        priv->cur_tx = txbdp;

        /* Tell the DMA to go go go */
        gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);

        /* Unlock priv */
        spin_unlock_irqrestore(&priv->txlock, flags);

        return 0;
}

/* Stops the kernel queue, and halts the controller */
static int gfar_close(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);

#ifdef CONFIG_GFAR_NAPI
        napi_disable(&priv->napi);
#endif

        stop_gfar(dev);

        /* Disconnect from the PHY */
        phy_disconnect(priv->phydev);
        priv->phydev = NULL;

        netif_stop_queue(dev);

        return 0;
}

/* Changes the mac address if the controller is not running. */
int gfar_set_mac_address(struct net_device *dev)
{
        gfar_set_mac_for_addr(dev, 0, dev->dev_addr);

        return 0;
}


/* Enables and disables VLAN insertion/extraction */
static void gfar_vlan_rx_register(struct net_device *dev,
                struct vlan_group *grp)
{
        struct gfar_private *priv = netdev_priv(dev);
        unsigned long flags;
        u32 tempval;

        spin_lock_irqsave(&priv->rxlock, flags);

        priv->vlgrp = grp;

        if (grp) {
                /* Enable VLAN tag insertion */
                tempval = gfar_read(&priv->regs->tctrl);
                tempval |= TCTRL_VLINS;

                gfar_write(&priv->regs->tctrl, tempval);

                /* Enable VLAN tag extraction */
                tempval = gfar_read(&priv->regs->rctrl);
                tempval |= RCTRL_VLEX;
                gfar_write(&priv->regs->rctrl, tempval);
        } else {
                /* Disable VLAN tag insertion */
                tempval = gfar_read(&priv->regs->tctrl);
                tempval &= ~TCTRL_VLINS;
                gfar_write(&priv->regs->tctrl, tempval);

                /* Disable VLAN tag extraction */
                tempval = gfar_read(&priv->regs->rctrl);
                tempval &= ~RCTRL_VLEX;
                gfar_write(&priv->regs->rctrl, tempval);
        }

        spin_unlock_irqrestore(&priv->rxlock, flags);
}

static int gfar_change_mtu(struct net_device *dev, int new_mtu)
{
        int tempsize, tempval;
        struct gfar_private *priv = netdev_priv(dev);
        int oldsize = priv->rx_buffer_size;
        int frame_size = new_mtu + ETH_HLEN;

        if (priv->vlan_enable)
                frame_size += VLAN_HLEN;

        if (gfar_uses_fcb(priv))
                frame_size += GMAC_FCB_LEN;

        frame_size += priv->padding;

        if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
                if (netif_msg_drv(priv))
                        printk(KERN_ERR "%s: Invalid MTU setting\n",
                                        dev->name);
                return -EINVAL;
        }

        tempsize =
            (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
            INCREMENTAL_BUFFER_SIZE;

        /* Only stop and start the controller if it isn't already
         * stopped, and we changed something */
        if ((oldsize != tempsize) && (dev->flags & IFF_UP))
                stop_gfar(dev);

        priv->rx_buffer_size = tempsize;

        dev->mtu = new_mtu;

        gfar_write(&priv->regs->mrblr, priv->rx_buffer_size);
        gfar_write(&priv->regs->maxfrm, priv->rx_buffer_size);

        /* If the mtu is larger than the max size for standard
         * ethernet frames (ie, a jumbo frame), then set maccfg2
         * to allow huge frames, and to check the length */
        tempval = gfar_read(&priv->regs->maccfg2);

        if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE)
                tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
        else
                tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);

        gfar_write(&priv->regs->maccfg2, tempval);

        if ((oldsize != tempsize) && (dev->flags & IFF_UP))
                startup_gfar(dev);

        return 0;
}

/* gfar_timeout gets called when a packet has not been
 * transmitted after a set amount of time.
 * For now, assume that clearing out all the structures, and
 * starting over will fix the problem. */
static void gfar_timeout(struct net_device *dev)
{
        dev->stats.tx_errors++;

        if (dev->flags & IFF_UP) {
                stop_gfar(dev);
                startup_gfar(dev);
        }

        netif_schedule(dev);
}

/* Reclaim sent buffers from the Tx ring (called from the Tx interrupt
 * handler) */
int gfar_clean_tx_ring(struct net_device *dev)
{
        struct txbd8 *bdp;
        struct gfar_private *priv = netdev_priv(dev);
        int howmany = 0;

        bdp = priv->dirty_tx;
        while ((bdp->status & TXBD_READY) == 0) {
                /* If dirty_tx and cur_tx are the same, then either the */
                /* ring is empty or full now (it could only be full in the beginning, */
                /* obviously).  If it is empty, we are done. */
                if ((bdp == priv->cur_tx) && (netif_queue_stopped(dev) == 0))
                        break;

                howmany++;

                /* Deferred means some collisions occurred during transmit, */
                /* but we eventually sent the packet. */
                if (bdp->status & TXBD_DEF)
                        dev->stats.collisions++;

                /* Free the sk buffer associated with this TxBD */
                dev_kfree_skb_irq(priv->tx_skbuff[priv->skb_dirtytx]);

                priv->tx_skbuff[priv->skb_dirtytx] = NULL;
                priv->skb_dirtytx =
                    (priv->skb_dirtytx +
                     1) & TX_RING_MOD_MASK(priv->tx_ring_size);

                /* Clean BD length for empty detection */
                bdp->length = 0;

                /* update bdp to point at next bd in the ring (wrapping if necessary) */
                if (bdp->status & TXBD_WRAP)
                        bdp = priv->tx_bd_base;
                else
                        bdp++;

                /* Move dirty_tx to be the next bd */
                priv->dirty_tx = bdp;

                /* We freed a buffer, so now we can restart transmission */
                if (netif_queue_stopped(dev))
                        netif_wake_queue(dev);
        } /* while ((bdp->status & TXBD_READY) == 0) */

        dev->stats.tx_packets += howmany;

        return howmany;
}

/* Interrupt Handler for Transmit complete */
static irqreturn_t gfar_transmit(int irq, void *dev_id)
{
        struct net_device *dev = (struct net_device *) dev_id;
        struct gfar_private *priv = netdev_priv(dev);

        /* Clear IEVENT */
        gfar_write(&priv->regs->ievent, IEVENT_TX_MASK);

        /* Lock priv */
        spin_lock(&priv->txlock);

        gfar_clean_tx_ring(dev);

        /* If we are coalescing the interrupts, reset the timer */
        /* Otherwise, clear it */
        if (likely(priv->txcoalescing)) {
                gfar_write(&priv->regs->txic, 0);
                gfar_write(&priv->regs->txic,
                           mk_ic_value(priv->txcount, priv->txtime));
        }

        spin_unlock(&priv->txlock);

        return IRQ_HANDLED;
}

static void gfar_new_rxbdp(struct net_device *dev, struct rxbd8 *bdp,
                struct sk_buff *skb)
{
        struct gfar_private *priv = netdev_priv(dev);
        u32 *status_len = (u32 *)bdp;
        u16 flags;

        bdp->bufPtr = dma_map_single(&dev->dev, skb->data,
                        priv->rx_buffer_size, DMA_FROM_DEVICE);

        flags = RXBD_EMPTY | RXBD_INTERRUPT;

        if (bdp == priv->rx_bd_base + priv->rx_ring_size - 1)
                flags |= RXBD_WRAP;

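        /* Make sure the controller sees the new bufPtr before the
         * descriptor is handed back: eieio() orders the writes, and the
         * single 32-bit store below sets the status halfword while
         * clearing the length halfword in one access. */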
        eieio();

        *status_len = (u32)flags << 16;
}


struct sk_buff *gfar_new_skb(struct net_device *dev)
{
        unsigned int alignamount;
        struct gfar_private *priv = netdev_priv(dev);
        struct sk_buff *skb = NULL;

        /* We have to allocate the skb, so keep trying till we succeed */
        skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT);

        if (!skb)
                return NULL;

        alignamount = RXBUF_ALIGNMENT -
                (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1));

        /* We need the data buffer to be aligned properly.  We will reserve
         * as many bytes as needed to align the data properly
         */
        skb_reserve(skb, alignamount);

        return skb;
}

static inline void count_errors(unsigned short status, struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);
        struct net_device_stats *stats = &dev->stats;
        struct gfar_extra_stats *estats = &priv->extra_stats;

        /* If the packet was truncated, none of the other errors
         * matter */
        if (status & RXBD_TRUNCATED) {
                stats->rx_length_errors++;

                estats->rx_trunc++;

                return;
        }
        /* Count the errors, if there were any */
        if (status & (RXBD_LARGE | RXBD_SHORT)) {
                stats->rx_length_errors++;

                if (status & RXBD_LARGE)
                        estats->rx_large++;
                else
                        estats->rx_short++;
        }
        if (status & RXBD_NONOCTET) {
                stats->rx_frame_errors++;
                estats->rx_nonoctet++;
        }
        if (status & RXBD_CRCERR) {
                estats->rx_crcerr++;
                stats->rx_crc_errors++;
        }
        if (status & RXBD_OVERRUN) {
                estats->rx_overrun++;
                stats->rx_over_errors++;
        }
}

irqreturn_t gfar_receive(int irq, void *dev_id)
{
        struct net_device *dev = (struct net_device *) dev_id;
        struct gfar_private *priv = netdev_priv(dev);
#ifdef CONFIG_GFAR_NAPI
        u32 tempval;
#else
        unsigned long flags;
#endif

        /* support NAPI */
#ifdef CONFIG_GFAR_NAPI
        /* Clear IEVENT, so interrupts aren't called again
         * because of the packets that have already arrived */
        gfar_write(&priv->regs->ievent, IEVENT_RTX_MASK);

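        /* Hold off further Rx/Tx interrupts while polling is scheduled;
         * the poll routine is expected to re-enable them once the rings
         * have been drained */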
        if (netif_rx_schedule_prep(dev, &priv->napi)) {
                tempval = gfar_read(&priv->regs->imask);
                tempval &= IMASK_RTX_DISABLED;
                gfar_write(&priv->regs->imask, tempval);

                __netif_rx_schedule(dev, &priv->napi);
        } else {
                if (netif_msg_rx_err(priv))
                        printk(KERN_DEBUG "%s: receive called twice (%x)[%x]\n",
                                dev->name, gfar_read(&priv->regs->ievent),
                                gfar_read(&priv->regs->imask));
        }
#else
        /* Clear IEVENT, so rx interrupt isn't called again
         * because of this interrupt */
        gfar_write(&priv->regs->ievent, IEVENT_RX_MASK);

        spin_lock_irqsave(&priv->rxlock, flags);
        gfar_clean_rx_ring(dev, priv->rx_ring_size);

        /* If we are coalescing interrupts, update the timer */
        /* Otherwise, clear it */
        if (likely(priv->rxcoalescing)) {
                gfar_write(&priv->regs->rxic, 0);
                gfar_write(&priv->regs->rxic,
                           mk_ic_value(priv->rxcount, priv->rxtime));
        }

        spin_unlock_irqrestore(&priv->rxlock, flags);
#endif

        return IRQ_HANDLED;
}

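/* Pass a VLAN-tagged frame up the stack, using the hardware-accelerated
 * receive path that matches the NAPI configuration */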
1472 static inline int gfar_rx_vlan(struct sk_buff *skb,
1473                 struct vlan_group *vlgrp, unsigned short vlctl)
1474 {
1475 #ifdef CONFIG_GFAR_NAPI
1476         return vlan_hwaccel_receive_skb(skb, vlgrp, vlctl);
1477 #else
1478         return vlan_hwaccel_rx(skb, vlgrp, vlctl);
1479 #endif
1480 }
1481
1482 static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
1483 {
1484         /* If valid headers were found, and valid sums
1485          * were verified, then we tell the kernel that no
1486          * checksumming is necessary.  Otherwise, it is */
1487         if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
1488                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1489         else
1490                 skb->ip_summed = CHECKSUM_NONE;
1491 }
1492
1493
1494 static inline struct rxfcb *gfar_get_fcb(struct sk_buff *skb)
1495 {
1496         struct rxfcb *fcb = (struct rxfcb *)skb->data;
1497
1498         /* Remove the FCB from the skb */
1499         skb_pull(skb, GMAC_FCB_LEN);
1500
1501         return fcb;
1502 }
1503
1504 /* gfar_process_frame() -- handle one incoming packet if skb
1505  * isn't NULL.  */
1506 static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
1507                 int length)
1508 {
1509         struct gfar_private *priv = netdev_priv(dev);
1510         struct rxfcb *fcb = NULL;
1511
1512         if (!skb) {
1513                 if (netif_msg_rx_err(priv))
1514                         printk(KERN_WARNING "%s: Missing skb!\n", dev->name);
1515                 dev->stats.rx_dropped++;
1516                 priv->extra_stats.rx_skbmissing++;
1517         } else {
1518                 int ret;
1519
1520                 /* Prep the skb for the packet */
1521                 skb_put(skb, length);
1522
1523                 /* Grab the FCB if there is one */
1524                 if (gfar_uses_fcb(priv))
1525                         fcb = gfar_get_fcb(skb);
1526
1527                 /* Remove the padded bytes, if there are any */
1528                 if (priv->padding)
1529                         skb_pull(skb, priv->padding);
1530
1531                 if (priv->rx_csum_enable)
1532                         gfar_rx_checksum(skb, fcb);
1533
1534                 /* Tell the skb what kind of packet this is */
1535                 skb->protocol = eth_type_trans(skb, dev);
1536
1537                 /* Send the packet up the stack */
1538                 if (unlikely(priv->vlgrp && (fcb->flags & RXFCB_VLN)))
1539                         ret = gfar_rx_vlan(skb, priv->vlgrp, fcb->vlctl);
1540                 else
1541                         ret = RECEIVE(skb);
1542
1543                 if (ret == NET_RX_DROP)
1544                         priv->extra_stats.kernel_dropped++;
1545         }
1546
1547         return 0;
1548 }
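
/*
 * Sketch (hypothetical helper, not in the driver): the number of
 * bytes stripped from the head of each frame by the skb_pull() calls
 * above -- the frame control block (GMAC_FCB_LEN bytes) when FCBs
 * are in use, plus any configured alignment padding.
 */
static inline unsigned int gfar_rx_head_bytes(struct gfar_private *priv)
{
	return (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0) + priv->padding;
}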
1549
1550 /* gfar_clean_rx_ring() -- Processes each frame in the rx ring
1551  *   until the budget/quota has been reached. Returns the number
1552  *   of frames handled
1553  */
1554 int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
1555 {
1556         struct rxbd8 *bdp;
1557         struct sk_buff *skb;
1558         u16 pkt_len;
1559         int howmany = 0;
1560         struct gfar_private *priv = netdev_priv(dev);
1561
1562         /* Get the first full descriptor */
1563         bdp = priv->cur_rx;
1564
1565         while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
1566                 struct sk_buff *newskb;
1567                 rmb();
1568
1569                 /* Add another skb for the future */
1570                 newskb = gfar_new_skb(dev);
1571
1572                 skb = priv->rx_skbuff[priv->skb_currx];
1573
1574                 /* We drop the frame if we failed to allocate a new buffer */
1575                 if (unlikely(!newskb || !(bdp->status & RXBD_LAST) ||
1576                              (bdp->status & RXBD_ERR))) {
1577                         count_errors(bdp->status, dev);
1578
1579                         if (unlikely(!newskb))
1580                                 newskb = skb;
1581
1582                         if (skb) {
1583                                 dma_unmap_single(&priv->dev->dev,
1584                                                 bdp->bufPtr,
1585                                                 priv->rx_buffer_size,
1586                                                 DMA_FROM_DEVICE);
1587
1588                                 dev_kfree_skb_any(skb);
1589                         }
1590                 } else {
1591                         /* Increment the number of packets */
1592                         dev->stats.rx_packets++;
1593                         howmany++;
1594
1595                         /* Remove the FCS from the packet length */
1596                         pkt_len = bdp->length - 4;
1597
1598                         gfar_process_frame(dev, skb, pkt_len);
1599
1600                         dev->stats.rx_bytes += pkt_len;
1601                 }
1602
1603                 dev->last_rx = jiffies;
1604
1605                 priv->rx_skbuff[priv->skb_currx] = newskb;
1606
1607                 /* Setup the new bdp */
1608                 gfar_new_rxbdp(dev, bdp, newskb);
1609
1610                 /* Update to the next pointer */
1611                 if (bdp->status & RXBD_WRAP)
1612                         bdp = priv->rx_bd_base;
1613                 else
1614                         bdp++;
1615
1616                 /* update to point at the next skb */
1617                 priv->skb_currx =
1618                     (priv->skb_currx + 1) &
1619                     RX_RING_MOD_MASK(priv->rx_ring_size);
1620         }
1621
1622         /* Update the current rxbd pointer to be the next one */
1623         priv->cur_rx = bdp;
1624
1625         return howmany;
1626 }
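
/*
 * Sketch (hypothetical helper, not in the driver) of the descriptor
 * advance used above: the controller sets RXBD_WRAP in the last
 * descriptor, so the BD pointer wraps back to the ring base there,
 * while the skb index wraps via the power-of-two ring-size mask.
 */
static inline struct rxbd8 *gfar_next_rxbd(struct gfar_private *priv,
					   struct rxbd8 *bdp)
{
	return (bdp->status & RXBD_WRAP) ? priv->rx_bd_base : bdp + 1;
}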
1627
1628 #ifdef CONFIG_GFAR_NAPI
1629 static int gfar_poll(struct napi_struct *napi, int budget)
1630 {
1631         struct gfar_private *priv = container_of(napi, struct gfar_private, napi);
1632         struct net_device *dev = priv->dev;
1633         int howmany;
1634         unsigned long flags;
1635
1636         /* If we fail to get the lock, don't bother with the TX BDs */
1637         if (spin_trylock_irqsave(&priv->txlock, flags)) {
1638                 gfar_clean_tx_ring(dev);
1639                 spin_unlock_irqrestore(&priv->txlock, flags);
1640         }
1641
1642         howmany = gfar_clean_rx_ring(dev, budget);
1643
1644         if (howmany < budget) {
1645                 netif_rx_complete(dev, napi);
1646
1647                 /* Clear the halt bit in RSTAT */
1648                 gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT);
1649
1650                 gfar_write(&priv->regs->imask, IMASK_DEFAULT);
1651
1652                 /* If we are coalescing interrupts, update the timer */
1653                 /* Otherwise, clear it */
1654                 if (likely(priv->rxcoalescing)) {
1655                         gfar_write(&priv->regs->rxic, 0);
1656                         gfar_write(&priv->regs->rxic,
1657                                    mk_ic_value(priv->rxcount, priv->rxtime));
1658                 }
1659         }
1660
1661         return howmany;
1662 }
1663 #endif
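
/*
 * The NAPI contract gfar_poll() follows, reduced to a skeleton
 * (a sketch under the same assumptions as above, not driver code):
 * consume at most 'budget' frames, and only when less than the
 * budget was used may the poll be completed and the device
 * interrupts re-enabled; returning 'budget' keeps the softirq
 * polling without touching IMASK.
 */
static int gfar_poll_skeleton(struct napi_struct *napi, int budget)
{
	struct gfar_private *priv = container_of(napi, struct gfar_private, napi);
	int work = gfar_clean_rx_ring(priv->dev, budget);

	if (work < budget) {
		netif_rx_complete(priv->dev, napi);
		gfar_write(&priv->regs->imask, IMASK_DEFAULT);
	}

	return work;
}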
1664
1665 #ifdef CONFIG_NET_POLL_CONTROLLER
1666 /*
1667  * Polling 'interrupt' - used by things like netconsole to send skbs
1668  * without having to re-enable interrupts. It's not called while
1669  * the interrupt routine is executing.
1670  */
1671 static void gfar_netpoll(struct net_device *dev)
1672 {
1673         struct gfar_private *priv = netdev_priv(dev);
1674
1675         /* If the device has multiple interrupts, run tx/rx */
1676         if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
1677                 disable_irq(priv->interruptTransmit);
1678                 disable_irq(priv->interruptReceive);
1679                 disable_irq(priv->interruptError);
1680                 gfar_interrupt(priv->interruptTransmit, dev);
1681                 enable_irq(priv->interruptError);
1682                 enable_irq(priv->interruptReceive);
1683                 enable_irq(priv->interruptTransmit);
1684         } else {
1685                 disable_irq(priv->interruptTransmit);
1686                 gfar_interrupt(priv->interruptTransmit, dev);
1687                 enable_irq(priv->interruptTransmit);
1688         }
1689 }
1690 #endif
1691
1692 /* The interrupt handler for devices with one interrupt */
1693 static irqreturn_t gfar_interrupt(int irq, void *dev_id)
1694 {
1695         struct net_device *dev = dev_id;
1696         struct gfar_private *priv = netdev_priv(dev);
1697
1698         /* Save ievent for future reference */
1699         u32 events = gfar_read(&priv->regs->ievent);
1700
1701         /* Check for reception */
1702         if (events & IEVENT_RX_MASK)
1703                 gfar_receive(irq, dev_id);
1704
1705         /* Check for transmit completion */
1706         if (events & IEVENT_TX_MASK)
1707                 gfar_transmit(irq, dev_id);
1708
1709         /* Check for errors */
1710         if (events & IEVENT_ERR_MASK)
1711                 gfar_error(irq, dev_id);
1712
1713         return IRQ_HANDLED;
1714 }
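
/*
 * Sketch (assumption -- the real registration lives in the setup code
 * elsewhere in this file): single-interrupt controllers register
 * gfar_interrupt once and let it demultiplex via IEVENT, while parts
 * with FSL_GIANFAR_DEV_HAS_MULTI_INTR register separate tx/rx/error
 * handlers instead.
 */
static int gfar_request_single_irq(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	return request_irq(priv->interruptTransmit, gfar_interrupt,
			   0, dev->name, dev);
}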
1715
1716 /* Called every time the controller might need to be made
1717  * aware of new link state.  The PHY code conveys this
1718  * information through variables in the phydev structure, and this
1719  * function converts those variables into the appropriate
1720  * register values, and can bring down the device if needed.
1721  */
1722 static void adjust_link(struct net_device *dev)
1723 {
1724         struct gfar_private *priv = netdev_priv(dev);
1725         struct gfar __iomem *regs = priv->regs;
1726         unsigned long flags;
1727         struct phy_device *phydev = priv->phydev;
1728         int new_state = 0;
1729
1730         spin_lock_irqsave(&priv->txlock, flags);
1731         if (phydev->link) {
1732                 u32 tempval = gfar_read(&regs->maccfg2);
1733                 u32 ecntrl = gfar_read(&regs->ecntrl);
1734
1735                 /* If the duplex mode has changed, reprogram the MAC
1736                  * for full- or half-duplex operation */
1737                 if (phydev->duplex != priv->oldduplex) {
1738                         new_state = 1;
1739                         if (!(phydev->duplex))
1740                                 tempval &= ~(MACCFG2_FULL_DUPLEX);
1741                         else
1742                                 tempval |= MACCFG2_FULL_DUPLEX;
1743
1744                         priv->oldduplex = phydev->duplex;
1745                 }
1746
1747                 if (phydev->speed != priv->oldspeed) {
1748                         new_state = 1;
1749                         switch (phydev->speed) {
1750                         case 1000:
1751                                 tempval =
1752                                     ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
1753                                 break;
1754                         case 100:
1755                         case 10:
1756                                 tempval =
1757                                     ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
1758
1759                                 /* Reduced mode distinguishes
1760                                  * between 10 and 100 */
1761                                 if (phydev->speed == SPEED_100)
1762                                         ecntrl |= ECNTRL_R100;
1763                                 else
1764                                         ecntrl &= ~(ECNTRL_R100);
1765                                 break;
1766                         default:
1767                                 if (netif_msg_link(priv))
1768                                         printk(KERN_WARNING
1769                                                 "%s: Ack!  Speed (%d) is not 10/100/1000!\n",
1770                                                 dev->name, phydev->speed);
1771                                 break;
1772                         }
1773
1774                         priv->oldspeed = phydev->speed;
1775                 }
1776
1777                 gfar_write(&regs->maccfg2, tempval);
1778                 gfar_write(&regs->ecntrl, ecntrl);
1779
1780                 if (!priv->oldlink) {
1781                         new_state = 1;
1782                         priv->oldlink = 1;
1783                         netif_schedule(dev);
1784                 }
1785         } else if (priv->oldlink) {
1786                 new_state = 1;
1787                 priv->oldlink = 0;
1788                 priv->oldspeed = 0;
1789                 priv->oldduplex = -1;
1790         }
1791
1792         if (new_state && netif_msg_link(priv))
1793                 phy_print_status(phydev);
1794
1795         spin_unlock_irqrestore(&priv->txlock, flags);
1796 }
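
/*
 * Sketch (hypothetical helper, not in the driver): the speed switch
 * above condensed into a single mapping.  MACCFG2_IF is the
 * interface-mode field; GMII is selected at 1000Mb/s and MII at
 * 10/100, with ECNTRL_R100 separately distinguishing 100 from 10.
 */
static u32 gfar_maccfg2_for_speed(u32 maccfg2, int speed)
{
	switch (speed) {
	case SPEED_1000:
		return (maccfg2 & ~MACCFG2_IF) | MACCFG2_GMII;
	case SPEED_100:
	case SPEED_10:
		return (maccfg2 & ~MACCFG2_IF) | MACCFG2_MII;
	default:
		return maccfg2;	/* unknown speed: leave unchanged */
	}
}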
1797
1798 /* Update the hash table based on the current list of multicast
1799  * addresses we subscribe to.  Also, change the promiscuity of
1800  * the device based on the flags (this function is called
1801  * whenever dev->flags is changed */
1802 static void gfar_set_multi(struct net_device *dev)
1803 {
1804         struct dev_mc_list *mc_ptr;
1805         struct gfar_private *priv = netdev_priv(dev);
1806         struct gfar __iomem *regs = priv->regs;
1807         u32 tempval;
1808
1809         if (dev->flags & IFF_PROMISC) {
1810                 /* Set RCTRL to PROM */
1811                 tempval = gfar_read(&regs->rctrl);
1812                 tempval |= RCTRL_PROM;
1813                 gfar_write(&regs->rctrl, tempval);
1814         } else {
1815                 /* Set RCTRL to not PROM */
1816                 tempval = gfar_read(&regs->rctrl);
1817                 tempval &= ~(RCTRL_PROM);
1818                 gfar_write(&regs->rctrl, tempval);
1819         }
1820
1821         if (dev->flags & IFF_ALLMULTI) {
1822                 /* Set the hash to rx all multicast frames */
1823                 gfar_write(&regs->igaddr0, 0xffffffff);
1824                 gfar_write(&regs->igaddr1, 0xffffffff);
1825                 gfar_write(&regs->igaddr2, 0xffffffff);
1826                 gfar_write(&regs->igaddr3, 0xffffffff);
1827                 gfar_write(&regs->igaddr4, 0xffffffff);
1828                 gfar_write(&regs->igaddr5, 0xffffffff);
1829                 gfar_write(&regs->igaddr6, 0xffffffff);
1830                 gfar_write(&regs->igaddr7, 0xffffffff);
1831                 gfar_write(&regs->gaddr0, 0xffffffff);
1832                 gfar_write(&regs->gaddr1, 0xffffffff);
1833                 gfar_write(&regs->gaddr2, 0xffffffff);
1834                 gfar_write(&regs->gaddr3, 0xffffffff);
1835                 gfar_write(&regs->gaddr4, 0xffffffff);
1836                 gfar_write(&regs->gaddr5, 0xffffffff);
1837                 gfar_write(&regs->gaddr6, 0xffffffff);
1838                 gfar_write(&regs->gaddr7, 0xffffffff);
1839         } else {
1840                 int em_num;
1841                 int idx;
1842
1843                 /* zero out the hash */
1844                 gfar_write(&regs->igaddr0, 0x0);
1845                 gfar_write(&regs->igaddr1, 0x0);
1846                 gfar_write(&regs->igaddr2, 0x0);
1847                 gfar_write(&regs->igaddr3, 0x0);
1848                 gfar_write(&regs->igaddr4, 0x0);
1849                 gfar_write(&regs->igaddr5, 0x0);
1850                 gfar_write(&regs->igaddr6, 0x0);
1851                 gfar_write(&regs->igaddr7, 0x0);
1852                 gfar_write(&regs->gaddr0, 0x0);
1853                 gfar_write(&regs->gaddr1, 0x0);
1854                 gfar_write(&regs->gaddr2, 0x0);
1855                 gfar_write(&regs->gaddr3, 0x0);
1856                 gfar_write(&regs->gaddr4, 0x0);
1857                 gfar_write(&regs->gaddr5, 0x0);
1858                 gfar_write(&regs->gaddr6, 0x0);
1859                 gfar_write(&regs->gaddr7, 0x0);
1860
1861                 /* If we have extended hash tables, we need to
1862                  * clear the exact match registers to prepare for
1863                  * setting them */
1864                 if (priv->extended_hash) {
1865                         em_num = GFAR_EM_NUM + 1;
1866                         gfar_clear_exact_match(dev);
1867                         idx = 1;
1868                 } else {
1869                         idx = 0;
1870                         em_num = 0;
1871                 }
1872
1873                 if (dev->mc_count == 0)
1874                         return;
1875
1876                 /* Parse the list, and set the appropriate bits */
1877                 for (mc_ptr = dev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
1878                         if (idx < em_num) {
1879                                 gfar_set_mac_for_addr(dev, idx,
1880                                                 mc_ptr->dmi_addr);
1881                                 idx++;
1882                         } else
1883                                 gfar_set_hash_for_addr(dev, mc_ptr->dmi_addr);
1884                 }
1885         }
1888 }
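
/*
 * Sketch (hypothetical refactor, assuming the eight igaddr and eight
 * gaddr registers sit contiguously in the register map): the sixteen
 * writes above fill one 256-bit individual hash and one 256-bit
 * group hash, and could equally be expressed as a loop.
 */
static void gfar_fill_hash_regs(struct gfar __iomem *regs, u32 val)
{
	int i;

	for (i = 0; i < 8; i++) {
		gfar_write(&regs->igaddr0 + i, val);
		gfar_write(&regs->gaddr0 + i, val);
	}
}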
1889
1890
1891 /* Clears each of the exact match registers to zero, so they
1892  * don't interfere with normal reception */
1893 static void gfar_clear_exact_match(struct net_device *dev)
1894 {
1895         int idx;
1896         u8 zero_arr[MAC_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
1897
1898         for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
1899                 gfar_set_mac_for_addr(dev, idx, (u8 *)zero_arr);
1900 }
1901
1902 /* Set the appropriate hash bit for the given addr */
1903 /* The algorithm works like so:
1904  * 1) Take the Destination Address (ie the multicast address), and
1905  * do a CRC on it (little endian), and reverse the bits of the
1906  * result.
1907  * 2) Use the 8 most significant bits as a hash into a 256-entry
1908  * table.  The table is controlled through 8 32-bit registers:
1909  * gaddr0-7.  gaddr0's MSB is entry 0, and gaddr7's LSB is
1910  * entry 255.  This means that the 3 most significant bits of the
1911  * hash index select which gaddr register to use, and the 5 other bits
1912  * indicate which bit (assuming an IBM numbering scheme, which
1913  * for PowerPC (tm) is usually the case) in the register holds
1914  * the entry. */
1915 static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
1916 {
1917         u32 tempval;
1918         struct gfar_private *priv = netdev_priv(dev);
1919         u32 result = ether_crc(MAC_ADDR_LEN, addr);
1920         int width = priv->hash_width;
1921         u8 whichbit = (result >> (32 - width)) & 0x1f;
1922         u8 whichreg = result >> (32 - width + 5);
1923         u32 value = (1 << (31-whichbit));
1924
1925         tempval = gfar_read(priv->hash_regs[whichreg]);
1926         tempval |= value;
1927         gfar_write(priv->hash_regs[whichreg], tempval);
1930 }
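
/*
 * Worked example with made-up numbers (not derived from a real
 * address): assume hash_width = 8 and a bit-reversed CRC result of
 * 0xb4000000.  The 8-bit hash index is 0xb4 = 0b10110100; its top 3
 * bits (0b101 = 5) select gaddr5, and its low 5 bits (0b10100 = 20)
 * select IBM bit 20, i.e. host bit 31 - 20.
 */
static void gfar_hash_example(void)
{
	u32 result = 0xb4000000;	/* pretend bit-reversed CRC */
	int width = 8;			/* assumed hash width */
	u8 whichbit = (result >> (32 - width)) & 0x1f;	/* 20 */
	u8 whichreg = result >> (32 - width + 5);	/* 5 -> gaddr5 */
	u32 value = 1 << (31 - whichbit);		/* 0x00000800 */

	(void)whichreg;
	(void)value;
}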
1931
1932
1933 /* There are multiple MAC Address register pairs on some controllers
1934  * This function sets the numth pair to a given address
1935  */
1936 static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr)
1937 {
1938         struct gfar_private *priv = netdev_priv(dev);
1939         int idx;
1940         char tmpbuf[8] = { 0 };	/* zero-padded: a 32-bit read at offset 4 follows */
1941         u32 tempval;
1942         u32 __iomem *macptr = &priv->regs->macstnaddr1;
1943
1944         macptr += num*2;
1945
1946         /* Build the buffer with the bytes reversed; the MAC */
1947         /* address registers expect little-endian byte order */
1948         for (idx = 0; idx < MAC_ADDR_LEN; idx++)
1949                 tmpbuf[MAC_ADDR_LEN - 1 - idx] = addr[idx];
1950
1951         gfar_write(macptr, *((u32 *) (tmpbuf)));
1952
1953         tempval = *((u32 *) (tmpbuf + 4));
1954
1955         gfar_write(macptr+1, tempval);
1956 }
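
/*
 * Worked example with a made-up address: for 00:04:9f:01:02:03 the
 * loop above builds {03, 02, 01, 9f, 04, 00}, so on a big-endian
 * core macstnaddr1 receives 0x0302019f and the upper half of
 * macstnaddr2 receives 0x0400.
 */
static void gfar_addr_reverse_example(void)
{
	u8 addr[MAC_ADDR_LEN] = { 0x00, 0x04, 0x9f, 0x01, 0x02, 0x03 };
	u8 rev[MAC_ADDR_LEN];
	int i;

	for (i = 0; i < MAC_ADDR_LEN; i++)
		rev[MAC_ADDR_LEN - 1 - i] = addr[i];

	(void)rev;	/* rev is now { 0x03, 0x02, 0x01, 0x9f, 0x04, 0x00 } */
}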
1957
1958 /* GFAR error interrupt handler */
1959 static irqreturn_t gfar_error(int irq, void *dev_id)
1960 {
1961         struct net_device *dev = dev_id;
1962         struct gfar_private *priv = netdev_priv(dev);
1963
1964         /* Save ievent for future reference */
1965         u32 events = gfar_read(&priv->regs->ievent);
1966
1967         /* Clear IEVENT */
1968         gfar_write(&priv->regs->ievent, IEVENT_ERR_MASK);
1969
1970         /* Log the raw event and mask bits when error messages are enabled */
1971         if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
1972                 printk(KERN_DEBUG "%s: error interrupt (ievent=0x%08x imask=0x%08x)\n",
1973                        dev->name, events, gfar_read(&priv->regs->imask));
1974
1975         /* Update the error counters */
1976         if (events & IEVENT_TXE) {
1977                 dev->stats.tx_errors++;
1978
1979                 if (events & IEVENT_LC)
1980                         dev->stats.tx_window_errors++;
1981                 if (events & IEVENT_CRL)
1982                         dev->stats.tx_aborted_errors++;
1983                 if (events & IEVENT_XFUN) {
1984                         if (netif_msg_tx_err(priv))
1985                                 printk(KERN_DEBUG "%s: TX FIFO underrun, "
1986                                        "packet dropped.\n", dev->name);
1987                         dev->stats.tx_dropped++;
1988                         priv->extra_stats.tx_underrun++;
1989
1990                         /* Reactivate the Tx Queues */
1991                         gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);
1992                 }
1993                 if (netif_msg_tx_err(priv))
1994                         printk(KERN_DEBUG "%s: Transmit Error\n", dev->name);
1995         }
1996         if (events & IEVENT_BSY) {
1997                 dev->stats.rx_errors++;
1998                 priv->extra_stats.rx_bsy++;
1999
2000                 gfar_receive(irq, dev_id);
2001
2002 #ifndef CONFIG_GFAR_NAPI
2003                 /* Clear the halt bit in RSTAT */
2004                 gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT);
2005 #endif
2006
2007                 if (netif_msg_rx_err(priv))
2008                         printk(KERN_DEBUG "%s: busy error (rstat: %x)\n",
2009                                dev->name, gfar_read(&priv->regs->rstat));
2010         }
2011         if (events & IEVENT_BABR) {
2012                 dev->stats.rx_errors++;
2013                 priv->extra_stats.rx_babr++;
2014
2015                 if (netif_msg_rx_err(priv))
2016                         printk(KERN_DEBUG "%s: babbling RX error\n", dev->name);
2017         }
2018         if (events & IEVENT_EBERR) {
2019                 priv->extra_stats.eberr++;
2020                 if (netif_msg_rx_err(priv))
2021                         printk(KERN_DEBUG "%s: bus error\n", dev->name);
2022         }
2023         if ((events & IEVENT_RXC) && netif_msg_rx_status(priv))
2024                 printk(KERN_DEBUG "%s: control frame\n", dev->name);
2025
2026         if (events & IEVENT_BABT) {
2027                 priv->extra_stats.tx_babt++;
2028                 if (netif_msg_tx_err(priv))
2029                         printk(KERN_DEBUG "%s: babbling TX error\n", dev->name);
2030         }
2031         return IRQ_HANDLED;
2032 }
2033
2034 /* work with hotplug and coldplug */
2035 MODULE_ALIAS("platform:fsl-gianfar");
2036
2037 /* Structure for a device driver */
2038 static struct platform_driver gfar_driver = {
2039         .probe = gfar_probe,
2040         .remove = gfar_remove,
2041         .driver = {
2042                 .name = "fsl-gianfar",
2043                 .owner = THIS_MODULE,
2044         },
2045 };
2046
2047 static int __init gfar_init(void)
2048 {
2049         int err = gfar_mdio_init();
2050
2051         if (err)
2052                 return err;
2053
2054         err = platform_driver_register(&gfar_driver);
2055
2056         if (err)
2057                 gfar_mdio_exit();
2058
2059         return err;
2060 }
2061
2062 static void __exit gfar_exit(void)
2063 {
2064         platform_driver_unregister(&gfar_driver);
2065         gfar_mdio_exit();
2066 }
2067
2068 module_init(gfar_init);
2069 module_exit(gfar_exit);
2070