[POWERPC] PS3: Checkpatch cleanups for drivers/ps3/ps3-sys-manager.c
[linux-2.6] / drivers / net / ibm_newemac / core.c
1 /*
2  * drivers/net/ibm_newemac/core.c
3  *
4  * Driver for PowerPC 4xx on-chip ethernet controller.
5  *
6  * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
7  *                <benh@kernel.crashing.org>
8  *
9  * Based on the arch/ppc version of the driver:
10  *
11  * Copyright (c) 2004, 2005 Zultys Technologies.
12  * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
13  *
14  * Based on original work by
15  *      Matt Porter <mporter@kernel.crashing.org>
16  *      (c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
17  *      Armin Kuster <akuster@mvista.com>
18  *      Johnnie Peters <jpeters@mvista.com>
19  *
20  * This program is free software; you can redistribute  it and/or modify it
21  * under  the terms of  the GNU General  Public License as published by the
22  * Free Software Foundation;  either version 2 of the  License, or (at your
23  * option) any later version.
24  *
25  */
26
27 #include <linux/sched.h>
28 #include <linux/string.h>
29 #include <linux/errno.h>
30 #include <linux/delay.h>
31 #include <linux/types.h>
32 #include <linux/pci.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/crc32.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #include <linux/bitops.h>
39 #include <linux/workqueue.h>
40 #include <linux/of.h>
41
42 #include <asm/processor.h>
43 #include <asm/io.h>
44 #include <asm/dma.h>
45 #include <asm/uaccess.h>
46
47 #include "core.h"
48
49 /*
50  * Lack of dma_unmap_???? calls is intentional.
51  *
52  * API-correct usage requires additional support state information to be
53  * maintained for every RX and TX buffer descriptor (BD). Unfortunately, due to
54  * EMAC design (e.g. TX buffer passed from network stack can be split into
55  * several BDs, dma_map_single/dma_map_page can be used to map particular BD),
56  * maintaining such information will add additional overhead.
57  * Current DMA API implementation for 4xx processors only ensures cache coherency
58  * and dma_unmap_???? routines are empty and are likely to stay this way.
59  * I decided to omit dma_unmap_??? calls because I don't want to add additional
60  * complexity just for the sake of following some abstract API, when it doesn't
61  * add any real benefit to the driver. I understand that this decision maybe
62  * controversial, but I really tried to make code API-correct and efficient
63  * at the same time and didn't come up with code I liked :(.                --ebs
64  */
65
66 #define DRV_NAME        "emac"
67 #define DRV_VERSION     "3.54"
68 #define DRV_DESC        "PPC 4xx OCP EMAC driver"
69
70 MODULE_DESCRIPTION(DRV_DESC);
71 MODULE_AUTHOR
72     ("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>");
73 MODULE_LICENSE("GPL");
74
75 /*
76  * PPC64 doesn't (yet) have a cacheable_memcpy
77  */
78 #ifdef CONFIG_PPC64
79 #define cacheable_memcpy(d,s,n) memcpy((d),(s),(n))
80 #endif
81
82 /* minimum number of free TX descriptors required to wake up TX process */
83 #define EMAC_TX_WAKEUP_THRESH           (NUM_TX_BUFF / 4)
84
85 /* If packet size is less than this number, we allocate small skb and copy packet
86  * contents into it instead of just sending original big skb up
87  */
88 #define EMAC_RX_COPY_THRESH             CONFIG_IBM_NEW_EMAC_RX_COPY_THRESHOLD
89
90 /* Since multiple EMACs share MDIO lines in various ways, we need
91  * to avoid re-using the same PHY ID in cases where the arch didn't
92  * setup precise phy_map entries
93  *
94  * XXX This is something that needs to be reworked as we can have multiple
95  * EMAC "sets" (multiple ASICs containing several EMACs) though we can
96  * probably require in that case to have explicit PHY IDs in the device-tree
97  */
98 static u32 busy_phy_map;
99 static DEFINE_MUTEX(emac_phy_map_lock);
100
101 /* This is the wait queue used to wait on any event related to probe, that
102  * is discovery of MALs, other EMACs, ZMII/RGMIIs, etc...
103  */
104 static DECLARE_WAIT_QUEUE_HEAD(emac_probe_wait);
105
106 /* Having stable interface names is a doomed idea. However, it would be nice
107  * if we didn't have completely random interface names at boot too :-) It's
108  * just a matter of making everybody's life easier. Since we are doing
109  * threaded probing, it's a bit harder though. The base idea here is that
110  * we make up a list of all emacs in the device-tree before we register the
111  * driver. Every emac will then wait for the previous one in the list to
112  * initialize before itself. We should also keep that list ordered by
113  * cell_index.
114  * That list is only 4 entries long, meaning that additional EMACs don't
115  * get ordering guarantees unless EMAC_BOOT_LIST_SIZE is increased.
116  */
117
118 #define EMAC_BOOT_LIST_SIZE     4
119 static struct device_node *emac_boot_list[EMAC_BOOT_LIST_SIZE];
120
121 /* How long should I wait for dependent devices ? */
122 #define EMAC_PROBE_DEP_TIMEOUT  (HZ * 5)
123
124 /* I don't want to litter system log with timeout errors
125  * when we have brain-damaged PHY.
126  */
127 static inline void emac_report_timeout_error(struct emac_instance *dev,
128                                              const char *error)
129 {
130         if (net_ratelimit())
131                 printk(KERN_ERR "%s: %s\n", dev->ndev->name, error);
132 }
133
134 /* PHY polling intervals */
135 #define PHY_POLL_LINK_ON        HZ
136 #define PHY_POLL_LINK_OFF       (HZ / 5)
137
138 /* Graceful stop timeouts in us.
139  * We should allow up to 1 frame time (full-duplex, ignoring collisions)
140  */
141 #define STOP_TIMEOUT_10         1230
142 #define STOP_TIMEOUT_100        124
143 #define STOP_TIMEOUT_1000       13
144 #define STOP_TIMEOUT_1000_JUMBO 73
145
146 /* Please, keep in sync with struct ibm_emac_stats/ibm_emac_error_stats */
static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = {
	/* ethtool stat name table; the order must track the layout of
	 * struct ibm_emac_stats followed by struct ibm_emac_error_stats
	 * (see the "keep in sync" note above). */
	"rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum",
	"tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom",
	"rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu",
	"rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet",
	"rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error",
	"rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range",
	"rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun",
	"rx_bad_packet", "rx_runt_packet", "rx_short_event",
	"rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long",
	"rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors",
	"tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral",
	"tx_bd_excessive_collisions", "tx_bd_late_collision",
	"tx_bd_multple_collisions", "tx_bd_single_collision",
	"tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe",
	"tx_errors"
};
164
165 static irqreturn_t emac_irq(int irq, void *dev_instance);
166 static void emac_clean_tx_ring(struct emac_instance *dev);
167 static void __emac_set_multicast_list(struct emac_instance *dev);
168
169 static inline int emac_phy_supports_gige(int phy_mode)
170 {
171         return  phy_mode == PHY_MODE_GMII ||
172                 phy_mode == PHY_MODE_RGMII ||
173                 phy_mode == PHY_MODE_TBI ||
174                 phy_mode == PHY_MODE_RTBI;
175 }
176
177 static inline int emac_phy_gpcs(int phy_mode)
178 {
179         return  phy_mode == PHY_MODE_TBI ||
180                 phy_mode == PHY_MODE_RTBI;
181 }
182
/* Enable the MAC transmit channel by setting TXE in MR0.
 * No-op if the transmitter is already enabled. */
static inline void emac_tx_enable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "tx_enable" NL);

	r = in_be32(&p->mr0);
	if (!(r & EMAC_MR0_TXE))
		out_be32(&p->mr0, r | EMAC_MR0_TXE);
}
194
/* Disable the MAC transmit channel: clear TXE in MR0, then busy-wait
 * (up to dev->stop_timeout iterations of udelay(1), i.e. roughly that
 * many microseconds) for the TXI idle indication.  A timeout is
 * reported via the rate-limited error helper. */
static void emac_tx_disable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "tx_disable" NL);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_TXE) {
		int n = dev->stop_timeout;
		out_be32(&p->mr0, r & ~EMAC_MR0_TXE);
		/* poll MR0 until the channel reports idle or we time out */
		while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n) {
			udelay(1);
			--n;
		}
		if (unlikely(!n))
			emac_report_timeout_error(dev, "TX disable timeout");
	}
}
214
215 static void emac_rx_enable(struct emac_instance *dev)
216 {
217         struct emac_regs __iomem *p = dev->emacp;
218         u32 r;
219
220         if (unlikely(test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags)))
221                 goto out;
222
223         DBG(dev, "rx_enable" NL);
224
225         r = in_be32(&p->mr0);
226         if (!(r & EMAC_MR0_RXE)) {
227                 if (unlikely(!(r & EMAC_MR0_RXI))) {
228                         /* Wait if previous async disable is still in progress */
229                         int n = dev->stop_timeout;
230                         while (!(r = in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
231                                 udelay(1);
232                                 --n;
233                         }
234                         if (unlikely(!n))
235                                 emac_report_timeout_error(dev,
236                                                           "RX disable timeout");
237                 }
238                 out_be32(&p->mr0, r | EMAC_MR0_RXE);
239         }
240  out:
241         ;
242 }
243
/* Disable the MAC receive channel: clear RXE in MR0, then busy-wait
 * (up to dev->stop_timeout us) for the RXI idle indication, reporting
 * a rate-limited error on timeout.  Synchronous counterpart of
 * emac_rx_disable_async(). */
static void emac_rx_disable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "rx_disable" NL);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_RXE) {
		int n = dev->stop_timeout;
		out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
		/* poll MR0 until the channel reports idle or we time out */
		while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
			udelay(1);
			--n;
		}
		if (unlikely(!n))
			emac_report_timeout_error(dev, "RX disable timeout");
	}
}
263
/* Quiesce the netdev side of the interface: block multicast updates
 * (deferred via dev->mcast_pending), disable MAL polling and stop the
 * TX queue.  Paired with emac_netif_start(). */
static inline void emac_netif_stop(struct emac_instance *dev)
{
	netif_tx_lock_bh(dev->ndev);
	dev->no_mcast = 1;
	netif_tx_unlock_bh(dev->ndev);
	dev->ndev->trans_start = jiffies;	/* prevent tx timeout */
	mal_poll_disable(dev->mal, &dev->commac);
	netif_tx_disable(dev->ndev);
}
273
/* Resume the netdev side after emac_netif_stop(): allow multicast
 * updates again (replaying any request deferred while stopped), wake
 * the TX queue and re-enable MAL polling. */
static inline void emac_netif_start(struct emac_instance *dev)
{
	netif_tx_lock_bh(dev->ndev);
	dev->no_mcast = 0;
	if (dev->mcast_pending && netif_running(dev->ndev))
		__emac_set_multicast_list(dev);
	netif_tx_unlock_bh(dev->ndev);

	netif_wake_queue(dev->ndev);

	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (taken from tg3... though the case where that is wrong is
	 *  not terribly harmful)
	 */
	mal_poll_enable(dev->mal, &dev->commac);
}
291
292 static inline void emac_rx_disable_async(struct emac_instance *dev)
293 {
294         struct emac_regs __iomem *p = dev->emacp;
295         u32 r;
296
297         DBG(dev, "rx_disable_async" NL);
298
299         r = in_be32(&p->mr0);
300         if (r & EMAC_MR0_RXE)
301                 out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
302 }
303
304 static int emac_reset(struct emac_instance *dev)
305 {
306         struct emac_regs __iomem *p = dev->emacp;
307         int n = 20;
308
309         DBG(dev, "reset" NL);
310
311         if (!dev->reset_failed) {
312                 /* 40x erratum suggests stopping RX channel before reset,
313                  * we stop TX as well
314                  */
315                 emac_rx_disable(dev);
316                 emac_tx_disable(dev);
317         }
318
319         out_be32(&p->mr0, EMAC_MR0_SRST);
320         while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n)
321                 --n;
322
323         if (n) {
324                 dev->reset_failed = 0;
325                 return 0;
326         } else {
327                 emac_report_timeout_error(dev, "reset timeout");
328                 dev->reset_failed = 1;
329                 return -ETIMEDOUT;
330         }
331 }
332
/* Program the four 16-bit group address hash tables (GAHT1..GAHT4)
 * from the netdev's multicast list.  The top 6 bits of the Ethernet
 * CRC of each address select one of the 64 hash bits. */
static void emac_hash_mc(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u16 gaht[4] = { 0 };
	struct dev_mc_list *dmi;

	DBG(dev, "hash_mc %d" NL, dev->ndev->mc_count);

	for (dmi = dev->ndev->mc_list; dmi; dmi = dmi->next) {
		int bit;
		DBG2(dev, "mc %02x:%02x:%02x:%02x:%02x:%02x" NL,
		     dmi->dmi_addr[0], dmi->dmi_addr[1], dmi->dmi_addr[2],
		     dmi->dmi_addr[3], dmi->dmi_addr[4], dmi->dmi_addr[5]);

		/* map CRC[31:26] to a bit index 0..63, MSB-first */
		bit = 63 - (ether_crc(ETH_ALEN, dmi->dmi_addr) >> 26);
		/* bit 15-i of gaht[i/16]; each table holds 16 hash bits */
		gaht[bit >> 4] |= 0x8000 >> (bit & 0x0f);
	}
	out_be32(&p->gaht1, gaht[0]);
	out_be32(&p->gaht2, gaht[1]);
	out_be32(&p->gaht3, gaht[2]);
	out_be32(&p->gaht4, gaht[3]);
}
355
356 static inline u32 emac_iff2rmr(struct net_device *ndev)
357 {
358         struct emac_instance *dev = netdev_priv(ndev);
359         u32 r;
360
361         r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE;
362
363         if (emac_has_feature(dev, EMAC_FTR_EMAC4))
364             r |= EMAC4_RMR_BASE;
365         else
366             r |= EMAC_RMR_BASE;
367
368         if (ndev->flags & IFF_PROMISC)
369                 r |= EMAC_RMR_PME;
370         else if (ndev->flags & IFF_ALLMULTI || ndev->mc_count > 32)
371                 r |= EMAC_RMR_PMME;
372         else if (ndev->mc_count > 0)
373                 r |= EMAC_RMR_MAE;
374
375         return r;
376 }
377
378 static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
379 {
380         u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC_MR1_TR0_MULT;
381
382         DBG2(dev, "__emac_calc_base_mr1" NL);
383
384         switch(tx_size) {
385         case 2048:
386                 ret |= EMAC_MR1_TFS_2K;
387                 break;
388         default:
389                 printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
390                        dev->ndev->name, tx_size);
391         }
392
393         switch(rx_size) {
394         case 16384:
395                 ret |= EMAC_MR1_RFS_16K;
396                 break;
397         case 4096:
398                 ret |= EMAC_MR1_RFS_4K;
399                 break;
400         default:
401                 printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
402                        dev->ndev->name, rx_size);
403         }
404
405         return ret;
406 }
407
408 static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
409 {
410         u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC4_MR1_TR |
411                 EMAC4_MR1_OBCI(dev->opb_bus_freq / 1000000);
412
413         DBG2(dev, "__emac4_calc_base_mr1" NL);
414
415         switch(tx_size) {
416         case 4096:
417                 ret |= EMAC4_MR1_TFS_4K;
418                 break;
419         case 2048:
420                 ret |= EMAC4_MR1_TFS_2K;
421                 break;
422         default:
423                 printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
424                        dev->ndev->name, tx_size);
425         }
426
427         switch(rx_size) {
428         case 16384:
429                 ret |= EMAC4_MR1_RFS_16K;
430                 break;
431         case 4096:
432                 ret |= EMAC4_MR1_RFS_4K;
433                 break;
434         case 2048:
435                 ret |= EMAC4_MR1_RFS_2K;
436                 break;
437         default:
438                 printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
439                        dev->ndev->name, rx_size);
440         }
441
442         return ret;
443 }
444
445 static u32 emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
446 {
447         return emac_has_feature(dev, EMAC_FTR_EMAC4) ?
448                 __emac4_calc_base_mr1(dev, tx_size, rx_size) :
449                 __emac_calc_base_mr1(dev, tx_size, rx_size);
450 }
451
452 static inline u32 emac_calc_trtr(struct emac_instance *dev, unsigned int size)
453 {
454         if (emac_has_feature(dev, EMAC_FTR_EMAC4))
455                 return ((size >> 6) - 1) << EMAC_TRTR_SHIFT_EMAC4;
456         else
457                 return ((size >> 6) - 1) << EMAC_TRTR_SHIFT;
458 }
459
460 static inline u32 emac_calc_rwmr(struct emac_instance *dev,
461                                  unsigned int low, unsigned int high)
462 {
463         if (emac_has_feature(dev, EMAC_FTR_EMAC4))
464                 return (low << 22) | ( (high & 0x3ff) << 6);
465         else
466                 return (low << 23) | ( (high & 0x1ff) << 7);
467 }
468
/* Program the whole EMAC for the current link/PHY state: MR1 mode
 * register (duplex, speed, FIFO sizes, flow control), MAC address,
 * VLAN TPID, receive mode, TX threshold, RX watermarks, PAUSE timer
 * and interrupt sources.  With no link the MAC is put into internal
 * loopback instead of being reset.
 * Returns 0, or -ETIMEDOUT if the chip reset fails. */
static int emac_configure(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	struct net_device *ndev = dev->ndev;
	int tx_size, rx_size, link = netif_carrier_ok(dev->ndev);
	u32 r, mr1 = 0;

	DBG(dev, "configure" NL);

	if (!link) {
		/* no carrier: force full-duplex internal loopback */
		out_be32(&p->mr1, in_be32(&p->mr1)
			 | EMAC_MR1_FDE | EMAC_MR1_ILE);
		udelay(100);
	} else if (emac_reset(dev) < 0)
		return -ETIMEDOUT;

	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		tah_reset(dev->tah_dev);

	DBG(dev, " link = %d duplex = %d, pause = %d, asym_pause = %d\n",
	    link, dev->phy.duplex, dev->phy.pause, dev->phy.asym_pause);

	/* Default fifo sizes */
	tx_size = dev->tx_fifo_size;
	rx_size = dev->rx_fifo_size;

	/* No link, force loopback */
	if (!link)
		mr1 = EMAC_MR1_FDE | EMAC_MR1_ILE;

	/* Check for full duplex */
	else if (dev->phy.duplex == DUPLEX_FULL)
		mr1 |= EMAC_MR1_FDE | EMAC_MR1_MWSW_001;

	/* Adjust fifo sizes, mr1 and timeouts based on link speed */
	dev->stop_timeout = STOP_TIMEOUT_10;
	switch (dev->phy.speed) {
	case SPEED_1000:
		if (emac_phy_gpcs(dev->phy.mode)) {
			mr1 |= EMAC_MR1_MF_1000GPCS |
				EMAC_MR1_MF_IPPA(dev->phy.address);

			/* Put some arbitrary OUI, Manuf & Rev IDs so we can
			 * identify this GPCS PHY later.
			 */
			out_be32(&p->ipcr, 0xdeadbeef);
		} else
			mr1 |= EMAC_MR1_MF_1000;

		/* Extended fifo sizes */
		tx_size = dev->tx_fifo_size_gige;
		rx_size = dev->rx_fifo_size_gige;

		if (dev->ndev->mtu > ETH_DATA_LEN) {
			mr1 |= EMAC_MR1_JPSM;
			dev->stop_timeout = STOP_TIMEOUT_1000_JUMBO;
		} else
			dev->stop_timeout = STOP_TIMEOUT_1000;
		break;
	case SPEED_100:
		mr1 |= EMAC_MR1_MF_100;
		dev->stop_timeout = STOP_TIMEOUT_100;
		break;
	default: /* make gcc happy */
		break;
	}

	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_set_speed(dev->rgmii_dev, dev->rgmii_port,
				dev->phy.speed);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_set_speed(dev->zmii_dev, dev->zmii_port, dev->phy.speed);

	/* on 40x erratum forces us to NOT use integrated flow control,
	 * let's hope it works on 44x ;)
	 */
	if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x) &&
	    dev->phy.duplex == DUPLEX_FULL) {
		if (dev->phy.pause)
			mr1 |= EMAC_MR1_EIFC | EMAC_MR1_APP;
		else if (dev->phy.asym_pause)
			mr1 |= EMAC_MR1_APP;
	}

	/* Add base settings & fifo sizes & program MR1 */
	mr1 |= emac_calc_base_mr1(dev, tx_size, rx_size);
	out_be32(&p->mr1, mr1);

	/* Set individual MAC address */
	out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
	out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
		 (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
		 ndev->dev_addr[5]);

	/* VLAN Tag Protocol ID */
	out_be32(&p->vtpid, 0x8100);

	/* Receive mode register; hash tables must be programmed before
	 * enabling multicast-address-match mode */
	r = emac_iff2rmr(ndev);
	if (r & EMAC_RMR_MAE)
		emac_hash_mc(dev);
	out_be32(&p->rmr, r);

	/* FIFOs thresholds */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
			       tx_size / 2 / dev->fifo_entry_size);
	else
		r = EMAC_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
			      tx_size / 2 / dev->fifo_entry_size);
	out_be32(&p->tmr1, r);
	out_be32(&p->trtr, emac_calc_trtr(dev, tx_size / 2));

	/* PAUSE frame is sent when RX FIFO reaches its high-water mark,
	   there should be still enough space in FIFO to allow the our link
	   partner time to process this frame and also time to send PAUSE
	   frame itself.

	   Here is the worst case scenario for the RX FIFO "headroom"
	   (from "The Switch Book") (100Mbps, without preamble, inter-frame gap):

	   1) One maximum-length frame on TX                    1522 bytes
	   2) One PAUSE frame time                                64 bytes
	   3) PAUSE frame decode time allowance                   64 bytes
	   4) One maximum-length frame on RX                    1522 bytes
	   5) Round-trip propagation delay of the link (100Mb)    15 bytes
	   ----------
	   3187 bytes

	   I chose to set high-water mark to RX_FIFO_SIZE / 4 (1024 bytes)
	   low-water mark  to RX_FIFO_SIZE / 8 (512 bytes)
	 */
	r = emac_calc_rwmr(dev, rx_size / 8 / dev->fifo_entry_size,
			   rx_size / 4 / dev->fifo_entry_size);
	out_be32(&p->rwmr, r);

	/* Set PAUSE timer to the maximum */
	out_be32(&p->ptr, 0xffff);

	/* IRQ sources */
	r = EMAC_ISR_OVR | EMAC_ISR_BP | EMAC_ISR_SE |
		EMAC_ISR_ALE | EMAC_ISR_BFCS | EMAC_ISR_PTLE | EMAC_ISR_ORE |
		EMAC_ISR_IRE | EMAC_ISR_TE;
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
	    r |= EMAC4_ISR_TXPE | EMAC4_ISR_RXPE /* | EMAC4_ISR_TXUE |
						  EMAC4_ISR_RXOE | */;
	out_be32(&p->iser,  r);

	/* We need to take GPCS PHY out of isolate mode after EMAC reset */
	if (emac_phy_gpcs(dev->phy.mode))
		emac_mii_reset_phy(&dev->phy);

	return 0;
}
623
624 static void emac_reinitialize(struct emac_instance *dev)
625 {
626         DBG(dev, "reinitialize" NL);
627
628         emac_netif_stop(dev);
629         if (!emac_configure(dev)) {
630                 emac_tx_enable(dev);
631                 emac_rx_enable(dev);
632         }
633         emac_netif_start(dev);
634 }
635
/* Full TX-path reset: stop the MAC transmitter and its MAL channel,
 * flush the TX ring (emac_clean_tx_ring), reprogram the EMAC and bring
 * both channels back up.  Used from the reset worker. */
static void emac_full_tx_reset(struct emac_instance *dev)
{
	DBG(dev, "full_tx_reset" NL);

	emac_tx_disable(dev);
	mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
	emac_clean_tx_ring(dev);
	/* reset ring bookkeeping: nothing queued, nothing awaiting ack */
	dev->tx_cnt = dev->tx_slot = dev->ack_slot = 0;

	emac_configure(dev);

	mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
	emac_tx_enable(dev);
	emac_rx_enable(dev);
}
651
/* Workqueue handler (scheduled from emac_tx_timeout): performs a full
 * TX reset under link_lock, but only while the interface is open. */
static void emac_reset_work(struct work_struct *work)
{
	struct emac_instance *dev = container_of(work, struct emac_instance, reset_work);

	DBG(dev, "reset_work" NL);

	mutex_lock(&dev->link_lock);
	if (dev->opened) {
		emac_netif_stop(dev);
		emac_full_tx_reset(dev);
		emac_netif_start(dev);
	}
	mutex_unlock(&dev->link_lock);
}
666
667 static void emac_tx_timeout(struct net_device *ndev)
668 {
669         struct emac_instance *dev = netdev_priv(ndev);
670
671         DBG(dev, "tx_timeout" NL);
672
673         schedule_work(&dev->reset_work);
674 }
675
676
677 static inline int emac_phy_done(struct emac_instance *dev, u32 stacr)
678 {
679         int done = !!(stacr & EMAC_STACR_OC);
680
681         if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
682                 done = !done;
683
684         return done;
685 };
686
/* Perform one MDIO (clause 22) read through this EMAC's STA controller.
 * Serialized by dev->mdio_lock; temporarily claims the ZMII/RGMII MDIO
 * lines, which may be shared between EMACs.
 * Returns the 16-bit PHY register value on success, -ETIMEDOUT if the
 * management interface never goes idle / never completes, or
 * -EREMOTEIO if the PHY reported an error (STACR PHYE). */
static int __emac_mdio_read(struct emac_instance *dev, u8 id, u8 reg)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r = 0;
	int n, err = -ETIMEDOUT;

	mutex_lock(&dev->mdio_lock);

	DBG2(dev, "mdio_read(%02x,%02x)" NL, id, reg);

	/* Enable proper MDIO port */
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);

	/* Wait for management interface to become idle (up to ~10us) */
	n = 10;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait idle\n");
			goto bail;
		}
	}

	/* Issue read command: base clock config + READ opcode + PHY
	 * register and device addresses */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_STACR_BASE(dev->opb_bus_freq);
	else
		r = EMAC_STACR_BASE(dev->opb_bus_freq);
	if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
		r |= EMAC_STACR_OC;
	if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
		r |= EMACX_STACR_STAC_READ;
	else
		r |= EMAC_STACR_STAC_READ;
	r |= (reg & EMAC_STACR_PRA_MASK)
		| ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT);
	out_be32(&p->stacr, r);

	/* Wait for read to complete (up to ~100us) */
	n = 100;
	while (!emac_phy_done(dev, (r = in_be32(&p->stacr)))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait complete\n");
			goto bail;
		}
	}

	if (unlikely(r & EMAC_STACR_PHYE)) {
		DBG(dev, "mdio_read(%02x, %02x) failed" NL, id, reg);
		err = -EREMOTEIO;
		goto bail;
	}

	/* extract the 16-bit data field from the completed STACR */
	r = ((r >> EMAC_STACR_PHYD_SHIFT) & EMAC_STACR_PHYD_MASK);

	DBG2(dev, "mdio_read -> %04x" NL, r);
	err = 0;
 bail:
	/* release the shared MDIO lines in reverse acquisition order */
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
	mutex_unlock(&dev->mdio_lock);

	return err == 0 ? r : err;
}
757
/* Perform one MDIO (clause 22) write through this EMAC's STA
 * controller.  Same locking/line-sharing rules as __emac_mdio_read().
 * Failures (idle/completion timeouts) are logged via DBG2 only; the
 * function returns void so the caller cannot observe them. */
static void __emac_mdio_write(struct emac_instance *dev, u8 id, u8 reg,
			      u16 val)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r = 0;
	int n, err = -ETIMEDOUT;

	mutex_lock(&dev->mdio_lock);

	DBG2(dev, "mdio_write(%02x,%02x,%04x)" NL, id, reg, val);

	/* Enable proper MDIO port */
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);

	/* Wait for management interface to be idle (up to ~10us) */
	n = 10;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait idle\n");
			goto bail;
		}
	}

	/* Issue write command: base clock config + WRITE opcode + PHY
	 * register/device addresses + 16-bit data */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_STACR_BASE(dev->opb_bus_freq);
	else
		r = EMAC_STACR_BASE(dev->opb_bus_freq);
	if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
		r |= EMAC_STACR_OC;
	if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
		r |= EMACX_STACR_STAC_WRITE;
	else
		r |= EMAC_STACR_STAC_WRITE;
	r |= (reg & EMAC_STACR_PRA_MASK) |
		((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT) |
		(val << EMAC_STACR_PHYD_SHIFT);
	out_be32(&p->stacr, r);

	/* Wait for write to complete (up to ~100us) */
	n = 100;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait complete\n");
			goto bail;
		}
	}
	err = 0;
 bail:
	/* release the shared MDIO lines in reverse acquisition order */
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
	mutex_unlock(&dev->mdio_lock);
}
818
819 static int emac_mdio_read(struct net_device *ndev, int id, int reg)
820 {
821         struct emac_instance *dev = netdev_priv(ndev);
822         int res;
823
824         res = __emac_mdio_read(dev->mdio_instance ? dev->mdio_instance : dev,
825                                (u8) id, (u8) reg);
826         return res;
827 }
828
829 static void emac_mdio_write(struct net_device *ndev, int id, int reg, int val)
830 {
831         struct emac_instance *dev = netdev_priv(ndev);
832
833         __emac_mdio_write(dev->mdio_instance ? dev->mdio_instance : dev,
834                           (u8) id, (u8) reg, (u16) val);
835 }
836
837 /* Tx lock BH */
/* Apply the current netdev multicast/promiscuous state to the RMR and
 * group hash tables.  Called with TX locked (see the "Tx lock BH"
 * note); only the RX channel is briefly stopped instead of doing a
 * full EMAC reset — see the rationale below. */
static void __emac_set_multicast_list(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 rmr = emac_iff2rmr(dev->ndev);

	DBG(dev, "__multicast %08x" NL, rmr);

	/* I decided to relax register access rules here to avoid
	 * full EMAC reset.
	 *
	 * There is a real problem with EMAC4 core if we use MWSW_001 bit
	 * in MR1 register and do a full EMAC reset.
	 * One TX BD status update is delayed and, after EMAC reset, it
	 * never happens, resulting in TX hung (it'll be recovered by TX
	 * timeout handler eventually, but this is just gross).
	 * So we either have to do full TX reset or try to cheat here :)
	 *
	 * The only required change is to RX mode register, so I *think* all
	 * we need is just to stop RX channel. This seems to work on all
	 * tested SoCs.                                                --ebs
	 *
	 * If we need the full reset, we might just trigger the workqueue
	 * and do it async... a bit nasty but should work --BenH
	 */
	dev->mcast_pending = 0;
	emac_rx_disable(dev);
	if (rmr & EMAC_RMR_MAE)
		emac_hash_mc(dev);
	out_be32(&p->rmr, rmr);
	emac_rx_enable(dev);
}
869
870 /* Tx lock BH */
871 static void emac_set_multicast_list(struct net_device *ndev)
872 {
873         struct emac_instance *dev = netdev_priv(ndev);
874
875         DBG(dev, "multicast" NL);
876
877         BUG_ON(!netif_running(dev->ndev));
878
879         if (dev->no_mcast) {
880                 dev->mcast_pending = 1;
881                 return;
882         }
883         __emac_set_multicast_list(dev);
884 }
885
/* Rebuild the RX ring for a new MTU. Called with the interface running;
 * the RX path is quiesced for the duration. Returns 0 or -ENOMEM.
 */
static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu)
{
	int rx_sync_size = emac_rx_sync_size(new_mtu);
	int rx_skb_size = emac_rx_skb_size(new_mtu);
	int i, ret = 0;

	/* Quiesce the RX path while the ring is rebuilt */
	mutex_lock(&dev->link_lock);
	emac_netif_stop(dev);
	emac_rx_disable(dev);
	mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);

	/* Any partially assembled scatter/gather packet is lost */
	if (dev->rx_sg_skb) {
		++dev->estats.rx_dropped_resize;
		dev_kfree_skb(dev->rx_sg_skb);
		dev->rx_sg_skb = NULL;
	}

	/* Make a first pass over RX ring and mark BDs ready, dropping
	 * non-processed packets on the way. We need this as a separate pass
	 * to simplify error recovery in the case of allocation failure later.
	 */
	for (i = 0; i < NUM_RX_BUFF; ++i) {
		if (dev->rx_desc[i].ctrl & MAL_RX_CTRL_FIRST)
			++dev->estats.rx_dropped_resize;

		dev->rx_desc[i].data_len = 0;
		dev->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY |
		    (i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
	}

	/* Reallocate RX ring only if bigger skb buffers are required */
	if (rx_skb_size <= dev->rx_skb_size)
		goto skip;

	/* Second pass, allocate new skbs */
	for (i = 0; i < NUM_RX_BUFF; ++i) {
		struct sk_buff *skb = alloc_skb(rx_skb_size, GFP_ATOMIC);
		if (!skb) {
			/* NOTE(review): on OOM the ring is left with a mix
			 * of old and new skbs; it is restarted anyway and
			 * -ENOMEM is returned to the caller. */
			ret = -ENOMEM;
			goto oom;
		}

		BUG_ON(!dev->rx_skb[i]);
		dev_kfree_skb(dev->rx_skb[i]);

		/* +2 keeps the IP header word-aligned past the 14-byte
		 * Ethernet header */
		skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
		dev->rx_desc[i].data_ptr =
		    dma_map_single(&dev->ofdev->dev, skb->data - 2, rx_sync_size,
				   DMA_FROM_DEVICE) + 2;
		dev->rx_skb[i] = skb;
	}
 skip:
	/* Check if we need to change "Jumbo" bit in MR1 */
	if ((new_mtu > ETH_DATA_LEN) ^ (dev->ndev->mtu > ETH_DATA_LEN)) {
		/* This is to prevent starting RX channel in emac_rx_enable() */
		set_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);

		dev->ndev->mtu = new_mtu;
		emac_full_tx_reset(dev);
	}

	mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(new_mtu));
 oom:
	/* Restart RX */
	clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
	dev->rx_slot = 0;
	mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
	emac_rx_enable(dev);
	emac_netif_start(dev);
	mutex_unlock(&dev->link_lock);

	return ret;
}
959
960 /* Process ctx, rtnl_lock semaphore */
961 static int emac_change_mtu(struct net_device *ndev, int new_mtu)
962 {
963         struct emac_instance *dev = netdev_priv(ndev);
964         int ret = 0;
965
966         if (new_mtu < EMAC_MIN_MTU || new_mtu > dev->max_mtu)
967                 return -EINVAL;
968
969         DBG(dev, "change_mtu(%d)" NL, new_mtu);
970
971         if (netif_running(ndev)) {
972                 /* Check if we really need to reinitalize RX ring */
973                 if (emac_rx_skb_size(ndev->mtu) != emac_rx_skb_size(new_mtu))
974                         ret = emac_resize_rx_ring(dev, new_mtu);
975         }
976
977         if (!ret) {
978                 ndev->mtu = new_mtu;
979                 dev->rx_skb_size = emac_rx_skb_size(new_mtu);
980                 dev->rx_sync_size = emac_rx_sync_size(new_mtu);
981         }
982
983         return ret;
984 }
985
986 static void emac_clean_tx_ring(struct emac_instance *dev)
987 {
988         int i;
989
990         for (i = 0; i < NUM_TX_BUFF; ++i) {
991                 if (dev->tx_skb[i]) {
992                         dev_kfree_skb(dev->tx_skb[i]);
993                         dev->tx_skb[i] = NULL;
994                         if (dev->tx_desc[i].ctrl & MAL_TX_CTRL_READY)
995                                 ++dev->estats.tx_dropped;
996                 }
997                 dev->tx_desc[i].ctrl = 0;
998                 dev->tx_desc[i].data_ptr = 0;
999         }
1000 }
1001
1002 static void emac_clean_rx_ring(struct emac_instance *dev)
1003 {
1004         int i;
1005
1006         for (i = 0; i < NUM_RX_BUFF; ++i)
1007                 if (dev->rx_skb[i]) {
1008                         dev->rx_desc[i].ctrl = 0;
1009                         dev_kfree_skb(dev->rx_skb[i]);
1010                         dev->rx_skb[i] = NULL;
1011                         dev->rx_desc[i].data_ptr = 0;
1012                 }
1013
1014         if (dev->rx_sg_skb) {
1015                 dev_kfree_skb(dev->rx_sg_skb);
1016                 dev->rx_sg_skb = NULL;
1017         }
1018 }
1019
/* Allocate and DMA-map a fresh RX skb for ring slot "slot", then hand
 * the descriptor back to the hardware. Returns 0 or -ENOMEM.
 */
static inline int emac_alloc_rx_skb(struct emac_instance *dev, int slot,
				    gfp_t flags)
{
	struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags);
	if (unlikely(!skb))
		return -ENOMEM;

	dev->rx_skb[slot] = skb;
	dev->rx_desc[slot].data_len = 0;

	/* +2 keeps the IP header word-aligned past the 14-byte Ethernet
	 * header */
	skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
	dev->rx_desc[slot].data_ptr =
	    dma_map_single(&dev->ofdev->dev, skb->data - 2, dev->rx_sync_size,
			   DMA_FROM_DEVICE) + 2;
	/* data_ptr/data_len must be visible before the EMPTY bit hands the
	 * descriptor to the hardware */
	wmb();
	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);

	return 0;
}
1040
1041 static void emac_print_link_status(struct emac_instance *dev)
1042 {
1043         if (netif_carrier_ok(dev->ndev))
1044                 printk(KERN_INFO "%s: link is up, %d %s%s\n",
1045                        dev->ndev->name, dev->phy.speed,
1046                        dev->phy.duplex == DUPLEX_FULL ? "FDX" : "HDX",
1047                        dev->phy.pause ? ", pause enabled" :
1048                        dev->phy.asym_pause ? ", asymmetric pause enabled" : "");
1049         else
1050                 printk(KERN_INFO "%s: link is down\n", dev->ndev->name);
1051 }
1052
/* Process ctx, rtnl_lock semaphore */
/* net_device open hook: grab the error IRQ, populate the RX ring, start
 * PHY polling, and bring up the MAC and its MAL channels.
 */
static int emac_open(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int err, i;

	DBG(dev, "open" NL);

	/* Setup error IRQ handler */
	err = request_irq(dev->emac_irq, emac_irq, 0, "EMAC", dev);
	if (err) {
		printk(KERN_ERR "%s: failed to request IRQ %d\n",
		       ndev->name, dev->emac_irq);
		return err;
	}

	/* Allocate RX ring */
	for (i = 0; i < NUM_RX_BUFF; ++i)
		if (emac_alloc_rx_skb(dev, i, GFP_KERNEL)) {
			printk(KERN_ERR "%s: failed to allocate RX ring\n",
			       ndev->name);
			goto oom;
		}

	/* Reset ring bookkeeping */
	dev->tx_cnt = dev->tx_slot = dev->ack_slot = dev->rx_slot = 0;
	clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
	dev->rx_sg_skb = NULL;

	mutex_lock(&dev->link_lock);
	dev->opened = 1;

	/* Start PHY polling now.
	 */
	if (dev->phy.address >= 0) {
		int link_poll_interval;
		if (dev->phy.def->ops->poll_link(&dev->phy)) {
			dev->phy.def->ops->read_link(&dev->phy);
			netif_carrier_on(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_ON;
		} else {
			netif_carrier_off(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_OFF;
		}
		/* link_polling must be visible before the work may observe it */
		dev->link_polling = 1;
		wmb();
		schedule_delayed_work(&dev->link_work, link_poll_interval);
		emac_print_link_status(dev);
	} else
		/* No PHY to poll: carrier is considered always up */
		netif_carrier_on(dev->ndev);

	/* Program the MAC, then enable the MAL channels and the netif */
	emac_configure(dev);
	mal_poll_add(dev->mal, &dev->commac);
	mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
	mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(ndev->mtu));
	mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
	emac_tx_enable(dev);
	emac_rx_enable(dev);
	emac_netif_start(dev);

	mutex_unlock(&dev->link_lock);

	return 0;
 oom:
	emac_clean_rx_ring(dev);
	free_irq(dev->emac_irq, dev);

	return -ENOMEM;
}
1121
/* BHs disabled */
#if 0
/* NOTE(review): dead code, compiled out. Decodes the current MR1 register
 * into speed/duplex/pause settings and returns non-zero when they differ
 * from the cached PHY state. Kept for reference.
 */
static int emac_link_differs(struct emac_instance *dev)
{
	u32 r = in_be32(&dev->emacp->mr1);

	int duplex = r & EMAC_MR1_FDE ? DUPLEX_FULL : DUPLEX_HALF;
	int speed, pause, asym_pause;

	if (r & EMAC_MR1_MF_1000)
		speed = SPEED_1000;
	else if (r & EMAC_MR1_MF_100)
		speed = SPEED_100;
	else
		speed = SPEED_10;

	switch (r & (EMAC_MR1_EIFC | EMAC_MR1_APP)) {
	case (EMAC_MR1_EIFC | EMAC_MR1_APP):
		pause = 1;
		asym_pause = 0;
		break;
	case EMAC_MR1_APP:
		pause = 0;
		asym_pause = 1;
		break;
	default:
		pause = asym_pause = 0;
	}
	return speed != dev->phy.speed || duplex != dev->phy.duplex ||
	    pause != dev->phy.pause || asym_pause != dev->phy.asym_pause;
}
#endif
1154
/* Delayed-work handler that polls the PHY link state and resets/
 * reinitializes the MAC on link transitions. Rearms itself while the
 * device stays open.
 */
static void emac_link_timer(struct work_struct *work)
{
	struct emac_instance *dev =
		container_of((struct delayed_work *)work,
			     struct emac_instance, link_work);
	int link_poll_interval;

	mutex_lock(&dev->link_lock);
	DBG2(dev, "link timer" NL);

	/* Device was closed while this work was queued: do not rearm */
	if (!dev->opened)
		goto bail;

	if (dev->phy.def->ops->poll_link(&dev->phy)) {
		if (!netif_carrier_ok(dev->ndev)) {
			/* Get new link parameters */
			dev->phy.def->ops->read_link(&dev->phy);

			netif_carrier_on(dev->ndev);
			/* Full TX reset picks up the new speed/duplex */
			emac_netif_stop(dev);
			emac_full_tx_reset(dev);
			emac_netif_start(dev);
			emac_print_link_status(dev);
		}
		link_poll_interval = PHY_POLL_LINK_ON;
	} else {
		if (netif_carrier_ok(dev->ndev)) {
			netif_carrier_off(dev->ndev);
			netif_tx_disable(dev->ndev);
			emac_reinitialize(dev);
			emac_print_link_status(dev);
		}
		link_poll_interval = PHY_POLL_LINK_OFF;
	}
	schedule_delayed_work(&dev->link_work, link_poll_interval);
 bail:
	mutex_unlock(&dev->link_lock);
}
1193
/* Force the link state to be re-evaluated soon: take carrier down and
 * reschedule the polling work with the short "link off" interval.
 */
static void emac_force_link_update(struct emac_instance *dev)
{
	netif_carrier_off(dev->ndev);
	smp_rmb();
	if (dev->link_polling) {
		cancel_rearming_delayed_work(&dev->link_work);
		/* Re-test: emac_close() may have cleared link_polling while
		 * we were cancelling */
		if (dev->link_polling)
			schedule_delayed_work(&dev->link_work,  PHY_POLL_LINK_OFF);
	}
}
1204
/* Process ctx, rtnl_lock semaphore */
/* net_device stop hook: stop link polling, quiesce the MAC and MAL
 * channels, free the rings and release the IRQ.
 */
static int emac_close(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	DBG(dev, "close" NL);

	/* Stop PHY polling before tearing anything down */
	if (dev->phy.address >= 0) {
		dev->link_polling = 0;
		cancel_rearming_delayed_work(&dev->link_work);
	}
	mutex_lock(&dev->link_lock);
	emac_netif_stop(dev);
	dev->opened = 0;
	mutex_unlock(&dev->link_lock);

	/* Disable MAC and MAL channels, then drop out of MAL polling */
	emac_rx_disable(dev);
	emac_tx_disable(dev);
	mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);
	mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
	mal_poll_del(dev->mal, &dev->commac);

	emac_clean_tx_ring(dev);
	emac_clean_rx_ring(dev);

	free_irq(dev->emac_irq, dev);

	return 0;
}
1234
1235 static inline u16 emac_tx_csum(struct emac_instance *dev,
1236                                struct sk_buff *skb)
1237 {
1238         if (emac_has_feature(dev, EMAC_FTR_HAS_TAH &&
1239                              skb->ip_summed == CHECKSUM_PARTIAL)) {
1240                 ++dev->stats.tx_packets_csum;
1241                 return EMAC_TX_CTRL_TAH_CSUM;
1242         }
1243         return 0;
1244 }
1245
1246 static inline int emac_xmit_finish(struct emac_instance *dev, int len)
1247 {
1248         struct emac_regs __iomem *p = dev->emacp;
1249         struct net_device *ndev = dev->ndev;
1250
1251         /* Send the packet out. If the if makes a significant perf
1252          * difference, then we can store the TMR0 value in "dev"
1253          * instead
1254          */
1255         if (emac_has_feature(dev, EMAC_FTR_EMAC4))
1256                 out_be32(&p->tmr0, EMAC4_TMR0_XMIT);
1257         else
1258                 out_be32(&p->tmr0, EMAC_TMR0_XMIT);
1259
1260         if (unlikely(++dev->tx_cnt == NUM_TX_BUFF)) {
1261                 netif_stop_queue(ndev);
1262                 DBG2(dev, "stopped TX queue" NL);
1263         }
1264
1265         ndev->trans_start = jiffies;
1266         ++dev->stats.tx_packets;
1267         dev->stats.tx_bytes += len;
1268
1269         return 0;
1270 }
1271
/* Tx lock BH */
/* Single-descriptor hard_start_xmit path: map the linear skb data and
 * hand one READY descriptor to the hardware.
 */
static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	unsigned int len = skb->len;
	int slot;

	u16 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
	    MAL_TX_CTRL_LAST | emac_tx_csum(dev, skb);

	slot = dev->tx_slot++;
	if (dev->tx_slot == NUM_TX_BUFF) {
		dev->tx_slot = 0;
		ctrl |= MAL_TX_CTRL_WRAP;
	}

	DBG2(dev, "xmit(%u) %d" NL, len, slot);

	dev->tx_skb[slot] = skb;
	dev->tx_desc[slot].data_ptr = dma_map_single(&dev->ofdev->dev,
						     skb->data, len,
						     DMA_TO_DEVICE);
	dev->tx_desc[slot].data_len = (u16) len;
	/* data_ptr/data_len must be visible before READY hands the BD to
	 * the hardware */
	wmb();
	dev->tx_desc[slot].ctrl = ctrl;

	return emac_xmit_finish(dev, len);
}
1300
1301 #ifdef CONFIG_IBM_NEW_EMAC_TAH
1302 static inline int emac_xmit_split(struct emac_instance *dev, int slot,
1303                                   u32 pd, int len, int last, u16 base_ctrl)
1304 {
1305         while (1) {
1306                 u16 ctrl = base_ctrl;
1307                 int chunk = min(len, MAL_MAX_TX_SIZE);
1308                 len -= chunk;
1309
1310                 slot = (slot + 1) % NUM_TX_BUFF;
1311
1312                 if (last && !len)
1313                         ctrl |= MAL_TX_CTRL_LAST;
1314                 if (slot == NUM_TX_BUFF - 1)
1315                         ctrl |= MAL_TX_CTRL_WRAP;
1316
1317                 dev->tx_skb[slot] = NULL;
1318                 dev->tx_desc[slot].data_ptr = pd;
1319                 dev->tx_desc[slot].data_len = (u16) chunk;
1320                 dev->tx_desc[slot].ctrl = ctrl;
1321                 ++dev->tx_cnt;
1322
1323                 if (!len)
1324                         break;
1325
1326                 pd += chunk;
1327         }
1328         return slot;
1329 }
1330
/* Tx lock BH disabled (SG version for TAH equipped EMACs) */
/* Scatter/gather hard_start_xmit: maps the linear part and each page
 * fragment, splitting them into MAL_MAX_TX_SIZE chunks. Falls back to
 * emac_start_xmit() for small linear skbs.
 */
static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int len = skb->len, chunk;
	int slot, i;
	u16 ctrl;
	u32 pd;

	/* This is common "fast" path */
	if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE))
		return emac_start_xmit(skb, ndev);

	len -= skb->data_len;

	/* Note, this is only an *estimation*, we can still run out of empty
	 * slots because of the additional fragmentation into
	 * MAL_MAX_TX_SIZE-sized chunks
	 */
	if (unlikely(dev->tx_cnt + nr_frags + mal_tx_chunks(len) > NUM_TX_BUFF))
		goto stop_queue;

	ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
	    emac_tx_csum(dev, skb);
	slot = dev->tx_slot;

	/* skb data */
	dev->tx_skb[slot] = NULL;
	chunk = min(len, MAL_MAX_TX_SIZE);
	dev->tx_desc[slot].data_ptr = pd =
	    dma_map_single(&dev->ofdev->dev, skb->data, len, DMA_TO_DEVICE);
	dev->tx_desc[slot].data_len = (u16) chunk;
	len -= chunk;
	if (unlikely(len))
		slot = emac_xmit_split(dev, slot, pd + chunk, len, !nr_frags,
				       ctrl);
	/* skb fragments */
	for (i = 0; i < nr_frags; ++i) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		len = frag->size;

		/* Re-check free slots before committing this fragment */
		if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
			goto undo_frame;

		pd = dma_map_page(&dev->ofdev->dev, frag->page, frag->page_offset, len,
				  DMA_TO_DEVICE);

		slot = emac_xmit_split(dev, slot, pd, len, i == nr_frags - 1,
				       ctrl);
	}

	DBG2(dev, "xmit_sg(%u) %d - %d" NL, skb->len, dev->tx_slot, slot);

	/* Attach skb to the last slot so we don't release it too early */
	dev->tx_skb[slot] = skb;

	/* Send the packet out */
	if (dev->tx_slot == NUM_TX_BUFF - 1)
		ctrl |= MAL_TX_CTRL_WRAP;
	/* All chunk descriptors must be in memory before the first BD is
	 * marked READY */
	wmb();
	dev->tx_desc[dev->tx_slot].ctrl = ctrl;
	dev->tx_slot = (slot + 1) % NUM_TX_BUFF;

	return emac_xmit_finish(dev, skb->len);

 undo_frame:
	/* Well, too bad. Our previous estimation was overly optimistic.
	 * Undo everything.
	 */
	while (slot != dev->tx_slot) {
		dev->tx_desc[slot].ctrl = 0;
		--dev->tx_cnt;
		if (--slot < 0)
			slot = NUM_TX_BUFF - 1;
	}
	++dev->estats.tx_undo;

 stop_queue:
	netif_stop_queue(ndev);
	DBG2(dev, "stopped TX queue" NL);
	return 1;
}
1414 #else
1415 # define emac_start_xmit_sg     emac_start_xmit
1416 #endif  /* !defined(CONFIG_IBM_NEW_EMAC_TAH) */
1417
1418 /* Tx lock BHs */
1419 static void emac_parse_tx_error(struct emac_instance *dev, u16 ctrl)
1420 {
1421         struct emac_error_stats *st = &dev->estats;
1422
1423         DBG(dev, "BD TX error %04x" NL, ctrl);
1424
1425         ++st->tx_bd_errors;
1426         if (ctrl & EMAC_TX_ST_BFCS)
1427                 ++st->tx_bd_bad_fcs;
1428         if (ctrl & EMAC_TX_ST_LCS)
1429                 ++st->tx_bd_carrier_loss;
1430         if (ctrl & EMAC_TX_ST_ED)
1431                 ++st->tx_bd_excessive_deferral;
1432         if (ctrl & EMAC_TX_ST_EC)
1433                 ++st->tx_bd_excessive_collisions;
1434         if (ctrl & EMAC_TX_ST_LC)
1435                 ++st->tx_bd_late_collision;
1436         if (ctrl & EMAC_TX_ST_MC)
1437                 ++st->tx_bd_multple_collisions;
1438         if (ctrl & EMAC_TX_ST_SC)
1439                 ++st->tx_bd_single_collision;
1440         if (ctrl & EMAC_TX_ST_UR)
1441                 ++st->tx_bd_underrun;
1442         if (ctrl & EMAC_TX_ST_SQE)
1443                 ++st->tx_bd_sqe;
1444 }
1445
/* Reap completed TX descriptors: free sent skbs, account errors and wake
 * the queue when enough slots are free again. Runs from MAL poll context.
 */
static void emac_poll_tx(void *param)
{
	struct emac_instance *dev = param;
	u32 bad_mask;

	DBG2(dev, "poll_tx, %d %d" NL, dev->tx_cnt, dev->ack_slot);

	/* TAH-equipped EMACs use a different BD error bit set */
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		bad_mask = EMAC_IS_BAD_TX_TAH;
	else
		bad_mask = EMAC_IS_BAD_TX;

	netif_tx_lock_bh(dev->ndev);
	if (dev->tx_cnt) {
		u16 ctrl;
		int slot = dev->ack_slot, n = 0;
	again:
		ctrl = dev->tx_desc[slot].ctrl;
		if (!(ctrl & MAL_TX_CTRL_READY)) {
			struct sk_buff *skb = dev->tx_skb[slot];
			++n;

			/* Split frames attach the skb only to their last
			 * slot; intermediate slots carry NULL here */
			if (skb) {
				dev_kfree_skb(skb);
				dev->tx_skb[slot] = NULL;
			}
			slot = (slot + 1) % NUM_TX_BUFF;

			if (unlikely(ctrl & bad_mask))
				emac_parse_tx_error(dev, ctrl);

			if (--dev->tx_cnt)
				goto again;
		}
		if (n) {
			dev->ack_slot = slot;
			/* Slots freed: wake the queue if we had stopped it */
			if (netif_queue_stopped(dev->ndev) &&
			    dev->tx_cnt < EMAC_TX_WAKEUP_THRESH)
				netif_wake_queue(dev->ndev);

			DBG2(dev, "tx %d pkts" NL, n);
		}
	}
	netif_tx_unlock_bh(dev->ndev);
}
1491
/* Hand an existing RX skb back to the hardware: re-map the area the CPU
 * may have touched and mark the descriptor EMPTY again.
 */
static inline void emac_recycle_rx_skb(struct emac_instance *dev, int slot,
				       int len)
{
	struct sk_buff *skb = dev->rx_skb[slot];

	DBG2(dev, "recycle %d %d" NL, slot, len);

	if (len)
		dma_map_single(&dev->ofdev->dev, skb->data - 2,
			       EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE);

	dev->rx_desc[slot].data_len = 0;
	/* Clear data_len before EMPTY hands the BD back to the hardware */
	wmb();
	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
}
1508
1509 static void emac_parse_rx_error(struct emac_instance *dev, u16 ctrl)
1510 {
1511         struct emac_error_stats *st = &dev->estats;
1512
1513         DBG(dev, "BD RX error %04x" NL, ctrl);
1514
1515         ++st->rx_bd_errors;
1516         if (ctrl & EMAC_RX_ST_OE)
1517                 ++st->rx_bd_overrun;
1518         if (ctrl & EMAC_RX_ST_BP)
1519                 ++st->rx_bd_bad_packet;
1520         if (ctrl & EMAC_RX_ST_RP)
1521                 ++st->rx_bd_runt_packet;
1522         if (ctrl & EMAC_RX_ST_SE)
1523                 ++st->rx_bd_short_event;
1524         if (ctrl & EMAC_RX_ST_AE)
1525                 ++st->rx_bd_alignment_error;
1526         if (ctrl & EMAC_RX_ST_BFCS)
1527                 ++st->rx_bd_bad_fcs;
1528         if (ctrl & EMAC_RX_ST_PTL)
1529                 ++st->rx_bd_packet_too_long;
1530         if (ctrl & EMAC_RX_ST_ORE)
1531                 ++st->rx_bd_out_of_range;
1532         if (ctrl & EMAC_RX_ST_IRE)
1533                 ++st->rx_bd_in_range;
1534 }
1535
1536 static inline void emac_rx_csum(struct emac_instance *dev,
1537                                 struct sk_buff *skb, u16 ctrl)
1538 {
1539 #ifdef CONFIG_IBM_NEW_EMAC_TAH
1540         if (!ctrl && dev->tah_dev) {
1541                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1542                 ++dev->stats.rx_packets_csum;
1543         }
1544 #endif
1545 }
1546
1547 static inline int emac_rx_sg_append(struct emac_instance *dev, int slot)
1548 {
1549         if (likely(dev->rx_sg_skb != NULL)) {
1550                 int len = dev->rx_desc[slot].data_len;
1551                 int tot_len = dev->rx_sg_skb->len + len;
1552
1553                 if (unlikely(tot_len + 2 > dev->rx_skb_size)) {
1554                         ++dev->estats.rx_dropped_mtu;
1555                         dev_kfree_skb(dev->rx_sg_skb);
1556                         dev->rx_sg_skb = NULL;
1557                 } else {
1558                         cacheable_memcpy(skb_tail_pointer(dev->rx_sg_skb),
1559                                          dev->rx_skb[slot]->data, len);
1560                         skb_put(dev->rx_sg_skb, len);
1561                         emac_recycle_rx_skb(dev, slot, len);
1562                         return 0;
1563                 }
1564         }
1565         emac_recycle_rx_skb(dev, slot, 0);
1566         return -1;
1567 }
1568
/* NAPI poll context */
/* Receive up to "budget" descriptors: single-BD packets are pushed to the
 * stack (small ones are copied to a fresh skb first), multi-BD packets are
 * assembled via dev->rx_sg_skb. Also restarts the RX channel after an
 * RXDE stop once the ring has been drained. Returns the number of BDs
 * processed.
 */
static int emac_poll_rx(void *param, int budget)
{
	struct emac_instance *dev = param;
	int slot = dev->rx_slot, received = 0;

	DBG2(dev, "poll_rx(%d)" NL, budget);

 again:
	while (budget > 0) {
		int len;
		struct sk_buff *skb;
		u16 ctrl = dev->rx_desc[slot].ctrl;

		/* Hardware still owns this BD: nothing more to receive */
		if (ctrl & MAL_RX_CTRL_EMPTY)
			break;

		skb = dev->rx_skb[slot];
		/* Read data_len only after seeing the BD is non-EMPTY */
		mb();
		len = dev->rx_desc[slot].data_len;

		/* Multi-descriptor (scatter/gather) packet */
		if (unlikely(!MAL_IS_SINGLE_RX(ctrl)))
			goto sg;

		ctrl &= EMAC_BAD_RX_MASK;
		if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
			emac_parse_rx_error(dev, ctrl);
			++dev->estats.rx_dropped_error;
			emac_recycle_rx_skb(dev, slot, 0);
			len = 0;
			goto next;
		}

		/* Small packet: copy to a fresh skb and recycle the big
		 * ring buffer instead of replacing it */
		if (len && len < EMAC_RX_COPY_THRESH) {
			struct sk_buff *copy_skb =
			    alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2, GFP_ATOMIC);
			if (unlikely(!copy_skb))
				goto oom;

			skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2);
			cacheable_memcpy(copy_skb->data - 2, skb->data - 2,
					 len + 2);
			emac_recycle_rx_skb(dev, slot, len);
			skb = copy_skb;
		} else if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC)))
			goto oom;

		skb_put(skb, len);
	push_packet:
		skb->dev = dev->ndev;
		skb->protocol = eth_type_trans(skb, dev->ndev);
		emac_rx_csum(dev, skb, ctrl);

		if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
			++dev->estats.rx_dropped_stack;
	next:
		++dev->stats.rx_packets;
	skip:
		dev->stats.rx_bytes += len;
		slot = (slot + 1) % NUM_RX_BUFF;
		--budget;
		++received;
		continue;
	sg:
		/* Scatter/gather: FIRST starts a new packet, middle BDs are
		 * appended, LAST completes and pushes it */
		if (ctrl & MAL_RX_CTRL_FIRST) {
			BUG_ON(dev->rx_sg_skb);
			if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) {
				DBG(dev, "rx OOM %d" NL, slot);
				++dev->estats.rx_dropped_oom;
				emac_recycle_rx_skb(dev, slot, 0);
			} else {
				dev->rx_sg_skb = skb;
				skb_put(skb, len);
			}
		} else if (!emac_rx_sg_append(dev, slot) &&
			   (ctrl & MAL_RX_CTRL_LAST)) {

			skb = dev->rx_sg_skb;
			dev->rx_sg_skb = NULL;

			ctrl &= EMAC_BAD_RX_MASK;
			if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
				emac_parse_rx_error(dev, ctrl);
				++dev->estats.rx_dropped_error;
				dev_kfree_skb(skb);
				len = 0;
			} else
				goto push_packet;
		}
		goto skip;
	oom:
		DBG(dev, "rx OOM %d" NL, slot);
		/* Drop the packet and recycle skb */
		++dev->estats.rx_dropped_oom;
		emac_recycle_rx_skb(dev, slot, 0);
		goto next;
	}

	if (received) {
		DBG2(dev, "rx %d BDs" NL, received);
		dev->rx_slot = slot;
	}

	/* RX channel was stopped by RXDE: restart it after draining the
	 * ring and dropping any half-assembled SG packet */
	if (unlikely(budget && test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags))) {
		mb();
		if (!(dev->rx_desc[slot].ctrl & MAL_RX_CTRL_EMPTY)) {
			DBG2(dev, "rx restart" NL);
			received = 0;
			goto again;
		}

		if (dev->rx_sg_skb) {
			DBG2(dev, "dropping partial rx packet" NL);
			++dev->estats.rx_dropped_error;
			dev_kfree_skb(dev->rx_sg_skb);
			dev->rx_sg_skb = NULL;
		}

		clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
		mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
		emac_rx_enable(dev);
		dev->rx_slot = 0;
	}
	return received;
}
1694
1695 /* NAPI poll context */
1696 static int emac_peek_rx(void *param)
1697 {
1698         struct emac_instance *dev = param;
1699
1700         return !(dev->rx_desc[dev->rx_slot].ctrl & MAL_RX_CTRL_EMPTY);
1701 }
1702
1703 /* NAPI poll context */
1704 static int emac_peek_rx_sg(void *param)
1705 {
1706         struct emac_instance *dev = param;
1707
1708         int slot = dev->rx_slot;
1709         while (1) {
1710                 u16 ctrl = dev->rx_desc[slot].ctrl;
1711                 if (ctrl & MAL_RX_CTRL_EMPTY)
1712                         return 0;
1713                 else if (ctrl & MAL_RX_CTRL_LAST)
1714                         return 1;
1715
1716                 slot = (slot + 1) % NUM_RX_BUFF;
1717
1718                 /* I'm just being paranoid here :) */
1719                 if (unlikely(slot == dev->rx_slot))
1720                         return 0;
1721         }
1722 }
1723
1724 /* Hard IRQ */
1725 static void emac_rxde(void *param)
1726 {
1727         struct emac_instance *dev = param;
1728
1729         ++dev->estats.rx_stopped;
1730         emac_rx_disable_async(dev);
1731 }
1732
/* Hard IRQ handler for EMAC error events.  The RX/TX data path is
 * serviced by the MAL interrupts, not here; this handler only tallies
 * error conditions reported through the EMAC interrupt status register. */
static irqreturn_t emac_irq(int irq, void *dev_instance)
{
	struct emac_instance *dev = dev_instance;
	struct emac_regs __iomem *p = dev->emacp;
	struct emac_error_stats *st = &dev->estats;
	u32 isr;

	spin_lock(&dev->lock);

	/* Latch the pending events, then write the same mask back to
	 * acknowledge them (NOTE(review): ISR appears write-1-to-clear —
	 * confirm against the EMAC manual) */
	isr = in_be32(&p->isr);
	out_be32(&p->isr, isr);

	DBG(dev, "isr = %08x" NL, isr);

	/* One counter bump per event bit present in this ISR snapshot */
	if (isr & EMAC4_ISR_TXPE)
		++st->tx_parity;
	if (isr & EMAC4_ISR_RXPE)
		++st->rx_parity;
	if (isr & EMAC4_ISR_TXUE)
		++st->tx_underrun;
	if (isr & EMAC4_ISR_RXOE)
		++st->rx_fifo_overrun;
	if (isr & EMAC_ISR_OVR)
		++st->rx_overrun;
	if (isr & EMAC_ISR_BP)
		++st->rx_bad_packet;
	if (isr & EMAC_ISR_RP)
		++st->rx_runt_packet;
	if (isr & EMAC_ISR_SE)
		++st->rx_short_event;
	if (isr & EMAC_ISR_ALE)
		++st->rx_alignment_error;
	if (isr & EMAC_ISR_BFCS)
		++st->rx_bad_fcs;
	if (isr & EMAC_ISR_PTLE)
		++st->rx_packet_too_long;
	if (isr & EMAC_ISR_ORE)
		++st->rx_out_of_range;
	if (isr & EMAC_ISR_IRE)
		++st->rx_in_range;
	if (isr & EMAC_ISR_SQE)
		++st->tx_sqe;
	if (isr & EMAC_ISR_TE)
		++st->tx_errors;

	spin_unlock(&dev->lock);

	return IRQ_HANDLED;
}
1783
/* Build the "legacy" struct net_device_stats view from the driver's own
 * hardware (stats) and software error (estats) counters.  The driver
 * counters are wider than unsigned long on 32-bit (hence the truncating
 * casts — presumably u64, see core.h; TODO confirm).  Snapshot under
 * dev->lock so the IRQ handler cannot update counters mid-aggregation. */
static struct net_device_stats *emac_stats(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	struct emac_stats *st = &dev->stats;
	struct emac_error_stats *est = &dev->estats;
	struct net_device_stats *nst = &dev->nstats;
	unsigned long flags;

	DBG2(dev, "stats" NL);

	/* Compute "legacy" statistics */
	spin_lock_irqsave(&dev->lock, flags);
	nst->rx_packets = (unsigned long)st->rx_packets;
	nst->rx_bytes = (unsigned long)st->rx_bytes;
	nst->tx_packets = (unsigned long)st->tx_packets;
	nst->tx_bytes = (unsigned long)st->tx_bytes;
	/* Every flavour of drop is folded into the single legacy field */
	nst->rx_dropped = (unsigned long)(est->rx_dropped_oom +
					  est->rx_dropped_error +
					  est->rx_dropped_resize +
					  est->rx_dropped_mtu);
	nst->tx_dropped = (unsigned long)est->tx_dropped;

	nst->rx_errors = (unsigned long)est->rx_bd_errors;
	/* BD-reported and ISR-reported errors are summed per category */
	nst->rx_fifo_errors = (unsigned long)(est->rx_bd_overrun +
					      est->rx_fifo_overrun +
					      est->rx_overrun);
	nst->rx_frame_errors = (unsigned long)(est->rx_bd_alignment_error +
					       est->rx_alignment_error);
	nst->rx_crc_errors = (unsigned long)(est->rx_bd_bad_fcs +
					     est->rx_bad_fcs);
	nst->rx_length_errors = (unsigned long)(est->rx_bd_runt_packet +
						est->rx_bd_short_event +
						est->rx_bd_packet_too_long +
						est->rx_bd_out_of_range +
						est->rx_bd_in_range +
						est->rx_runt_packet +
						est->rx_short_event +
						est->rx_packet_too_long +
						est->rx_out_of_range +
						est->rx_in_range);

	nst->tx_errors = (unsigned long)(est->tx_bd_errors + est->tx_errors);
	nst->tx_fifo_errors = (unsigned long)(est->tx_bd_underrun +
					      est->tx_underrun);
	nst->tx_carrier_errors = (unsigned long)est->tx_bd_carrier_loss;
	nst->collisions = (unsigned long)(est->tx_bd_excessive_deferral +
					  est->tx_bd_excessive_collisions +
					  est->tx_bd_late_collision +
					  est->tx_bd_multple_collisions);
	spin_unlock_irqrestore(&dev->lock, flags);
	return nst;
}
1836
1837 static struct mal_commac_ops emac_commac_ops = {
1838         .poll_tx = &emac_poll_tx,
1839         .poll_rx = &emac_poll_rx,
1840         .peek_rx = &emac_peek_rx,
1841         .rxde = &emac_rxde,
1842 };
1843
1844 static struct mal_commac_ops emac_commac_sg_ops = {
1845         .poll_tx = &emac_poll_tx,
1846         .poll_rx = &emac_poll_rx,
1847         .peek_rx = &emac_peek_rx_sg,
1848         .rxde = &emac_rxde,
1849 };
1850
1851 /* Ethtool support */
1852 static int emac_ethtool_get_settings(struct net_device *ndev,
1853                                      struct ethtool_cmd *cmd)
1854 {
1855         struct emac_instance *dev = netdev_priv(ndev);
1856
1857         cmd->supported = dev->phy.features;
1858         cmd->port = PORT_MII;
1859         cmd->phy_address = dev->phy.address;
1860         cmd->transceiver =
1861             dev->phy.address >= 0 ? XCVR_EXTERNAL : XCVR_INTERNAL;
1862
1863         mutex_lock(&dev->link_lock);
1864         cmd->advertising = dev->phy.advertising;
1865         cmd->autoneg = dev->phy.autoneg;
1866         cmd->speed = dev->phy.speed;
1867         cmd->duplex = dev->phy.duplex;
1868         mutex_unlock(&dev->link_lock);
1869
1870         return 0;
1871 }
1872
1873 static int emac_ethtool_set_settings(struct net_device *ndev,
1874                                      struct ethtool_cmd *cmd)
1875 {
1876         struct emac_instance *dev = netdev_priv(ndev);
1877         u32 f = dev->phy.features;
1878
1879         DBG(dev, "set_settings(%d, %d, %d, 0x%08x)" NL,
1880             cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising);
1881
1882         /* Basic sanity checks */
1883         if (dev->phy.address < 0)
1884                 return -EOPNOTSUPP;
1885         if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
1886                 return -EINVAL;
1887         if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
1888                 return -EINVAL;
1889         if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL)
1890                 return -EINVAL;
1891
1892         if (cmd->autoneg == AUTONEG_DISABLE) {
1893                 switch (cmd->speed) {
1894                 case SPEED_10:
1895                         if (cmd->duplex == DUPLEX_HALF
1896                             && !(f & SUPPORTED_10baseT_Half))
1897                                 return -EINVAL;
1898                         if (cmd->duplex == DUPLEX_FULL
1899                             && !(f & SUPPORTED_10baseT_Full))
1900                                 return -EINVAL;
1901                         break;
1902                 case SPEED_100:
1903                         if (cmd->duplex == DUPLEX_HALF
1904                             && !(f & SUPPORTED_100baseT_Half))
1905                                 return -EINVAL;
1906                         if (cmd->duplex == DUPLEX_FULL
1907                             && !(f & SUPPORTED_100baseT_Full))
1908                                 return -EINVAL;
1909                         break;
1910                 case SPEED_1000:
1911                         if (cmd->duplex == DUPLEX_HALF
1912                             && !(f & SUPPORTED_1000baseT_Half))
1913                                 return -EINVAL;
1914                         if (cmd->duplex == DUPLEX_FULL
1915                             && !(f & SUPPORTED_1000baseT_Full))
1916                                 return -EINVAL;
1917                         break;
1918                 default:
1919                         return -EINVAL;
1920                 }
1921
1922                 mutex_lock(&dev->link_lock);
1923                 dev->phy.def->ops->setup_forced(&dev->phy, cmd->speed,
1924                                                 cmd->duplex);
1925                 mutex_unlock(&dev->link_lock);
1926
1927         } else {
1928                 if (!(f & SUPPORTED_Autoneg))
1929                         return -EINVAL;
1930
1931                 mutex_lock(&dev->link_lock);
1932                 dev->phy.def->ops->setup_aneg(&dev->phy,
1933                                               (cmd->advertising & f) |
1934                                               (dev->phy.advertising &
1935                                                (ADVERTISED_Pause |
1936                                                 ADVERTISED_Asym_Pause)));
1937                 mutex_unlock(&dev->link_lock);
1938         }
1939         emac_force_link_update(dev);
1940
1941         return 0;
1942 }
1943
1944 static void emac_ethtool_get_ringparam(struct net_device *ndev,
1945                                        struct ethtool_ringparam *rp)
1946 {
1947         rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF;
1948         rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF;
1949 }
1950
1951 static void emac_ethtool_get_pauseparam(struct net_device *ndev,
1952                                         struct ethtool_pauseparam *pp)
1953 {
1954         struct emac_instance *dev = netdev_priv(ndev);
1955
1956         mutex_lock(&dev->link_lock);
1957         if ((dev->phy.features & SUPPORTED_Autoneg) &&
1958             (dev->phy.advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause)))
1959                 pp->autoneg = 1;
1960
1961         if (dev->phy.duplex == DUPLEX_FULL) {
1962                 if (dev->phy.pause)
1963                         pp->rx_pause = pp->tx_pause = 1;
1964                 else if (dev->phy.asym_pause)
1965                         pp->tx_pause = 1;
1966         }
1967         mutex_unlock(&dev->link_lock);
1968 }
1969
1970 static u32 emac_ethtool_get_rx_csum(struct net_device *ndev)
1971 {
1972         struct emac_instance *dev = netdev_priv(ndev);
1973
1974         return dev->tah_dev != NULL;
1975 }
1976
1977 static int emac_get_regs_len(struct emac_instance *dev)
1978 {
1979         if (emac_has_feature(dev, EMAC_FTR_EMAC4))
1980                 return sizeof(struct emac_ethtool_regs_subhdr) +
1981                         EMAC4_ETHTOOL_REGS_SIZE;
1982         else
1983                 return sizeof(struct emac_ethtool_regs_subhdr) +
1984                         EMAC_ETHTOOL_REGS_SIZE;
1985 }
1986
1987 static int emac_ethtool_get_regs_len(struct net_device *ndev)
1988 {
1989         struct emac_instance *dev = netdev_priv(ndev);
1990         int size;
1991
1992         size = sizeof(struct emac_ethtool_regs_hdr) +
1993                 emac_get_regs_len(dev) + mal_get_regs_len(dev->mal);
1994         if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
1995                 size += zmii_get_regs_len(dev->zmii_dev);
1996         if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
1997                 size += rgmii_get_regs_len(dev->rgmii_dev);
1998         if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
1999                 size += tah_get_regs_len(dev->tah_dev);
2000
2001         return size;
2002 }
2003
2004 static void *emac_dump_regs(struct emac_instance *dev, void *buf)
2005 {
2006         struct emac_ethtool_regs_subhdr *hdr = buf;
2007
2008         hdr->index = dev->cell_index;
2009         if (emac_has_feature(dev, EMAC_FTR_EMAC4)) {
2010                 hdr->version = EMAC4_ETHTOOL_REGS_VER;
2011                 memcpy_fromio(hdr + 1, dev->emacp, EMAC4_ETHTOOL_REGS_SIZE);
2012                 return ((void *)(hdr + 1) + EMAC4_ETHTOOL_REGS_SIZE);
2013         } else {
2014                 hdr->version = EMAC_ETHTOOL_REGS_VER;
2015                 memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE);
2016                 return ((void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE);
2017         }
2018 }
2019
/* Fill the ethtool register dump: MAL then EMAC registers always, then
 * one section per optional companion block, flagged in hdr->components.
 * Each *_dump_regs helper returns the next free position in buf, so the
 * sections are laid out back to back. */
static void emac_ethtool_get_regs(struct net_device *ndev,
				  struct ethtool_regs *regs, void *buf)
{
	struct emac_instance *dev = netdev_priv(ndev);
	struct emac_ethtool_regs_hdr *hdr = buf;

	hdr->components = 0;
	buf = hdr + 1;

	buf = mal_dump_regs(dev->mal, buf);
	buf = emac_dump_regs(dev, buf);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) {
		hdr->components |= EMAC_ETHTOOL_REGS_ZMII;
		buf = zmii_dump_regs(dev->zmii_dev, buf);
	}
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) {
		hdr->components |= EMAC_ETHTOOL_REGS_RGMII;
		buf = rgmii_dump_regs(dev->rgmii_dev, buf);
	}
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH)) {
		hdr->components |= EMAC_ETHTOOL_REGS_TAH;
		buf = tah_dump_regs(dev->tah_dev, buf);
	}
}
2044
2045 static int emac_ethtool_nway_reset(struct net_device *ndev)
2046 {
2047         struct emac_instance *dev = netdev_priv(ndev);
2048         int res = 0;
2049
2050         DBG(dev, "nway_reset" NL);
2051
2052         if (dev->phy.address < 0)
2053                 return -EOPNOTSUPP;
2054
2055         mutex_lock(&dev->link_lock);
2056         if (!dev->phy.autoneg) {
2057                 res = -EINVAL;
2058                 goto out;
2059         }
2060
2061         dev->phy.def->ops->setup_aneg(&dev->phy, dev->phy.advertising);
2062  out:
2063         mutex_unlock(&dev->link_lock);
2064         emac_force_link_update(dev);
2065         return res;
2066 }
2067
/* Number of u64 counters exposed through the ethtool statistics API */
static int emac_ethtool_get_stats_count(struct net_device *ndev)
{
	return EMAC_ETHTOOL_STATS_COUNT;
}
2072
2073 static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset,
2074                                      u8 * buf)
2075 {
2076         if (stringset == ETH_SS_STATS)
2077                 memcpy(buf, &emac_stats_keys, sizeof(emac_stats_keys));
2078 }
2079
2080 static void emac_ethtool_get_ethtool_stats(struct net_device *ndev,
2081                                            struct ethtool_stats *estats,
2082                                            u64 * tmp_stats)
2083 {
2084         struct emac_instance *dev = netdev_priv(ndev);
2085
2086         memcpy(tmp_stats, &dev->stats, sizeof(dev->stats));
2087         tmp_stats += sizeof(dev->stats) / sizeof(u64);
2088         memcpy(tmp_stats, &dev->estats, sizeof(dev->estats));
2089 }
2090
2091 static void emac_ethtool_get_drvinfo(struct net_device *ndev,
2092                                      struct ethtool_drvinfo *info)
2093 {
2094         struct emac_instance *dev = netdev_priv(ndev);
2095
2096         strcpy(info->driver, "ibm_emac");
2097         strcpy(info->version, DRV_VERSION);
2098         info->fw_version[0] = '\0';
2099         sprintf(info->bus_info, "PPC 4xx EMAC-%d %s",
2100                 dev->cell_index, dev->ofdev->node->full_name);
2101         info->n_stats = emac_ethtool_get_stats_count(ndev);
2102         info->regdump_len = emac_ethtool_get_regs_len(ndev);
2103 }
2104
/* ethtool entry points.  TX checksum and SG state are reported from the
 * generic netdev feature flags; everything else is implemented above. */
static const struct ethtool_ops emac_ethtool_ops = {
	.get_settings = emac_ethtool_get_settings,
	.set_settings = emac_ethtool_set_settings,
	.get_drvinfo = emac_ethtool_get_drvinfo,

	.get_regs_len = emac_ethtool_get_regs_len,
	.get_regs = emac_ethtool_get_regs,

	.nway_reset = emac_ethtool_nway_reset,

	.get_ringparam = emac_ethtool_get_ringparam,
	.get_pauseparam = emac_ethtool_get_pauseparam,

	.get_rx_csum = emac_ethtool_get_rx_csum,

	.get_strings = emac_ethtool_get_strings,
	.get_stats_count = emac_ethtool_get_stats_count,
	.get_ethtool_stats = emac_ethtool_get_ethtool_stats,

	.get_link = ethtool_op_get_link,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.get_sg = ethtool_op_get_sg,
};
2128
/* Private MII ioctls.  The u16 array overlays rq->ifr_ifru in the same
 * layout as struct mii_ioctl_data: data[0]=phy_id, data[1]=reg_num,
 * data[2]=val_in, data[3]=val_out — NOTE(review): presumably, based on
 * the indices used below; confirm against mii_ioctl_data. */
static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
	struct emac_instance *dev = netdev_priv(ndev);
	uint16_t *data = (uint16_t *) & rq->ifr_ifru;

	DBG(dev, "ioctl %08x" NL, cmd);

	/* All of these ioctls talk to the PHY; reject in PHY-less setups */
	if (dev->phy.address < 0)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCDEVPRIVATE:
		data[0] = dev->phy.address;
		/* Fall through */
	case SIOCGMIIREG:
	case SIOCDEVPRIVATE + 1:
		data[3] = emac_mdio_read(ndev, dev->phy.address, data[1]);
		return 0;

	case SIOCSMIIREG:
	case SIOCDEVPRIVATE + 2:
		/* Register writes can reconfigure the link: admin only */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		emac_mdio_write(ndev, dev->phy.address, data[1], data[2]);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
2159
/* One probe-time dependency: tracks the staged resolution of a device
 * tree phandle to its of_device and, finally, its driver data */
struct emac_depentry {
	u32                     phandle;	/* DT phandle; 0 = no dependency */
	struct device_node      *node;		/* resolved device node (refcounted) */
	struct of_device        *ofdev;		/* resolved OF device (refcounted) */
	void                    *drvdata;	/* dependency driver's private data */
};
2166
/* Fixed slots in the dependency table used by emac_check_deps() and
 * emac_wait_deps(); PREV refers to the previous EMAC in the boot list */
#define EMAC_DEP_MAL_IDX        0
#define EMAC_DEP_ZMII_IDX       1
#define EMAC_DEP_RGMII_IDX      2
#define EMAC_DEP_TAH_IDX        3
#define EMAC_DEP_MDIO_IDX       4
#define EMAC_DEP_PREV_IDX       5
#define EMAC_DEP_COUNT          6
2174
/* Make one resolution pass over the dependency table, advancing each
 * entry through phandle -> node -> of_device -> driver data as far as
 * currently possible.  Returns non-zero once every dependency is fully
 * resolved (or absent).  Called repeatedly from emac_wait_deps() until
 * it succeeds or the wait times out. */
static int __devinit emac_check_deps(struct emac_instance *dev,
				     struct emac_depentry *deps)
{
	int i, there = 0;
	struct device_node *np;

	for (i = 0; i < EMAC_DEP_COUNT; i++) {
		/* no dependency on that item, allright */
		if (deps[i].phandle == 0) {
			there++;
			continue;
		}
		/* special case for blist as the dependency might go away */
		if (i == EMAC_DEP_PREV_IDX) {
			np = *(dev->blist - 1);
			if (np == NULL) {
				deps[i].phandle = 0;
				there++;
				continue;
			}
			if (deps[i].node == NULL)
				deps[i].node = of_node_get(np);
		}
		/* Each stage is attempted at most once per pass; a NULL at
		 * any stage leaves the entry unresolved until next pass */
		if (deps[i].node == NULL)
			deps[i].node = of_find_node_by_phandle(deps[i].phandle);
		if (deps[i].node == NULL)
			continue;
		if (deps[i].ofdev == NULL)
			deps[i].ofdev = of_find_device_by_node(deps[i].node);
		if (deps[i].ofdev == NULL)
			continue;
		if (deps[i].drvdata == NULL)
			deps[i].drvdata = dev_get_drvdata(&deps[i].ofdev->dev);
		if (deps[i].drvdata != NULL)
			there++;
	}
	return (there == EMAC_DEP_COUNT);
}
2213
2214 static void emac_put_deps(struct emac_instance *dev)
2215 {
2216         if (dev->mal_dev)
2217                 of_dev_put(dev->mal_dev);
2218         if (dev->zmii_dev)
2219                 of_dev_put(dev->zmii_dev);
2220         if (dev->rgmii_dev)
2221                 of_dev_put(dev->rgmii_dev);
2222         if (dev->mdio_dev)
2223                 of_dev_put(dev->mdio_dev);
2224         if (dev->tah_dev)
2225                 of_dev_put(dev->tah_dev);
2226 }
2227
2228 static int __devinit emac_of_bus_notify(struct notifier_block *nb,
2229                                         unsigned long action, void *data)
2230 {
2231         /* We are only intereted in device addition */
2232         if (action == BUS_NOTIFY_BOUND_DRIVER)
2233                 wake_up_all(&emac_probe_wait);
2234         return 0;
2235 }
2236
/* Registered around the dependency wait in emac_wait_deps() so driver
 * binds on the OF bus re-trigger the dependency check */
static struct notifier_block emac_of_bus_notifier = {
	.notifier_call = emac_of_bus_notify
};
2240
/* Wait (with timeout) for all companion devices this EMAC depends on to
 * be probed, then record their of_device pointers in *dev.
 *
 * On success the ofdev references are kept (released later through
 * emac_put_deps()) except the PREV entry, which is only used for
 * ordering and is dropped immediately.  On failure all references are
 * dropped.  Node references are dropped in both cases.
 *
 * Returns 0 on success, -ENODEV if the dependencies did not appear
 * within EMAC_PROBE_DEP_TIMEOUT. */
static int __devinit emac_wait_deps(struct emac_instance *dev)
{
	struct emac_depentry deps[EMAC_DEP_COUNT];
	int i, err;

	memset(&deps, 0, sizeof(deps));

	deps[EMAC_DEP_MAL_IDX].phandle = dev->mal_ph;
	deps[EMAC_DEP_ZMII_IDX].phandle = dev->zmii_ph;
	deps[EMAC_DEP_RGMII_IDX].phandle = dev->rgmii_ph;
	if (dev->tah_ph)
		deps[EMAC_DEP_TAH_IDX].phandle = dev->tah_ph;
	if (dev->mdio_ph)
		deps[EMAC_DEP_MDIO_IDX].phandle = dev->mdio_ph;
	/* Any non-zero phandle marks the PREV slot as "must wait for the
	 * previous EMAC in the boot list" */
	if (dev->blist && dev->blist > emac_boot_list)
		deps[EMAC_DEP_PREV_IDX].phandle = 0xffffffffu;
	bus_register_notifier(&of_platform_bus_type, &emac_of_bus_notifier);
	wait_event_timeout(emac_probe_wait,
			   emac_check_deps(dev, deps),
			   EMAC_PROBE_DEP_TIMEOUT);
	bus_unregister_notifier(&of_platform_bus_type, &emac_of_bus_notifier);
	/* Re-check after unregistering to get the final verdict */
	err = emac_check_deps(dev, deps) ? 0 : -ENODEV;
	for (i = 0; i < EMAC_DEP_COUNT; i++) {
		if (deps[i].node)
			of_node_put(deps[i].node);
		if (err && deps[i].ofdev)
			of_dev_put(deps[i].ofdev);
	}
	if (err == 0) {
		dev->mal_dev = deps[EMAC_DEP_MAL_IDX].ofdev;
		dev->zmii_dev = deps[EMAC_DEP_ZMII_IDX].ofdev;
		dev->rgmii_dev = deps[EMAC_DEP_RGMII_IDX].ofdev;
		dev->tah_dev = deps[EMAC_DEP_TAH_IDX].ofdev;
		dev->mdio_dev = deps[EMAC_DEP_MDIO_IDX].ofdev;
	}
	/* PREV is only an ordering constraint; drop its reference now */
	if (deps[EMAC_DEP_PREV_IDX].ofdev)
		of_dev_put(deps[EMAC_DEP_PREV_IDX].ofdev);
	return err;
}
2280
2281 static int __devinit emac_read_uint_prop(struct device_node *np, const char *name,
2282                                          u32 *val, int fatal)
2283 {
2284         int len;
2285         const u32 *prop = of_get_property(np, name, &len);
2286         if (prop == NULL || len < sizeof(u32)) {
2287                 if (fatal)
2288                         printk(KERN_ERR "%s: missing %s property\n",
2289                                np->full_name, name);
2290                 return -ENODEV;
2291         }
2292         *val = *prop;
2293         return 0;
2294 }
2295
/* Probe and initialize the PHY attached to this EMAC.
 *
 * When neither a phy-address nor a phy-map was given in the device tree
 * the port is treated as PHY-less with fixed 100/FD parameters.
 * Otherwise the MDIO bus is scanned (restricted by phy_map and the
 * global busy_phy_map) for a responding PHY, which is then configured
 * for autoneg or for the best forced mode it supports.
 *
 * Returns 0 on success, -ENXIO when no PHY answered the scan. */
static int __devinit emac_init_phy(struct emac_instance *dev)
{
	struct device_node *np = dev->ofdev->node;
	struct net_device *ndev = dev->ndev;
	u32 phy_map, adv;
	int i;

	dev->phy.dev = ndev;
	dev->phy.mode = dev->phy_mode;

	/* PHY-less configuration.
	 * XXX I probably should move these settings to the dev tree
	 */
	if (dev->phy_address == 0xffffffff && dev->phy_map == 0xffffffff) {
		emac_reset(dev);

		/* Fixed link parameters for the PHY-less case */
		dev->phy.address = -1;
		dev->phy.features = SUPPORTED_100baseT_Full | SUPPORTED_MII;
		dev->phy.pause = 1;

		return 0;
	}

	/* busy_phy_map is shared between EMAC instances; hold the map
	 * lock across the whole scan */
	mutex_lock(&emac_phy_map_lock);
	phy_map = dev->phy_map | busy_phy_map;

	DBG(dev, "PHY maps %08x %08x" NL, dev->phy_map, busy_phy_map);

	dev->phy.mdio_read = emac_mdio_read;
	dev->phy.mdio_write = emac_mdio_write;

	/* Configure EMAC with defaults so we can at least use MDIO
	 * This is needed mostly for 440GX
	 */
	if (emac_phy_gpcs(dev->phy.mode)) {
		/* XXX
		 * Make GPCS PHY address equal to EMAC index.
		 * We probably should take into account busy_phy_map
		 * and/or phy_map here.
		 *
		 * Note that the busy_phy_map is currently global
		 * while it should probably be per-ASIC...
		 */
		dev->phy.address = dev->cell_index;
	}

	emac_configure(dev);

	/* An explicit phy-address overrides the map: scan only that one */
	if (dev->phy_address != 0xffffffff)
		phy_map = ~(1 << dev->phy_address);

	/* Scan addresses 0..31, skipping those masked in phy_map */
	for (i = 0; i < 0x20; phy_map >>= 1, ++i)
		if (!(phy_map & 1)) {
			int r;
			busy_phy_map |= 1 << i;

			/* Quick check if there is a PHY at the address */
			r = emac_mdio_read(dev->ndev, i, MII_BMCR);
			if (r == 0xffff || r < 0)
				continue;
			if (!emac_mii_phy_probe(&dev->phy, i))
				break;
		}
	mutex_unlock(&emac_phy_map_lock);
	if (i == 0x20) {
		printk(KERN_WARNING "%s: can't find PHY!\n", np->full_name);
		return -ENXIO;
	}

	/* Init PHY */
	if (dev->phy.def->ops->init)
		dev->phy.def->ops->init(&dev->phy);

	/* Disable any PHY features not supported by the platform */
	dev->phy.def->features &= ~dev->phy_feat_exc;

	/* Setup initial link parameters */
	if (dev->phy.features & SUPPORTED_Autoneg) {
		adv = dev->phy.features;
		if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x))
			adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
		/* Restart autonegotiation */
		dev->phy.def->ops->setup_aneg(&dev->phy, adv);
	} else {
		u32 f = dev->phy.def->features;
		int speed = SPEED_10, fd = DUPLEX_HALF;

		/* Select highest supported speed/duplex */
		if (f & SUPPORTED_1000baseT_Full) {
			speed = SPEED_1000;
			fd = DUPLEX_FULL;
		} else if (f & SUPPORTED_1000baseT_Half)
			speed = SPEED_1000;
		else if (f & SUPPORTED_100baseT_Full) {
			speed = SPEED_100;
			fd = DUPLEX_FULL;
		} else if (f & SUPPORTED_100baseT_Half)
			speed = SPEED_100;
		else if (f & SUPPORTED_10baseT_Full)
			fd = DUPLEX_FULL;

		/* Force link parameters */
		dev->phy.def->ops->setup_forced(&dev->phy, speed, fd);
	}
	return 0;
}
2405
2406 static int __devinit emac_init_config(struct emac_instance *dev)
2407 {
2408         struct device_node *np = dev->ofdev->node;
2409         const void *p;
2410         unsigned int plen;
2411         const char *pm, *phy_modes[] = {
2412                 [PHY_MODE_NA] = "",
2413                 [PHY_MODE_MII] = "mii",
2414                 [PHY_MODE_RMII] = "rmii",
2415                 [PHY_MODE_SMII] = "smii",
2416                 [PHY_MODE_RGMII] = "rgmii",
2417                 [PHY_MODE_TBI] = "tbi",
2418                 [PHY_MODE_GMII] = "gmii",
2419                 [PHY_MODE_RTBI] = "rtbi",
2420                 [PHY_MODE_SGMII] = "sgmii",
2421         };
2422
2423         /* Read config from device-tree */
2424         if (emac_read_uint_prop(np, "mal-device", &dev->mal_ph, 1))
2425                 return -ENXIO;
2426         if (emac_read_uint_prop(np, "mal-tx-channel", &dev->mal_tx_chan, 1))
2427                 return -ENXIO;
2428         if (emac_read_uint_prop(np, "mal-rx-channel", &dev->mal_rx_chan, 1))
2429                 return -ENXIO;
2430         if (emac_read_uint_prop(np, "cell-index", &dev->cell_index, 1))
2431                 return -ENXIO;
2432         if (emac_read_uint_prop(np, "max-frame-size", &dev->max_mtu, 0))
2433                 dev->max_mtu = 1500;
2434         if (emac_read_uint_prop(np, "rx-fifo-size", &dev->rx_fifo_size, 0))
2435                 dev->rx_fifo_size = 2048;
2436         if (emac_read_uint_prop(np, "tx-fifo-size", &dev->tx_fifo_size, 0))
2437                 dev->tx_fifo_size = 2048;
2438         if (emac_read_uint_prop(np, "rx-fifo-size-gige", &dev->rx_fifo_size_gige, 0))
2439                 dev->rx_fifo_size_gige = dev->rx_fifo_size;
2440         if (emac_read_uint_prop(np, "tx-fifo-size-gige", &dev->tx_fifo_size_gige, 0))
2441                 dev->tx_fifo_size_gige = dev->tx_fifo_size;
2442         if (emac_read_uint_prop(np, "phy-address", &dev->phy_address, 0))
2443                 dev->phy_address = 0xffffffff;
2444         if (emac_read_uint_prop(np, "phy-map", &dev->phy_map, 0))
2445                 dev->phy_map = 0xffffffff;
2446         if (emac_read_uint_prop(np->parent, "clock-frequency", &dev->opb_bus_freq, 1))
2447                 return -ENXIO;
2448         if (emac_read_uint_prop(np, "tah-device", &dev->tah_ph, 0))
2449                 dev->tah_ph = 0;
2450         if (emac_read_uint_prop(np, "tah-channel", &dev->tah_port, 0))
2451                 dev->tah_port = 0;
2452         if (emac_read_uint_prop(np, "mdio-device", &dev->mdio_ph, 0))
2453                 dev->mdio_ph = 0;
2454         if (emac_read_uint_prop(np, "zmii-device", &dev->zmii_ph, 0))
2455                 dev->zmii_ph = 0;;
2456         if (emac_read_uint_prop(np, "zmii-channel", &dev->zmii_port, 0))
2457                 dev->zmii_port = 0xffffffff;;
2458         if (emac_read_uint_prop(np, "rgmii-device", &dev->rgmii_ph, 0))
2459                 dev->rgmii_ph = 0;;
2460         if (emac_read_uint_prop(np, "rgmii-channel", &dev->rgmii_port, 0))
2461                 dev->rgmii_port = 0xffffffff;;
2462         if (emac_read_uint_prop(np, "fifo-entry-size", &dev->fifo_entry_size, 0))
2463                 dev->fifo_entry_size = 16;
2464         if (emac_read_uint_prop(np, "mal-burst-size", &dev->mal_burst_size, 0))
2465                 dev->mal_burst_size = 256;
2466
2467         /* PHY mode needs some decoding */
2468         dev->phy_mode = PHY_MODE_NA;
2469         pm = of_get_property(np, "phy-mode", &plen);
2470         if (pm != NULL) {
2471                 int i;
2472                 for (i = 0; i < ARRAY_SIZE(phy_modes); i++)
2473                         if (!strcasecmp(pm, phy_modes[i])) {
2474                                 dev->phy_mode = i;
2475                                 break;
2476                         }
2477         }
2478
2479         /* Backward compat with non-final DT */
2480         if (dev->phy_mode == PHY_MODE_NA && pm != NULL && plen == 4) {
2481                 u32 nmode = *(const u32 *)pm;
2482                 if (nmode > PHY_MODE_NA && nmode <= PHY_MODE_SGMII)
2483                         dev->phy_mode = nmode;
2484         }
2485
2486         /* Check EMAC version */
2487         if (of_device_is_compatible(np, "ibm,emac4"))
2488                 dev->features |= EMAC_FTR_EMAC4;
2489
2490         /* Fixup some feature bits based on the device tree */
2491         if (of_get_property(np, "has-inverted-stacr-oc", NULL))
2492                 dev->features |= EMAC_FTR_STACR_OC_INVERT;
2493         if (of_get_property(np, "has-new-stacr-staopc", NULL))
2494                 dev->features |= EMAC_FTR_HAS_NEW_STACR;
2495
2496         /* CAB lacks the appropriate properties */
2497         if (of_device_is_compatible(np, "ibm,emac-axon"))
2498                 dev->features |= EMAC_FTR_HAS_NEW_STACR |
2499                         EMAC_FTR_STACR_OC_INVERT;
2500
2501         /* Enable TAH/ZMII/RGMII features as found */
2502         if (dev->tah_ph != 0) {
2503 #ifdef CONFIG_IBM_NEW_EMAC_TAH
2504                 dev->features |= EMAC_FTR_HAS_TAH;
2505 #else
2506                 printk(KERN_ERR "%s: TAH support not enabled !\n",
2507                        np->full_name);
2508                 return -ENXIO;
2509 #endif
2510         }
2511
2512         if (dev->zmii_ph != 0) {
2513 #ifdef CONFIG_IBM_NEW_EMAC_ZMII
2514                 dev->features |= EMAC_FTR_HAS_ZMII;
2515 #else
2516                 printk(KERN_ERR "%s: ZMII support not enabled !\n",
2517                        np->full_name);
2518                 return -ENXIO;
2519 #endif
2520         }
2521
2522         if (dev->rgmii_ph != 0) {
2523 #ifdef CONFIG_IBM_NEW_EMAC_RGMII
2524                 dev->features |= EMAC_FTR_HAS_RGMII;
2525 #else
2526                 printk(KERN_ERR "%s: RGMII support not enabled !\n",
2527                        np->full_name);
2528                 return -ENXIO;
2529 #endif
2530         }
2531
2532         /* Read MAC-address */
2533         p = of_get_property(np, "local-mac-address", NULL);
2534         if (p == NULL) {
2535                 printk(KERN_ERR "%s: Can't find local-mac-address property\n",
2536                        np->full_name);
2537                 return -ENXIO;
2538         }
2539         memcpy(dev->ndev->dev_addr, p, 6);
2540
2541         DBG(dev, "features     : 0x%08x / 0x%08x\n", dev->features, EMAC_FTRS_POSSIBLE);
2542         DBG(dev, "tx_fifo_size : %d (%d gige)\n", dev->tx_fifo_size, dev->tx_fifo_size_gige);
2543         DBG(dev, "rx_fifo_size : %d (%d gige)\n", dev->rx_fifo_size, dev->rx_fifo_size_gige);
2544         DBG(dev, "max_mtu      : %d\n", dev->max_mtu);
2545         DBG(dev, "OPB freq     : %d\n", dev->opb_bus_freq);
2546
2547         return 0;
2548 }
2549
2550 static int __devinit emac_probe(struct of_device *ofdev,
2551                                 const struct of_device_id *match)
2552 {
2553         struct net_device *ndev;
2554         struct emac_instance *dev;
2555         struct device_node *np = ofdev->node;
2556         struct device_node **blist = NULL;
2557         int err, i;
2558
2559         /* Skip unused/unwired EMACS */
2560         if (of_get_property(np, "unused", NULL))
2561                 return -ENODEV;
2562
2563         /* Find ourselves in the bootlist if we are there */
2564         for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
2565                 if (emac_boot_list[i] == np)
2566                         blist = &emac_boot_list[i];
2567
2568         /* Allocate our net_device structure */
2569         err = -ENOMEM;
2570         ndev = alloc_etherdev(sizeof(struct emac_instance));
2571         if (!ndev) {
2572                 printk(KERN_ERR "%s: could not allocate ethernet device!\n",
2573                        np->full_name);
2574                 goto err_gone;
2575         }
2576         dev = netdev_priv(ndev);
2577         dev->ndev = ndev;
2578         dev->ofdev = ofdev;
2579         dev->blist = blist;
2580         SET_NETDEV_DEV(ndev, &ofdev->dev);
2581
2582         /* Initialize some embedded data structures */
2583         mutex_init(&dev->mdio_lock);
2584         mutex_init(&dev->link_lock);
2585         spin_lock_init(&dev->lock);
2586         INIT_WORK(&dev->reset_work, emac_reset_work);
2587
2588         /* Init various config data based on device-tree */
2589         err = emac_init_config(dev);
2590         if (err != 0)
2591                 goto err_free;
2592
2593         /* Get interrupts. EMAC irq is mandatory, WOL irq is optional */
2594         dev->emac_irq = irq_of_parse_and_map(np, 0);
2595         dev->wol_irq = irq_of_parse_and_map(np, 1);
2596         if (dev->emac_irq == NO_IRQ) {
2597                 printk(KERN_ERR "%s: Can't map main interrupt\n", np->full_name);
2598                 goto err_free;
2599         }
2600         ndev->irq = dev->emac_irq;
2601
2602         /* Map EMAC regs */
2603         if (of_address_to_resource(np, 0, &dev->rsrc_regs)) {
2604                 printk(KERN_ERR "%s: Can't get registers address\n",
2605                        np->full_name);
2606                 goto err_irq_unmap;
2607         }
2608         // TODO : request_mem_region
2609         dev->emacp = ioremap(dev->rsrc_regs.start, sizeof(struct emac_regs));
2610         if (dev->emacp == NULL) {
2611                 printk(KERN_ERR "%s: Can't map device registers!\n",
2612                        np->full_name);
2613                 err = -ENOMEM;
2614                 goto err_irq_unmap;
2615         }
2616
2617         /* Wait for dependent devices */
2618         err = emac_wait_deps(dev);
2619         if (err) {
2620                 printk(KERN_ERR
2621                        "%s: Timeout waiting for dependent devices\n",
2622                        np->full_name);
2623                 /*  display more info about what's missing ? */
2624                 goto err_reg_unmap;
2625         }
2626         dev->mal = dev_get_drvdata(&dev->mal_dev->dev);
2627         if (dev->mdio_dev != NULL)
2628                 dev->mdio_instance = dev_get_drvdata(&dev->mdio_dev->dev);
2629
2630         /* Register with MAL */
2631         dev->commac.ops = &emac_commac_ops;
2632         dev->commac.dev = dev;
2633         dev->commac.tx_chan_mask = MAL_CHAN_MASK(dev->mal_tx_chan);
2634         dev->commac.rx_chan_mask = MAL_CHAN_MASK(dev->mal_rx_chan);
2635         err = mal_register_commac(dev->mal, &dev->commac);
2636         if (err) {
2637                 printk(KERN_ERR "%s: failed to register with mal %s!\n",
2638                        np->full_name, dev->mal_dev->node->full_name);
2639                 goto err_rel_deps;
2640         }
2641         dev->rx_skb_size = emac_rx_skb_size(ndev->mtu);
2642         dev->rx_sync_size = emac_rx_sync_size(ndev->mtu);
2643
2644         /* Get pointers to BD rings */
2645         dev->tx_desc =
2646             dev->mal->bd_virt + mal_tx_bd_offset(dev->mal, dev->mal_tx_chan);
2647         dev->rx_desc =
2648             dev->mal->bd_virt + mal_rx_bd_offset(dev->mal, dev->mal_rx_chan);
2649
2650         DBG(dev, "tx_desc %p" NL, dev->tx_desc);
2651         DBG(dev, "rx_desc %p" NL, dev->rx_desc);
2652
2653         /* Clean rings */
2654         memset(dev->tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor));
2655         memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor));
2656
2657         /* Attach to ZMII, if needed */
2658         if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII) &&
2659             (err = zmii_attach(dev->zmii_dev, dev->zmii_port, &dev->phy_mode)) != 0)
2660                 goto err_unreg_commac;
2661
2662         /* Attach to RGMII, if needed */
2663         if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII) &&
2664             (err = rgmii_attach(dev->rgmii_dev, dev->rgmii_port, dev->phy_mode)) != 0)
2665                 goto err_detach_zmii;
2666
2667         /* Attach to TAH, if needed */
2668         if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
2669             (err = tah_attach(dev->tah_dev, dev->tah_port)) != 0)
2670                 goto err_detach_rgmii;
2671
2672         /* Set some link defaults before we can find out real parameters */
2673         dev->phy.speed = SPEED_100;
2674         dev->phy.duplex = DUPLEX_FULL;
2675         dev->phy.autoneg = AUTONEG_DISABLE;
2676         dev->phy.pause = dev->phy.asym_pause = 0;
2677         dev->stop_timeout = STOP_TIMEOUT_100;
2678         INIT_DELAYED_WORK(&dev->link_work, emac_link_timer);
2679
2680         /* Find PHY if any */
2681         err = emac_init_phy(dev);
2682         if (err != 0)
2683                 goto err_detach_tah;
2684
2685         /* Fill in the driver function table */
2686         ndev->open = &emac_open;
2687 #ifdef CONFIG_IBM_NEW_EMAC_TAH
2688         if (dev->tah_dev) {
2689                 ndev->hard_start_xmit = &emac_start_xmit_sg;
2690                 ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
2691         } else
2692 #endif
2693                 ndev->hard_start_xmit = &emac_start_xmit;
2694         ndev->tx_timeout = &emac_tx_timeout;
2695         ndev->watchdog_timeo = 5 * HZ;
2696         ndev->stop = &emac_close;
2697         ndev->get_stats = &emac_stats;
2698         ndev->set_multicast_list = &emac_set_multicast_list;
2699         ndev->do_ioctl = &emac_ioctl;
2700         if (emac_phy_supports_gige(dev->phy_mode)) {
2701                 ndev->change_mtu = &emac_change_mtu;
2702                 dev->commac.ops = &emac_commac_sg_ops;
2703         }
2704         SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);
2705
2706         netif_carrier_off(ndev);
2707         netif_stop_queue(ndev);
2708
2709         err = register_netdev(ndev);
2710         if (err) {
2711                 printk(KERN_ERR "%s: failed to register net device (%d)!\n",
2712                        np->full_name, err);
2713                 goto err_detach_tah;
2714         }
2715
2716         /* Set our drvdata last as we don't want them visible until we are
2717          * fully initialized
2718          */
2719         wmb();
2720         dev_set_drvdata(&ofdev->dev, dev);
2721
2722         /* There's a new kid in town ! Let's tell everybody */
2723         wake_up_all(&emac_probe_wait);
2724
2725
2726         printk(KERN_INFO
2727                "%s: EMAC-%d %s, MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
2728                ndev->name, dev->cell_index, np->full_name,
2729                ndev->dev_addr[0], ndev->dev_addr[1], ndev->dev_addr[2],
2730                ndev->dev_addr[3], ndev->dev_addr[4], ndev->dev_addr[5]);
2731
2732         if (dev->phy.address >= 0)
2733                 printk("%s: found %s PHY (0x%02x)\n", ndev->name,
2734                        dev->phy.def->name, dev->phy.address);
2735
2736         emac_dbg_register(dev);
2737
2738         /* Life is good */
2739         return 0;
2740
2741         /* I have a bad feeling about this ... */
2742
2743  err_detach_tah:
2744         if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2745                 tah_detach(dev->tah_dev, dev->tah_port);
2746  err_detach_rgmii:
2747         if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2748                 rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
2749  err_detach_zmii:
2750         if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2751                 zmii_detach(dev->zmii_dev, dev->zmii_port);
2752  err_unreg_commac:
2753         mal_unregister_commac(dev->mal, &dev->commac);
2754  err_rel_deps:
2755         emac_put_deps(dev);
2756  err_reg_unmap:
2757         iounmap(dev->emacp);
2758  err_irq_unmap:
2759         if (dev->wol_irq != NO_IRQ)
2760                 irq_dispose_mapping(dev->wol_irq);
2761         if (dev->emac_irq != NO_IRQ)
2762                 irq_dispose_mapping(dev->emac_irq);
2763  err_free:
2764         kfree(ndev);
2765  err_gone:
2766         /* if we were on the bootlist, remove us as we won't show up and
2767          * wake up all waiters to notify them in case they were waiting
2768          * on us
2769          */
2770         if (blist) {
2771                 *blist = NULL;
2772                 wake_up_all(&emac_probe_wait);
2773         }
2774         return err;
2775 }
2776
2777 static int __devexit emac_remove(struct of_device *ofdev)
2778 {
2779         struct emac_instance *dev = dev_get_drvdata(&ofdev->dev);
2780
2781         DBG(dev, "remove" NL);
2782
2783         dev_set_drvdata(&ofdev->dev, NULL);
2784
2785         unregister_netdev(dev->ndev);
2786
2787         flush_scheduled_work();
2788
2789         if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2790                 tah_detach(dev->tah_dev, dev->tah_port);
2791         if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2792                 rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
2793         if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2794                 zmii_detach(dev->zmii_dev, dev->zmii_port);
2795
2796         mal_unregister_commac(dev->mal, &dev->commac);
2797         emac_put_deps(dev);
2798
2799         emac_dbg_unregister(dev);
2800         iounmap(dev->emacp);
2801
2802         if (dev->wol_irq != NO_IRQ)
2803                 irq_dispose_mapping(dev->wol_irq);
2804         if (dev->emac_irq != NO_IRQ)
2805                 irq_dispose_mapping(dev->emac_irq);
2806
2807         kfree(dev->ndev);
2808
2809         return 0;
2810 }
2811
2812 /* XXX Features in here should be replaced by properties... */
2813 static struct of_device_id emac_match[] =
2814 {
2815         {
2816                 .type           = "network",
2817                 .compatible     = "ibm,emac",
2818         },
2819         {
2820                 .type           = "network",
2821                 .compatible     = "ibm,emac4",
2822         },
2823         {},
2824 };
2825
/* of_platform glue binding emac_match nodes to emac_probe/emac_remove */
static struct of_platform_driver emac_driver = {
	.name = "emac",
	.match_table = emac_match,

	.probe = emac_probe,
	.remove = emac_remove,
};
2833
2834 static void __init emac_make_bootlist(void)
2835 {
2836         struct device_node *np = NULL;
2837         int j, max, i = 0, k;
2838         int cell_indices[EMAC_BOOT_LIST_SIZE];
2839
2840         /* Collect EMACs */
2841         while((np = of_find_all_nodes(np)) != NULL) {
2842                 const u32 *idx;
2843
2844                 if (of_match_node(emac_match, np) == NULL)
2845                         continue;
2846                 if (of_get_property(np, "unused", NULL))
2847                         continue;
2848                 idx = of_get_property(np, "cell-index", NULL);
2849                 if (idx == NULL)
2850                         continue;
2851                 cell_indices[i] = *idx;
2852                 emac_boot_list[i++] = of_node_get(np);
2853                 if (i >= EMAC_BOOT_LIST_SIZE) {
2854                         of_node_put(np);
2855                         break;
2856                 }
2857         }
2858         max = i;
2859
2860         /* Bubble sort them (doh, what a creative algorithm :-) */
2861         for (i = 0; max > 1 && (i < (max - 1)); i++)
2862                 for (j = i; j < max; j++) {
2863                         if (cell_indices[i] > cell_indices[j]) {
2864                                 np = emac_boot_list[i];
2865                                 emac_boot_list[i] = emac_boot_list[j];
2866                                 emac_boot_list[j] = np;
2867                                 k = cell_indices[i];
2868                                 cell_indices[i] = cell_indices[j];
2869                                 cell_indices[j] = k;
2870                         }
2871                 }
2872 }
2873
2874 static int __init emac_init(void)
2875 {
2876         int rc;
2877
2878         printk(KERN_INFO DRV_DESC ", version " DRV_VERSION "\n");
2879
2880         /* Init debug stuff */
2881         emac_init_debug();
2882
2883         /* Build EMAC boot list */
2884         emac_make_bootlist();
2885
2886         /* Init submodules */
2887         rc = mal_init();
2888         if (rc)
2889                 goto err;
2890         rc = zmii_init();
2891         if (rc)
2892                 goto err_mal;
2893         rc = rgmii_init();
2894         if (rc)
2895                 goto err_zmii;
2896         rc = tah_init();
2897         if (rc)
2898                 goto err_rgmii;
2899         rc = of_register_platform_driver(&emac_driver);
2900         if (rc)
2901                 goto err_tah;
2902
2903         return 0;
2904
2905  err_tah:
2906         tah_exit();
2907  err_rgmii:
2908         rgmii_exit();
2909  err_zmii:
2910         zmii_exit();
2911  err_mal:
2912         mal_exit();
2913  err:
2914         return rc;
2915 }
2916
2917 static void __exit emac_exit(void)
2918 {
2919         int i;
2920
2921         of_unregister_platform_driver(&emac_driver);
2922
2923         tah_exit();
2924         rgmii_exit();
2925         zmii_exit();
2926         mal_exit();
2927         emac_fini_debug();
2928
2929         /* Destroy EMAC boot list */
2930         for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
2931                 if (emac_boot_list[i])
2932                         of_node_put(emac_boot_list[i]);
2933 }
2934
2935 module_init(emac_init);
2936 module_exit(emac_exit);