Merge branch 'generic-ipi' into generic-ipi-for-linus
[linux-2.6] / drivers / net / ibm_newemac / core.c
1 /*
2  * drivers/net/ibm_newemac/core.c
3  *
4  * Driver for PowerPC 4xx on-chip ethernet controller.
5  *
6  * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
7  *                <benh@kernel.crashing.org>
8  *
9  * Based on the arch/ppc version of the driver:
10  *
11  * Copyright (c) 2004, 2005 Zultys Technologies.
12  * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
13  *
14  * Based on original work by
15  *      Matt Porter <mporter@kernel.crashing.org>
16  *      (c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
17  *      Armin Kuster <akuster@mvista.com>
18  *      Johnnie Peters <jpeters@mvista.com>
19  *
20  * This program is free software; you can redistribute  it and/or modify it
21  * under  the terms of  the GNU General  Public License as published by the
22  * Free Software Foundation;  either version 2 of the  License, or (at your
23  * option) any later version.
24  *
25  */
26
27 #include <linux/sched.h>
28 #include <linux/string.h>
29 #include <linux/errno.h>
30 #include <linux/delay.h>
31 #include <linux/types.h>
32 #include <linux/pci.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/crc32.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #include <linux/bitops.h>
39 #include <linux/workqueue.h>
40 #include <linux/of.h>
41
42 #include <asm/processor.h>
43 #include <asm/io.h>
44 #include <asm/dma.h>
45 #include <asm/uaccess.h>
46 #include <asm/dcr.h>
47 #include <asm/dcr-regs.h>
48
49 #include "core.h"
50
51 /*
52  * Lack of dma_unmap_???? calls is intentional.
53  *
54  * API-correct usage requires additional support state information to be
55  * maintained for every RX and TX buffer descriptor (BD). Unfortunately, due to
56  * EMAC design (e.g. TX buffer passed from network stack can be split into
57  * several BDs, dma_map_single/dma_map_page can be used to map particular BD),
58  * maintaining such information will add additional overhead.
59  * Current DMA API implementation for 4xx processors only ensures cache coherency
60  * and dma_unmap_???? routines are empty and are likely to stay this way.
61  * I decided to omit dma_unmap_??? calls because I don't want to add additional
62  * complexity just for the sake of following some abstract API, when it doesn't
 * add any real benefit to the driver. I understand that this decision may be
64  * controversial, but I really tried to make code API-correct and efficient
65  * at the same time and didn't come up with code I liked :(.                --ebs
66  */
67
68 #define DRV_NAME        "emac"
69 #define DRV_VERSION     "3.54"
70 #define DRV_DESC        "PPC 4xx OCP EMAC driver"
71
72 MODULE_DESCRIPTION(DRV_DESC);
73 MODULE_AUTHOR
74     ("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>");
75 MODULE_LICENSE("GPL");
76
77 /*
78  * PPC64 doesn't (yet) have a cacheable_memcpy
79  */
80 #ifdef CONFIG_PPC64
81 #define cacheable_memcpy(d,s,n) memcpy((d),(s),(n))
82 #endif
83
84 /* minimum number of free TX descriptors required to wake up TX process */
85 #define EMAC_TX_WAKEUP_THRESH           (NUM_TX_BUFF / 4)
86
87 /* If packet size is less than this number, we allocate small skb and copy packet
88  * contents into it instead of just sending original big skb up
89  */
90 #define EMAC_RX_COPY_THRESH             CONFIG_IBM_NEW_EMAC_RX_COPY_THRESHOLD
91
92 /* Since multiple EMACs share MDIO lines in various ways, we need
93  * to avoid re-using the same PHY ID in cases where the arch didn't
94  * setup precise phy_map entries
95  *
96  * XXX This is something that needs to be reworked as we can have multiple
97  * EMAC "sets" (multiple ASICs containing several EMACs) though we can
98  * probably require in that case to have explicit PHY IDs in the device-tree
99  */
100 static u32 busy_phy_map;
101 static DEFINE_MUTEX(emac_phy_map_lock);
102
103 /* This is the wait queue used to wait on any event related to probe, that
104  * is discovery of MALs, other EMACs, ZMII/RGMIIs, etc...
105  */
106 static DECLARE_WAIT_QUEUE_HEAD(emac_probe_wait);
107
108 /* Having stable interface names is a doomed idea. However, it would be nice
109  * if we didn't have completely random interface names at boot too :-) It's
110  * just a matter of making everybody's life easier. Since we are doing
111  * threaded probing, it's a bit harder though. The base idea here is that
112  * we make up a list of all emacs in the device-tree before we register the
113  * driver. Every emac will then wait for the previous one in the list to
114  * initialize before itself. We should also keep that list ordered by
115  * cell_index.
116  * That list is only 4 entries long, meaning that additional EMACs don't
117  * get ordering guarantees unless EMAC_BOOT_LIST_SIZE is increased.
118  */
119
120 #define EMAC_BOOT_LIST_SIZE     4
121 static struct device_node *emac_boot_list[EMAC_BOOT_LIST_SIZE];
122
123 /* How long should I wait for dependent devices ? */
124 #define EMAC_PROBE_DEP_TIMEOUT  (HZ * 5)
125
126 /* I don't want to litter system log with timeout errors
127  * when we have brain-damaged PHY.
128  */
129 static inline void emac_report_timeout_error(struct emac_instance *dev,
130                                              const char *error)
131 {
132         if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX |
133                                   EMAC_FTR_440EP_PHY_CLK_FIX))
134                 DBG(dev, "%s" NL, error);
135         else if (net_ratelimit())
136                 printk(KERN_ERR "%s: %s\n", dev->ndev->name, error);
137 }
138
139 /* EMAC PHY clock workaround:
140  * 440EP/440GR has more sane SDR0_MFR register implementation than 440GX,
141  * which allows controlling each EMAC clock
142  */
/* Switch this EMAC's RX clock source by setting the SDR0_MFR_ECS bit
 * for our cell index (440EP/440GR PHY clock workaround).  No-op unless
 * EMAC_FTR_440EP_PHY_CLK_FIX is set and native DCR access is built in.
 */
static inline void emac_rx_clk_tx(struct emac_instance *dev)
{
#ifdef CONFIG_PPC_DCR_NATIVE
        if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
                dcri_clrset(SDR0, SDR0_MFR,
                            0, SDR0_MFR_ECS >> dev->cell_index);
#endif
}
151
/* Undo emac_rx_clk_tx(): clear the SDR0_MFR_ECS bit for our cell index,
 * restoring the default RX clock source.  Same feature/config gating as
 * emac_rx_clk_tx().
 */
static inline void emac_rx_clk_default(struct emac_instance *dev)
{
#ifdef CONFIG_PPC_DCR_NATIVE
        if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
                dcri_clrset(SDR0, SDR0_MFR,
                            SDR0_MFR_ECS >> dev->cell_index, 0);
#endif
}
160
161 /* PHY polling intervals */
162 #define PHY_POLL_LINK_ON        HZ
163 #define PHY_POLL_LINK_OFF       (HZ / 5)
164
165 /* Graceful stop timeouts in us.
166  * We should allow up to 1 frame time (full-duplex, ignoring collisions)
167  */
168 #define STOP_TIMEOUT_10         1230
169 #define STOP_TIMEOUT_100        124
170 #define STOP_TIMEOUT_1000       13
171 #define STOP_TIMEOUT_1000_JUMBO 73
172
173 static unsigned char default_mcast_addr[] = {
174         0x01, 0x80, 0xC2, 0x00, 0x00, 0x01
175 };
176
177 /* Please, keep in sync with struct ibm_emac_stats/ibm_emac_error_stats */
178 static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = {
179         "rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum",
180         "tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom",
181         "rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu",
182         "rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet",
183         "rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error",
184         "rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range",
185         "rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun",
186         "rx_bad_packet", "rx_runt_packet", "rx_short_event",
187         "rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long",
188         "rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors",
189         "tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral",
190         "tx_bd_excessive_collisions", "tx_bd_late_collision",
191         "tx_bd_multple_collisions", "tx_bd_single_collision",
192         "tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe",
193         "tx_errors"
194 };
195
196 static irqreturn_t emac_irq(int irq, void *dev_instance);
197 static void emac_clean_tx_ring(struct emac_instance *dev);
198 static void __emac_set_multicast_list(struct emac_instance *dev);
199
200 static inline int emac_phy_supports_gige(int phy_mode)
201 {
202         return  phy_mode == PHY_MODE_GMII ||
203                 phy_mode == PHY_MODE_RGMII ||
204                 phy_mode == PHY_MODE_TBI ||
205                 phy_mode == PHY_MODE_RTBI;
206 }
207
208 static inline int emac_phy_gpcs(int phy_mode)
209 {
210         return  phy_mode == PHY_MODE_TBI ||
211                 phy_mode == PHY_MODE_RTBI;
212 }
213
/* Enable the transmit MAC by setting MR0_TXE, unless it is already
 * running (read-modify-write of mode register 0).
 */
static inline void emac_tx_enable(struct emac_instance *dev)
{
        struct emac_regs __iomem *p = dev->emacp;
        u32 r;

        DBG(dev, "tx_enable" NL);

        r = in_be32(&p->mr0);
        if (!(r & EMAC_MR0_TXE))
                out_be32(&p->mr0, r | EMAC_MR0_TXE);
}
225
/* Disable the transmit MAC: clear MR0_TXE and busy-wait (up to
 * dev->stop_timeout microseconds) for the hardware idle indication
 * (MR0_TXI).  A timeout is reported but otherwise ignored.
 */
static void emac_tx_disable(struct emac_instance *dev)
{
        struct emac_regs __iomem *p = dev->emacp;
        u32 r;

        DBG(dev, "tx_disable" NL);

        r = in_be32(&p->mr0);
        if (r & EMAC_MR0_TXE) {
                int n = dev->stop_timeout;
                out_be32(&p->mr0, r & ~EMAC_MR0_TXE);
                /* TXI is set by hardware once the graceful stop completes */
                while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n) {
                        udelay(1);
                        --n;
                }
                if (unlikely(!n))
                        emac_report_timeout_error(dev, "TX disable timeout");
        }
}
245
246 static void emac_rx_enable(struct emac_instance *dev)
247 {
248         struct emac_regs __iomem *p = dev->emacp;
249         u32 r;
250
251         if (unlikely(test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags)))
252                 goto out;
253
254         DBG(dev, "rx_enable" NL);
255
256         r = in_be32(&p->mr0);
257         if (!(r & EMAC_MR0_RXE)) {
258                 if (unlikely(!(r & EMAC_MR0_RXI))) {
259                         /* Wait if previous async disable is still in progress */
260                         int n = dev->stop_timeout;
261                         while (!(r = in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
262                                 udelay(1);
263                                 --n;
264                         }
265                         if (unlikely(!n))
266                                 emac_report_timeout_error(dev,
267                                                           "RX disable timeout");
268                 }
269                 out_be32(&p->mr0, r | EMAC_MR0_RXE);
270         }
271  out:
272         ;
273 }
274
/* Disable the receive MAC: clear MR0_RXE and busy-wait (up to
 * dev->stop_timeout microseconds) for the hardware idle indication
 * (MR0_RXI).  A timeout is reported but otherwise ignored.
 */
static void emac_rx_disable(struct emac_instance *dev)
{
        struct emac_regs __iomem *p = dev->emacp;
        u32 r;

        DBG(dev, "rx_disable" NL);

        r = in_be32(&p->mr0);
        if (r & EMAC_MR0_RXE) {
                int n = dev->stop_timeout;
                out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
                /* RXI is set by hardware once the graceful stop completes */
                while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
                        udelay(1);
                        --n;
                }
                if (unlikely(!n))
                        emac_report_timeout_error(dev, "RX disable timeout");
        }
}
294
/* Quiesce the network interface: block multicast list updates
 * (no_mcast), disable MAL polling and stop the TX queue.
 * Counterpart of emac_netif_start().
 */
static inline void emac_netif_stop(struct emac_instance *dev)
{
        netif_tx_lock_bh(dev->ndev);
        dev->no_mcast = 1;
        netif_tx_unlock_bh(dev->ndev);
        dev->ndev->trans_start = jiffies;       /* prevent tx timeout */
        mal_poll_disable(dev->mal, &dev->commac);
        netif_tx_disable(dev->ndev);
}
304
/* Re-enable the interface after emac_netif_stop(): allow multicast
 * updates again (applying any change that was deferred while stopped),
 * wake the TX queue and re-enable MAL polling.
 */
static inline void emac_netif_start(struct emac_instance *dev)
{
        netif_tx_lock_bh(dev->ndev);
        dev->no_mcast = 0;
        /* flush a multicast list change deferred by no_mcast */
        if (dev->mcast_pending && netif_running(dev->ndev))
                __emac_set_multicast_list(dev);
        netif_tx_unlock_bh(dev->ndev);

        netif_wake_queue(dev->ndev);

        /* NOTE: unconditional netif_wake_queue is only appropriate
         * so long as all callers are assured to have free tx slots
         * (taken from tg3... though the case where that is wrong is
         *  not terribly harmful)
         */
        mal_poll_enable(dev->mal, &dev->commac);
}
322
323 static inline void emac_rx_disable_async(struct emac_instance *dev)
324 {
325         struct emac_regs __iomem *p = dev->emacp;
326         u32 r;
327
328         DBG(dev, "rx_disable_async" NL);
329
330         r = in_be32(&p->mr0);
331         if (r & EMAC_MR0_RXE)
332                 out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
333 }
334
/* Soft-reset the EMAC core: set MR0_SRST and poll for the bit to
 * self-clear.  RX/TX are gracefully stopped first, unless a previous
 * reset already failed (MAC state unknown, stopping could hang).
 * Returns 0 on success, -ETIMEDOUT on failure; the outcome is latched
 * in dev->reset_failed for the next attempt.
 */
static int emac_reset(struct emac_instance *dev)
{
        struct emac_regs __iomem *p = dev->emacp;
        int n = 20;

        DBG(dev, "reset" NL);

        if (!dev->reset_failed) {
                /* 40x erratum suggests stopping RX channel before reset,
                 * we stop TX as well
                 */
                emac_rx_disable(dev);
                emac_tx_disable(dev);
        }

        out_be32(&p->mr0, EMAC_MR0_SRST);
        /* SRST self-clears when the reset sequence completes */
        while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n)
                --n;

        if (n) {
                dev->reset_failed = 0;
                return 0;
        } else {
                emac_report_timeout_error(dev, "reset timeout");
                dev->reset_failed = 1;
                return -ETIMEDOUT;
        }
}
363
/* Program the four 16-bit group address hash table registers (GAHT1-4)
 * from the device's multicast list.  The hash index is the top 6 bits
 * of the Ethernet CRC of each address, mapped MSB-first onto the 64
 * table bits.
 */
static void emac_hash_mc(struct emac_instance *dev)
{
        struct emac_regs __iomem *p = dev->emacp;
        u16 gaht[4] = { 0 };
        struct dev_mc_list *dmi;

        DBG(dev, "hash_mc %d" NL, dev->ndev->mc_count);

        for (dmi = dev->ndev->mc_list; dmi; dmi = dmi->next) {
                int bit;
                DBG2(dev, "mc %02x:%02x:%02x:%02x:%02x:%02x" NL,
                     dmi->dmi_addr[0], dmi->dmi_addr[1], dmi->dmi_addr[2],
                     dmi->dmi_addr[3], dmi->dmi_addr[4], dmi->dmi_addr[5]);

                /* bit 63 corresponds to CRC hash value 0 */
                bit = 63 - (ether_crc(ETH_ALEN, dmi->dmi_addr) >> 26);
                gaht[bit >> 4] |= 0x8000 >> (bit & 0x0f);
        }
        out_be32(&p->gaht1, gaht[0]);
        out_be32(&p->gaht2, gaht[1]);
        out_be32(&p->gaht3, gaht[2]);
        out_be32(&p->gaht4, gaht[3]);
}
386
387 static inline u32 emac_iff2rmr(struct net_device *ndev)
388 {
389         struct emac_instance *dev = netdev_priv(ndev);
390         u32 r;
391
392         r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE;
393
394         if (emac_has_feature(dev, EMAC_FTR_EMAC4))
395             r |= EMAC4_RMR_BASE;
396         else
397             r |= EMAC_RMR_BASE;
398
399         if (ndev->flags & IFF_PROMISC)
400                 r |= EMAC_RMR_PME;
401         else if (ndev->flags & IFF_ALLMULTI || ndev->mc_count > 32)
402                 r |= EMAC_RMR_PMME;
403         else if (ndev->mc_count > 0)
404                 r |= EMAC_RMR_MAE;
405
406         return r;
407 }
408
409 static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
410 {
411         u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC_MR1_TR0_MULT;
412
413         DBG2(dev, "__emac_calc_base_mr1" NL);
414
415         switch(tx_size) {
416         case 2048:
417                 ret |= EMAC_MR1_TFS_2K;
418                 break;
419         default:
420                 printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
421                        dev->ndev->name, tx_size);
422         }
423
424         switch(rx_size) {
425         case 16384:
426                 ret |= EMAC_MR1_RFS_16K;
427                 break;
428         case 4096:
429                 ret |= EMAC_MR1_RFS_4K;
430                 break;
431         default:
432                 printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
433                        dev->ndev->name, rx_size);
434         }
435
436         return ret;
437 }
438
439 static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
440 {
441         u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC4_MR1_TR |
442                 EMAC4_MR1_OBCI(dev->opb_bus_freq / 1000000);
443
444         DBG2(dev, "__emac4_calc_base_mr1" NL);
445
446         switch(tx_size) {
447         case 4096:
448                 ret |= EMAC4_MR1_TFS_4K;
449                 break;
450         case 2048:
451                 ret |= EMAC4_MR1_TFS_2K;
452                 break;
453         default:
454                 printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
455                        dev->ndev->name, tx_size);
456         }
457
458         switch(rx_size) {
459         case 16384:
460                 ret |= EMAC4_MR1_RFS_16K;
461                 break;
462         case 4096:
463                 ret |= EMAC4_MR1_RFS_4K;
464                 break;
465         case 2048:
466                 ret |= EMAC4_MR1_RFS_2K;
467                 break;
468         default:
469                 printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
470                        dev->ndev->name, rx_size);
471         }
472
473         return ret;
474 }
475
476 static u32 emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
477 {
478         return emac_has_feature(dev, EMAC_FTR_EMAC4) ?
479                 __emac4_calc_base_mr1(dev, tx_size, rx_size) :
480                 __emac_calc_base_mr1(dev, tx_size, rx_size);
481 }
482
483 static inline u32 emac_calc_trtr(struct emac_instance *dev, unsigned int size)
484 {
485         if (emac_has_feature(dev, EMAC_FTR_EMAC4))
486                 return ((size >> 6) - 1) << EMAC_TRTR_SHIFT_EMAC4;
487         else
488                 return ((size >> 6) - 1) << EMAC_TRTR_SHIFT;
489 }
490
491 static inline u32 emac_calc_rwmr(struct emac_instance *dev,
492                                  unsigned int low, unsigned int high)
493 {
494         if (emac_has_feature(dev, EMAC_FTR_EMAC4))
495                 return (low << 22) | ( (high & 0x3ff) << 6);
496         else
497                 return (low << 23) | ( (high & 0x1ff) << 7);
498 }
499
/* Program the whole MAC for the current link state: reset the core (or
 * force internal loopback while the link is down), then set up MR1
 * (duplex, speed, FIFO sizes, flow control), the station address, the
 * RX mode register, FIFO thresholds and watermarks, the PAUSE timer
 * and the interrupt mask.
 * Returns 0 on success or -ETIMEDOUT if the EMAC reset timed out.
 */
static int emac_configure(struct emac_instance *dev)
{
        struct emac_regs __iomem *p = dev->emacp;
        struct net_device *ndev = dev->ndev;
        int tx_size, rx_size, link = netif_carrier_ok(dev->ndev);
        u32 r, mr1 = 0;

        DBG(dev, "configure" NL);

        if (!link) {
                /* No carrier: put the MAC in full-duplex internal loopback
                 * instead of resetting it.
                 */
                out_be32(&p->mr1, in_be32(&p->mr1)
                         | EMAC_MR1_FDE | EMAC_MR1_ILE);
                udelay(100);
        } else if (emac_reset(dev) < 0)
                return -ETIMEDOUT;

        if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
                tah_reset(dev->tah_dev);

        DBG(dev, " link = %d duplex = %d, pause = %d, asym_pause = %d\n",
            link, dev->phy.duplex, dev->phy.pause, dev->phy.asym_pause);

        /* Default fifo sizes */
        tx_size = dev->tx_fifo_size;
        rx_size = dev->rx_fifo_size;

        /* No link, force loopback */
        if (!link)
                mr1 = EMAC_MR1_FDE | EMAC_MR1_ILE;

        /* Check for full duplex */
        else if (dev->phy.duplex == DUPLEX_FULL)
                mr1 |= EMAC_MR1_FDE | EMAC_MR1_MWSW_001;

        /* Adjust fifo sizes, mr1 and timeouts based on link speed */
        dev->stop_timeout = STOP_TIMEOUT_10;
        switch (dev->phy.speed) {
        case SPEED_1000:
                if (emac_phy_gpcs(dev->phy.mode)) {
                        mr1 |= EMAC_MR1_MF_1000GPCS |
                                EMAC_MR1_MF_IPPA(dev->phy.address);

                        /* Put some arbitrary OUI, Manuf & Rev IDs so we can
                         * identify this GPCS PHY later.
                         */
                        out_be32(&p->ipcr, 0xdeadbeef);
                } else
                        mr1 |= EMAC_MR1_MF_1000;

                /* Extended fifo sizes */
                tx_size = dev->tx_fifo_size_gige;
                rx_size = dev->rx_fifo_size_gige;

                if (dev->ndev->mtu > ETH_DATA_LEN) {
                        /* Jumbo frames: enable JPSM and use the longer
                         * graceful-stop timeout.
                         */
                        if (emac_has_feature(dev, EMAC_FTR_EMAC4))
                                mr1 |= EMAC4_MR1_JPSM;
                        else
                                mr1 |= EMAC_MR1_JPSM;
                        dev->stop_timeout = STOP_TIMEOUT_1000_JUMBO;
                } else
                        dev->stop_timeout = STOP_TIMEOUT_1000;
                break;
        case SPEED_100:
                mr1 |= EMAC_MR1_MF_100;
                dev->stop_timeout = STOP_TIMEOUT_100;
                break;
        default: /* make gcc happy */
                break;
        }

        /* Propagate the negotiated speed to the attached RGMII/ZMII bridge */
        if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
                rgmii_set_speed(dev->rgmii_dev, dev->rgmii_port,
                                dev->phy.speed);
        if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
                zmii_set_speed(dev->zmii_dev, dev->zmii_port, dev->phy.speed);

        /* An erratum on 40x forces us to NOT use integrated flow control,
         * let's hope it works on 44x ;)
         */
        if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x) &&
            dev->phy.duplex == DUPLEX_FULL) {
                if (dev->phy.pause)
                        mr1 |= EMAC_MR1_EIFC | EMAC_MR1_APP;
                else if (dev->phy.asym_pause)
                        mr1 |= EMAC_MR1_APP;
        }

        /* Add base settings & fifo sizes & program MR1 */
        mr1 |= emac_calc_base_mr1(dev, tx_size, rx_size);
        out_be32(&p->mr1, mr1);

        /* Set individual MAC address */
        out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
        out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
                 (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
                 ndev->dev_addr[5]);

        /* VLAN Tag Protocol ID */
        out_be32(&p->vtpid, 0x8100);

        /* Receive mode register */
        r = emac_iff2rmr(ndev);
        if (r & EMAC_RMR_MAE)
                emac_hash_mc(dev);
        out_be32(&p->rmr, r);

        /* FIFOs thresholds */
        if (emac_has_feature(dev, EMAC_FTR_EMAC4))
                r = EMAC4_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
                               tx_size / 2 / dev->fifo_entry_size);
        else
                r = EMAC_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
                              tx_size / 2 / dev->fifo_entry_size);
        out_be32(&p->tmr1, r);
        out_be32(&p->trtr, emac_calc_trtr(dev, tx_size / 2));

        /* PAUSE frame is sent when RX FIFO reaches its high-water mark,
           there should be still enough space in FIFO to allow our link
           partner time to process this frame and also time to send PAUSE
           frame itself.

           Here is the worst case scenario for the RX FIFO "headroom"
           (from "The Switch Book") (100Mbps, without preamble, inter-frame gap):

           1) One maximum-length frame on TX                    1522 bytes
           2) One PAUSE frame time                                64 bytes
           3) PAUSE frame decode time allowance                   64 bytes
           4) One maximum-length frame on RX                    1522 bytes
           5) Round-trip propagation delay of the link (100Mb)    15 bytes
           ----------
           3187 bytes

           I chose to set high-water mark to RX_FIFO_SIZE / 4 (1024 bytes)
           low-water mark  to RX_FIFO_SIZE / 8 (512 bytes)
         */
        r = emac_calc_rwmr(dev, rx_size / 8 / dev->fifo_entry_size,
                           rx_size / 4 / dev->fifo_entry_size);
        out_be32(&p->rwmr, r);

        /* Set PAUSE timer to the maximum */
        out_be32(&p->ptr, 0xffff);

        /* IRQ sources */
        r = EMAC_ISR_OVR | EMAC_ISR_BP | EMAC_ISR_SE |
                EMAC_ISR_ALE | EMAC_ISR_BFCS | EMAC_ISR_PTLE | EMAC_ISR_ORE |
                EMAC_ISR_IRE | EMAC_ISR_TE;
        if (emac_has_feature(dev, EMAC_FTR_EMAC4))
            r |= EMAC4_ISR_TXPE | EMAC4_ISR_RXPE /* | EMAC4_ISR_TXUE |
                                                  EMAC4_ISR_RXOE | */;
        out_be32(&p->iser,  r);

        /* We need to take GPCS PHY out of isolate mode after EMAC reset */
        if (emac_phy_gpcs(dev->phy.mode))
                emac_mii_reset_phy(&dev->phy);

        /* Required for Pause packet support in EMAC */
        dev_mc_add(ndev, default_mcast_addr, sizeof(default_mcast_addr), 1);

        return 0;
}
660
661 static void emac_reinitialize(struct emac_instance *dev)
662 {
663         DBG(dev, "reinitialize" NL);
664
665         emac_netif_stop(dev);
666         if (!emac_configure(dev)) {
667                 emac_tx_enable(dev);
668                 emac_rx_enable(dev);
669         }
670         emac_netif_start(dev);
671 }
672
/* Fully reset the TX path: stop the MAC transmitter and the MAL TX
 * channel, free every pending skb, rewind the ring indices, reprogram
 * the MAC, then restart both channels.  The caller (emac_reset_work)
 * stops the netif side first, so the TX path cannot race with us.
 */
static void emac_full_tx_reset(struct emac_instance *dev)
{
        DBG(dev, "full_tx_reset" NL);

        emac_tx_disable(dev);
        mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
        emac_clean_tx_ring(dev);
        dev->tx_cnt = dev->tx_slot = dev->ack_slot = 0;

        emac_configure(dev);

        mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
        emac_tx_enable(dev);
        emac_rx_enable(dev);
}
688
689 static void emac_reset_work(struct work_struct *work)
690 {
691         struct emac_instance *dev = container_of(work, struct emac_instance, reset_work);
692
693         DBG(dev, "reset_work" NL);
694
695         mutex_lock(&dev->link_lock);
696         if (dev->opened) {
697                 emac_netif_stop(dev);
698                 emac_full_tx_reset(dev);
699                 emac_netif_start(dev);
700         }
701         mutex_unlock(&dev->link_lock);
702 }
703
704 static void emac_tx_timeout(struct net_device *ndev)
705 {
706         struct emac_instance *dev = netdev_priv(ndev);
707
708         DBG(dev, "tx_timeout" NL);
709
710         schedule_work(&dev->reset_work);
711 }
712
713
714 static inline int emac_phy_done(struct emac_instance *dev, u32 stacr)
715 {
716         int done = !!(stacr & EMAC_STACR_OC);
717
718         if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
719                 done = !done;
720
721         return done;
722 };
723
/* Low-level MDIO read through the EMAC STA controller.
 *
 * Serialized by dev->mdio_lock.  Routes the MDIO lines through the
 * ZMII/RGMII bridge when present, waits for the interface to go idle,
 * issues the read command and waits for completion.
 *
 * Returns the 16-bit register value on success, -ETIMEDOUT if the bus
 * never became idle or the command never completed, or -EREMOTEIO if
 * the PHY flagged an error (STACR_PHYE).
 */
static int __emac_mdio_read(struct emac_instance *dev, u8 id, u8 reg)
{
        struct emac_regs __iomem *p = dev->emacp;
        u32 r = 0;
        int n, err = -ETIMEDOUT;

        mutex_lock(&dev->mdio_lock);

        DBG2(dev, "mdio_read(%02x,%02x)" NL, id, reg);

        /* Enable proper MDIO port */
        if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
                zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
        if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
                rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);

        /* Wait for management interface to become idle */
        n = 20;
        while (!emac_phy_done(dev, in_be32(&p->stacr))) {
                udelay(1);
                if (!--n) {
                        DBG2(dev, " -> timeout wait idle\n");
                        goto bail;
                }
        }

        /* Issue read command: base STACR value depends on the core type,
         * the OC bit sense on the feature flags.
         */
        if (emac_has_feature(dev, EMAC_FTR_EMAC4))
                r = EMAC4_STACR_BASE(dev->opb_bus_freq);
        else
                r = EMAC_STACR_BASE(dev->opb_bus_freq);
        if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
                r |= EMAC_STACR_OC;
        if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
                r |= EMACX_STACR_STAC_READ;
        else
                r |= EMAC_STACR_STAC_READ;
        r |= (reg & EMAC_STACR_PRA_MASK)
                | ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT);
        out_be32(&p->stacr, r);

        /* Wait for read to complete */
        n = 200;
        while (!emac_phy_done(dev, (r = in_be32(&p->stacr)))) {
                udelay(1);
                if (!--n) {
                        DBG2(dev, " -> timeout wait complete\n");
                        goto bail;
                }
        }

        if (unlikely(r & EMAC_STACR_PHYE)) {
                DBG(dev, "mdio_read(%02x, %02x) failed" NL, id, reg);
                err = -EREMOTEIO;
                goto bail;
        }

        /* Extract the 16-bit data field from the completed command */
        r = ((r >> EMAC_STACR_PHYD_SHIFT) & EMAC_STACR_PHYD_MASK);

        DBG2(dev, "mdio_read -> %04x" NL, r);
        err = 0;
 bail:
        if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
                rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
        if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
                zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
        mutex_unlock(&dev->mdio_lock);

        return err == 0 ? r : err;
}
794
/* Low-level MDIO write through the EMAC STA controller.
 *
 * Mirror of __emac_mdio_read(): serialized by dev->mdio_lock, routes
 * the MDIO lines through ZMII/RGMII when present, waits for idle,
 * issues the write and waits for completion.  Timeouts are logged at
 * debug level only.
 *
 * NOTE(review): 'err' is assigned but never consumed (the function is
 * void); it is kept for symmetry with __emac_mdio_read().
 */
static void __emac_mdio_write(struct emac_instance *dev, u8 id, u8 reg,
                              u16 val)
{
        struct emac_regs __iomem *p = dev->emacp;
        u32 r = 0;
        int n, err = -ETIMEDOUT;

        mutex_lock(&dev->mdio_lock);

        DBG2(dev, "mdio_write(%02x,%02x,%04x)" NL, id, reg, val);

        /* Enable proper MDIO port */
        if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
                zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
        if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
                rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);

        /* Wait for management interface to be idle */
        n = 20;
        while (!emac_phy_done(dev, in_be32(&p->stacr))) {
                udelay(1);
                if (!--n) {
                        DBG2(dev, " -> timeout wait idle\n");
                        goto bail;
                }
        }

        /* Issue write command: base STACR value depends on the core type,
         * the OC bit sense on the feature flags.
         */
        if (emac_has_feature(dev, EMAC_FTR_EMAC4))
                r = EMAC4_STACR_BASE(dev->opb_bus_freq);
        else
                r = EMAC_STACR_BASE(dev->opb_bus_freq);
        if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
                r |= EMAC_STACR_OC;
        if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
                r |= EMACX_STACR_STAC_WRITE;
        else
                r |= EMAC_STACR_STAC_WRITE;
        r |= (reg & EMAC_STACR_PRA_MASK) |
                ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT) |
                (val << EMAC_STACR_PHYD_SHIFT);
        out_be32(&p->stacr, r);

        /* Wait for write to complete */
        n = 200;
        while (!emac_phy_done(dev, in_be32(&p->stacr))) {
                udelay(1);
                if (!--n) {
                        DBG2(dev, " -> timeout wait complete\n");
                        goto bail;
                }
        }
        err = 0;
 bail:
        if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
                rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
        if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
                zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
        mutex_unlock(&dev->mdio_lock);
}
855
856 static int emac_mdio_read(struct net_device *ndev, int id, int reg)
857 {
858         struct emac_instance *dev = netdev_priv(ndev);
859         int res;
860
861         res = __emac_mdio_read(dev->mdio_instance ? dev->mdio_instance : dev,
862                                (u8) id, (u8) reg);
863         return res;
864 }
865
866 static void emac_mdio_write(struct net_device *ndev, int id, int reg, int val)
867 {
868         struct emac_instance *dev = netdev_priv(ndev);
869
870         __emac_mdio_write(dev->mdio_instance ? dev->mdio_instance : dev,
871                           (u8) id, (u8) reg, (u16) val);
872 }
873
874 /* Tx lock BH */
875 static void __emac_set_multicast_list(struct emac_instance *dev)
876 {
877         struct emac_regs __iomem *p = dev->emacp;
878         u32 rmr = emac_iff2rmr(dev->ndev);
879
880         DBG(dev, "__multicast %08x" NL, rmr);
881
882         /* I decided to relax register access rules here to avoid
883          * full EMAC reset.
884          *
885          * There is a real problem with EMAC4 core if we use MWSW_001 bit
886          * in MR1 register and do a full EMAC reset.
887          * One TX BD status update is delayed and, after EMAC reset, it
888          * never happens, resulting in TX hung (it'll be recovered by TX
889          * timeout handler eventually, but this is just gross).
890          * So we either have to do full TX reset or try to cheat here :)
891          *
892          * The only required change is to RX mode register, so I *think* all
893          * we need is just to stop RX channel. This seems to work on all
894          * tested SoCs.                                                --ebs
895          *
896          * If we need the full reset, we might just trigger the workqueue
897          * and do it async... a bit nasty but should work --BenH
898          */
899         dev->mcast_pending = 0;
900         emac_rx_disable(dev);
901         if (rmr & EMAC_RMR_MAE)
902                 emac_hash_mc(dev);
903         out_be32(&p->rmr, rmr);
904         emac_rx_enable(dev);
905 }
906
907 /* Tx lock BH */
908 static void emac_set_multicast_list(struct net_device *ndev)
909 {
910         struct emac_instance *dev = netdev_priv(ndev);
911
912         DBG(dev, "multicast" NL);
913
914         BUG_ON(!netif_running(dev->ndev));
915
916         if (dev->no_mcast) {
917                 dev->mcast_pending = 1;
918                 return;
919         }
920         __emac_set_multicast_list(dev);
921 }
922
/* Rebuild the RX ring for @new_mtu.  Runs with the interface quiesced
 * (netif stopped, RX disabled, MAL RX channel off) under link_lock.
 * Returns 0 on success or -ENOMEM; on failure RX is restarted with
 * whatever buffers were successfully allocated and the caller keeps
 * the old MTU (see emac_change_mtu()).
 */
static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu)
{
	int rx_sync_size = emac_rx_sync_size(new_mtu);
	int rx_skb_size = emac_rx_skb_size(new_mtu);
	int i, ret = 0;

	mutex_lock(&dev->link_lock);
	emac_netif_stop(dev);
	emac_rx_disable(dev);
	mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);

	/* A partially assembled scatter/gather packet cannot survive
	 * the resize - drop it */
	if (dev->rx_sg_skb) {
		++dev->estats.rx_dropped_resize;
		dev_kfree_skb(dev->rx_sg_skb);
		dev->rx_sg_skb = NULL;
	}

	/* Make a first pass over RX ring and mark BDs ready, dropping
	 * non-processed packets on the way. We need this as a separate pass
	 * to simplify error recovery in the case of allocation failure later.
	 */
	for (i = 0; i < NUM_RX_BUFF; ++i) {
		if (dev->rx_desc[i].ctrl & MAL_RX_CTRL_FIRST)
			++dev->estats.rx_dropped_resize;

		dev->rx_desc[i].data_len = 0;
		dev->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY |
		    (i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
	}

	/* Reallocate RX ring only if bigger skb buffers are required */
	if (rx_skb_size <= dev->rx_skb_size)
		goto skip;

	/* Second pass, allocate new skbs */
	for (i = 0; i < NUM_RX_BUFF; ++i) {
		struct sk_buff *skb = alloc_skb(rx_skb_size, GFP_ATOMIC);
		if (!skb) {
			ret = -ENOMEM;
			goto oom;
		}

		BUG_ON(!dev->rx_skb[i]);
		dev_kfree_skb(dev->rx_skb[i]);

		/* 2-byte offset, presumably for IP header alignment -
		 * matches the +2 convention in emac_alloc_rx_skb() */
		skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
		dev->rx_desc[i].data_ptr =
		    dma_map_single(&dev->ofdev->dev, skb->data - 2, rx_sync_size,
				   DMA_FROM_DEVICE) + 2;
		dev->rx_skb[i] = skb;
	}
 skip:
	/* Check if we need to change "Jumbo" bit in MR1 */
	if ((new_mtu > ETH_DATA_LEN) ^ (dev->ndev->mtu > ETH_DATA_LEN)) {
		/* This is to prevent starting RX channel in emac_rx_enable() */
		set_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);

		dev->ndev->mtu = new_mtu;
		emac_full_tx_reset(dev);
	}

	mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(new_mtu));
 oom:
	/* Restart RX */
	clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
	dev->rx_slot = 0;
	mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
	emac_rx_enable(dev);
	emac_netif_start(dev);
	mutex_unlock(&dev->link_lock);

	return ret;
}
996
997 /* Process ctx, rtnl_lock semaphore */
998 static int emac_change_mtu(struct net_device *ndev, int new_mtu)
999 {
1000         struct emac_instance *dev = netdev_priv(ndev);
1001         int ret = 0;
1002
1003         if (new_mtu < EMAC_MIN_MTU || new_mtu > dev->max_mtu)
1004                 return -EINVAL;
1005
1006         DBG(dev, "change_mtu(%d)" NL, new_mtu);
1007
1008         if (netif_running(ndev)) {
1009                 /* Check if we really need to reinitalize RX ring */
1010                 if (emac_rx_skb_size(ndev->mtu) != emac_rx_skb_size(new_mtu))
1011                         ret = emac_resize_rx_ring(dev, new_mtu);
1012         }
1013
1014         if (!ret) {
1015                 ndev->mtu = new_mtu;
1016                 dev->rx_skb_size = emac_rx_skb_size(new_mtu);
1017                 dev->rx_sync_size = emac_rx_sync_size(new_mtu);
1018         }
1019
1020         return ret;
1021 }
1022
1023 static void emac_clean_tx_ring(struct emac_instance *dev)
1024 {
1025         int i;
1026
1027         for (i = 0; i < NUM_TX_BUFF; ++i) {
1028                 if (dev->tx_skb[i]) {
1029                         dev_kfree_skb(dev->tx_skb[i]);
1030                         dev->tx_skb[i] = NULL;
1031                         if (dev->tx_desc[i].ctrl & MAL_TX_CTRL_READY)
1032                                 ++dev->estats.tx_dropped;
1033                 }
1034                 dev->tx_desc[i].ctrl = 0;
1035                 dev->tx_desc[i].data_ptr = 0;
1036         }
1037 }
1038
1039 static void emac_clean_rx_ring(struct emac_instance *dev)
1040 {
1041         int i;
1042
1043         for (i = 0; i < NUM_RX_BUFF; ++i)
1044                 if (dev->rx_skb[i]) {
1045                         dev->rx_desc[i].ctrl = 0;
1046                         dev_kfree_skb(dev->rx_skb[i]);
1047                         dev->rx_skb[i] = NULL;
1048                         dev->rx_desc[i].data_ptr = 0;
1049                 }
1050
1051         if (dev->rx_sg_skb) {
1052                 dev_kfree_skb(dev->rx_sg_skb);
1053                 dev->rx_sg_skb = NULL;
1054         }
1055 }
1056
/* Allocate and DMA-map a fresh RX skb for ring position @slot.  The
 * buffer is used with a 2-byte offset (presumably for IP header
 * alignment - the same +2 convention is used throughout the RX path).
 * The wmb() orders the data_ptr/data_len writes before the BD is
 * marked EMPTY for the MAL.  Returns 0 or -ENOMEM.
 */
static inline int emac_alloc_rx_skb(struct emac_instance *dev, int slot,
				    gfp_t flags)
{
	struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags);
	if (unlikely(!skb))
		return -ENOMEM;

	dev->rx_skb[slot] = skb;
	dev->rx_desc[slot].data_len = 0;

	skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
	dev->rx_desc[slot].data_ptr =
	    dma_map_single(&dev->ofdev->dev, skb->data - 2, dev->rx_sync_size,
			   DMA_FROM_DEVICE) + 2;
	wmb();
	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);

	return 0;
}
1077
1078 static void emac_print_link_status(struct emac_instance *dev)
1079 {
1080         if (netif_carrier_ok(dev->ndev))
1081                 printk(KERN_INFO "%s: link is up, %d %s%s\n",
1082                        dev->ndev->name, dev->phy.speed,
1083                        dev->phy.duplex == DUPLEX_FULL ? "FDX" : "HDX",
1084                        dev->phy.pause ? ", pause enabled" :
1085                        dev->phy.asym_pause ? ", asymmetric pause enabled" : "");
1086         else
1087                 printk(KERN_INFO "%s: link is down\n", dev->ndev->name);
1088 }
1089
/* Process ctx, rtnl_lock semaphore */
/* net_device open callback: installs the error IRQ handler, allocates
 * the RX ring, starts PHY link polling when a PHY is attached, and
 * brings up the MAL channels and the EMAC itself under link_lock.
 * Returns 0, the request_irq() error, or -ENOMEM.
 */
static int emac_open(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int err, i;

	DBG(dev, "open" NL);

	/* Setup error IRQ handler */
	err = request_irq(dev->emac_irq, emac_irq, 0, "EMAC", dev);
	if (err) {
		printk(KERN_ERR "%s: failed to request IRQ %d\n",
		       ndev->name, dev->emac_irq);
		return err;
	}

	/* Allocate RX ring */
	for (i = 0; i < NUM_RX_BUFF; ++i)
		if (emac_alloc_rx_skb(dev, i, GFP_KERNEL)) {
			printk(KERN_ERR "%s: failed to allocate RX ring\n",
			       ndev->name);
			goto oom;
		}

	/* Reset ring indices before (re)starting the channels */
	dev->tx_cnt = dev->tx_slot = dev->ack_slot = dev->rx_slot = 0;
	clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
	dev->rx_sg_skb = NULL;

	mutex_lock(&dev->link_lock);
	dev->opened = 1;

	/* Start PHY polling now.
	 */
	if (dev->phy.address >= 0) {
		int link_poll_interval;
		if (dev->phy.def->ops->poll_link(&dev->phy)) {
			dev->phy.def->ops->read_link(&dev->phy);
			emac_rx_clk_default(dev);
			netif_carrier_on(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_ON;
		} else {
			emac_rx_clk_tx(dev);
			netif_carrier_off(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_OFF;
		}
		dev->link_polling = 1;
		/* Publish link_polling before the work can observe it;
		 * emac_force_link_update() reads it after smp_rmb() */
		wmb();
		schedule_delayed_work(&dev->link_work, link_poll_interval);
		emac_print_link_status(dev);
	} else
		netif_carrier_on(dev->ndev);

	emac_configure(dev);
	mal_poll_add(dev->mal, &dev->commac);
	mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
	mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(ndev->mtu));
	mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
	emac_tx_enable(dev);
	emac_rx_enable(dev);
	emac_netif_start(dev);

	mutex_unlock(&dev->link_lock);

	return 0;
 oom:
	/* Frees the slots that were successfully allocated */
	emac_clean_rx_ring(dev);
	free_irq(dev->emac_irq, dev);

	return -ENOMEM;
}
1160
/* BHs disabled */
/* Compares the speed/duplex/pause settings currently programmed in the
 * MR1 register against the state cached in dev->phy.  Compiled out
 * (#if 0) - apparently unused; kept for reference.
 */
#if 0
static int emac_link_differs(struct emac_instance *dev)
{
	u32 r = in_be32(&dev->emacp->mr1);

	int duplex = r & EMAC_MR1_FDE ? DUPLEX_FULL : DUPLEX_HALF;
	int speed, pause, asym_pause;

	if (r & EMAC_MR1_MF_1000)
		speed = SPEED_1000;
	else if (r & EMAC_MR1_MF_100)
		speed = SPEED_100;
	else
		speed = SPEED_10;

	switch (r & (EMAC_MR1_EIFC | EMAC_MR1_APP)) {
	case (EMAC_MR1_EIFC | EMAC_MR1_APP):
		pause = 1;
		asym_pause = 0;
		break;
	case EMAC_MR1_APP:
		pause = 0;
		asym_pause = 1;
		break;
	default:
		pause = asym_pause = 0;
	}
	return speed != dev->phy.speed || duplex != dev->phy.duplex ||
	    pause != dev->phy.pause || asym_pause != dev->phy.asym_pause;
}
#endif
1193
/* Periodic link poller (delayed work).  Checks the PHY and, on a state
 * change, does a full TX reset (link up: parameters may have changed)
 * or reinitializes the EMAC (link down).  Reschedules itself unless
 * the device was closed - dev->opened is cleared under link_lock in
 * emac_close().
 */
static void emac_link_timer(struct work_struct *work)
{
	struct emac_instance *dev =
		container_of((struct delayed_work *)work,
			     struct emac_instance, link_work);
	int link_poll_interval;

	mutex_lock(&dev->link_lock);
	DBG2(dev, "link timer" NL);

	/* Device was closed while this work was queued - don't rearm */
	if (!dev->opened)
		goto bail;

	if (dev->phy.def->ops->poll_link(&dev->phy)) {
		if (!netif_carrier_ok(dev->ndev)) {
			emac_rx_clk_default(dev);
			/* Get new link parameters */
			dev->phy.def->ops->read_link(&dev->phy);

			netif_carrier_on(dev->ndev);
			emac_netif_stop(dev);
			emac_full_tx_reset(dev);
			emac_netif_start(dev);
			emac_print_link_status(dev);
		}
		link_poll_interval = PHY_POLL_LINK_ON;
	} else {
		if (netif_carrier_ok(dev->ndev)) {
			emac_rx_clk_tx(dev);
			netif_carrier_off(dev->ndev);
			netif_tx_disable(dev->ndev);
			emac_reinitialize(dev);
			emac_print_link_status(dev);
		}
		link_poll_interval = PHY_POLL_LINK_OFF;
	}
	schedule_delayed_work(&dev->link_work, link_poll_interval);
 bail:
	mutex_unlock(&dev->link_lock);
}
1234
/* Force the link poller to re-evaluate the link soon.  Carrier is
 * dropped first so the next poll treats the link as freshly up and
 * re-reads the PHY parameters.
 */
static void emac_force_link_update(struct emac_instance *dev)
{
	netif_carrier_off(dev->ndev);
	/* Pairs with the wmb() after link_polling is set in emac_open() */
	smp_rmb();
	if (dev->link_polling) {
		cancel_rearming_delayed_work(&dev->link_work);
		/* Re-check: emac_close() may have cleared link_polling
		 * while we were cancelling - don't rearm in that case */
		if (dev->link_polling)
			schedule_delayed_work(&dev->link_work,  PHY_POLL_LINK_OFF);
	}
}
1245
/* Process ctx, rtnl_lock semaphore */
/* net_device stop callback: reverses emac_open() - stops link polling,
 * quiesces the MAC and the MAL channels, then frees both rings and the
 * error IRQ.  Always returns 0.
 */
static int emac_close(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	DBG(dev, "close" NL);

	/* Clearing link_polling before cancelling keeps
	 * emac_force_link_update() from rearming the work */
	if (dev->phy.address >= 0) {
		dev->link_polling = 0;
		cancel_rearming_delayed_work(&dev->link_work);
	}
	mutex_lock(&dev->link_lock);
	emac_netif_stop(dev);
	dev->opened = 0;
	mutex_unlock(&dev->link_lock);

	emac_rx_disable(dev);
	emac_tx_disable(dev);
	mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);
	mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
	mal_poll_del(dev->mal, &dev->commac);

	emac_clean_tx_ring(dev);
	emac_clean_rx_ring(dev);

	free_irq(dev->emac_irq, dev);

	return 0;
}
1275
1276 static inline u16 emac_tx_csum(struct emac_instance *dev,
1277                                struct sk_buff *skb)
1278 {
1279         if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
1280                 (skb->ip_summed == CHECKSUM_PARTIAL)) {
1281                 ++dev->stats.tx_packets_csum;
1282                 return EMAC_TX_CTRL_TAH_CSUM;
1283         }
1284         return 0;
1285 }
1286
/* Common tail of both xmit paths: kicks the transmitter via TMR0,
 * stops the queue when the ring just became full (reopened by
 * emac_poll_tx()), and updates TX statistics.  Always returns 0
 * (packet accepted).
 */
static inline int emac_xmit_finish(struct emac_instance *dev, int len)
{
	struct emac_regs __iomem *p = dev->emacp;
	struct net_device *ndev = dev->ndev;

	/* Send the packet out. If the if makes a significant perf
	 * difference, then we can store the TMR0 value in "dev"
	 * instead
	 */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		out_be32(&p->tmr0, EMAC4_TMR0_XMIT);
	else
		out_be32(&p->tmr0, EMAC_TMR0_XMIT);

	if (unlikely(++dev->tx_cnt == NUM_TX_BUFF)) {
		netif_stop_queue(ndev);
		DBG2(dev, "stopped TX queue" NL);
	}

	ndev->trans_start = jiffies;
	++dev->stats.tx_packets;
	dev->stats.tx_bytes += len;

	return 0;
}
1312
/* Tx lock BH */
/* hard_start_xmit for the simple (non-scatter/gather) case: the whole
 * skb fits one BD.  The wmb() ensures data_ptr/data_len are visible
 * before the ctrl write hands the BD to the MAL.
 */
static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	unsigned int len = skb->len;
	int slot;

	u16 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
	    MAL_TX_CTRL_LAST | emac_tx_csum(dev, skb);

	slot = dev->tx_slot++;
	if (dev->tx_slot == NUM_TX_BUFF) {
		dev->tx_slot = 0;
		ctrl |= MAL_TX_CTRL_WRAP;
	}

	DBG2(dev, "xmit(%u) %d" NL, len, slot);

	dev->tx_skb[slot] = skb;
	dev->tx_desc[slot].data_ptr = dma_map_single(&dev->ofdev->dev,
						     skb->data, len,
						     DMA_TO_DEVICE);
	dev->tx_desc[slot].data_len = (u16) len;
	wmb();
	dev->tx_desc[slot].ctrl = ctrl;

	return emac_xmit_finish(dev, len);
}
1341
/* Split a DMA-mapped region (@len bytes at bus address @pd) into
 * consecutive TX BDs of at most MAL_MAX_TX_SIZE each, starting at the
 * slot after @slot.  MAL_TX_CTRL_LAST is set on the final chunk only
 * when @last is set.  These BDs are armed immediately via @base_ctrl
 * (which includes MAL_TX_CTRL_READY) - the caller arms the *first* BD
 * of the frame last, so the MAL never sees a partial frame (see
 * emac_start_xmit_sg()).  Returns the last slot used.
 */
static inline int emac_xmit_split(struct emac_instance *dev, int slot,
				  u32 pd, int len, int last, u16 base_ctrl)
{
	while (1) {
		u16 ctrl = base_ctrl;
		int chunk = min(len, MAL_MAX_TX_SIZE);
		len -= chunk;

		slot = (slot + 1) % NUM_TX_BUFF;

		if (last && !len)
			ctrl |= MAL_TX_CTRL_LAST;
		if (slot == NUM_TX_BUFF - 1)
			ctrl |= MAL_TX_CTRL_WRAP;

		/* No skb attached here; only the frame's final BD
		 * holds the skb (freed by emac_poll_tx()) */
		dev->tx_skb[slot] = NULL;
		dev->tx_desc[slot].data_ptr = pd;
		dev->tx_desc[slot].data_len = (u16) chunk;
		dev->tx_desc[slot].ctrl = ctrl;
		++dev->tx_cnt;

		if (!len)
			break;

		pd += chunk;
	}
	return slot;
}
1370
/* Tx lock BH disabled (SG version for TAH equipped EMACs) */
/* hard_start_xmit for TAH-equipped EMACs: handles fragmented skbs by
 * spreading the linear part and each page fragment over multiple BDs
 * (chunked by emac_xmit_split()).  The first BD of the frame is armed
 * (its ctrl written) last, after a wmb(), so the MAL never sees a
 * partially built frame.  Returns 0 on success, or 1 with the queue
 * stopped when the ring cannot hold the frame.
 */
static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int len = skb->len, chunk;
	int slot, i;
	u16 ctrl;
	u32 pd;

	/* This is common "fast" path */
	if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE))
		return emac_start_xmit(skb, ndev);

	len -= skb->data_len;	/* len = size of the linear part only */

	/* Note, this is only an *estimation*, we can still run out of empty
	 * slots because of the additional fragmentation into
	 * MAL_MAX_TX_SIZE-sized chunks
	 */
	if (unlikely(dev->tx_cnt + nr_frags + mal_tx_chunks(len) > NUM_TX_BUFF))
		goto stop_queue;

	/* No MAL_TX_CTRL_LAST here: it is added on the frame's final
	 * chunk inside emac_xmit_split() */
	ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
	    emac_tx_csum(dev, skb);
	slot = dev->tx_slot;

	/* skb data */
	dev->tx_skb[slot] = NULL;
	chunk = min(len, MAL_MAX_TX_SIZE);
	dev->tx_desc[slot].data_ptr = pd =
	    dma_map_single(&dev->ofdev->dev, skb->data, len, DMA_TO_DEVICE);
	dev->tx_desc[slot].data_len = (u16) chunk;
	len -= chunk;
	if (unlikely(len))
		slot = emac_xmit_split(dev, slot, pd + chunk, len, !nr_frags,
				       ctrl);
	/* skb fragments */
	for (i = 0; i < nr_frags; ++i) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		len = frag->size;

		/* Estimation above may have been too optimistic */
		if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
			goto undo_frame;

		pd = dma_map_page(&dev->ofdev->dev, frag->page, frag->page_offset, len,
				  DMA_TO_DEVICE);

		slot = emac_xmit_split(dev, slot, pd, len, i == nr_frags - 1,
				       ctrl);
	}

	DBG2(dev, "xmit_sg(%u) %d - %d" NL, skb->len, dev->tx_slot, slot);

	/* Attach skb to the last slot so we don't release it too early */
	dev->tx_skb[slot] = skb;

	/* Send the packet out */
	if (dev->tx_slot == NUM_TX_BUFF - 1)
		ctrl |= MAL_TX_CTRL_WRAP;
	wmb();
	dev->tx_desc[dev->tx_slot].ctrl = ctrl;
	dev->tx_slot = (slot + 1) % NUM_TX_BUFF;

	return emac_xmit_finish(dev, skb->len);

 undo_frame:
	/* Well, too bad. Our previous estimation was overly optimistic.
	 * Undo everything.
	 */
	while (slot != dev->tx_slot) {
		dev->tx_desc[slot].ctrl = 0;
		--dev->tx_cnt;
		if (--slot < 0)
			slot = NUM_TX_BUFF - 1;
	}
	++dev->estats.tx_undo;

 stop_queue:
	netif_stop_queue(ndev);
	DBG2(dev, "stopped TX queue" NL);
	return 1;
}
1454
1455 /* Tx lock BHs */
1456 static void emac_parse_tx_error(struct emac_instance *dev, u16 ctrl)
1457 {
1458         struct emac_error_stats *st = &dev->estats;
1459
1460         DBG(dev, "BD TX error %04x" NL, ctrl);
1461
1462         ++st->tx_bd_errors;
1463         if (ctrl & EMAC_TX_ST_BFCS)
1464                 ++st->tx_bd_bad_fcs;
1465         if (ctrl & EMAC_TX_ST_LCS)
1466                 ++st->tx_bd_carrier_loss;
1467         if (ctrl & EMAC_TX_ST_ED)
1468                 ++st->tx_bd_excessive_deferral;
1469         if (ctrl & EMAC_TX_ST_EC)
1470                 ++st->tx_bd_excessive_collisions;
1471         if (ctrl & EMAC_TX_ST_LC)
1472                 ++st->tx_bd_late_collision;
1473         if (ctrl & EMAC_TX_ST_MC)
1474                 ++st->tx_bd_multple_collisions;
1475         if (ctrl & EMAC_TX_ST_SC)
1476                 ++st->tx_bd_single_collision;
1477         if (ctrl & EMAC_TX_ST_UR)
1478                 ++st->tx_bd_underrun;
1479         if (ctrl & EMAC_TX_ST_SQE)
1480                 ++st->tx_bd_sqe;
1481 }
1482
/* Reap completed TX BDs: free their skbs, account BD errors, advance
 * ack_slot and wake the TX queue once occupancy drops below
 * EMAC_TX_WAKEUP_THRESH.  Runs under netif_tx_lock_bh() to exclude the
 * xmit paths; @param is the emac_instance (MAL callback signature).
 */
static void emac_poll_tx(void *param)
{
	struct emac_instance *dev = param;
	u32 bad_mask;

	DBG2(dev, "poll_tx, %d %d" NL, dev->tx_cnt, dev->ack_slot);

	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		bad_mask = EMAC_IS_BAD_TX_TAH;
	else
		bad_mask = EMAC_IS_BAD_TX;

	netif_tx_lock_bh(dev->ndev);
	if (dev->tx_cnt) {
		u16 ctrl;
		int slot = dev->ack_slot, n = 0;
	again:
		/* BD is done once the MAL clears MAL_TX_CTRL_READY */
		ctrl = dev->tx_desc[slot].ctrl;
		if (!(ctrl & MAL_TX_CTRL_READY)) {
			struct sk_buff *skb = dev->tx_skb[slot];
			++n;

			/* Only the last BD of an SG frame carries the
			 * skb (see emac_start_xmit_sg()), so NULL here
			 * is normal */
			if (skb) {
				dev_kfree_skb(skb);
				dev->tx_skb[slot] = NULL;
			}
			slot = (slot + 1) % NUM_TX_BUFF;

			if (unlikely(ctrl & bad_mask))
				emac_parse_tx_error(dev, ctrl);

			if (--dev->tx_cnt)
				goto again;
		}
		if (n) {
			dev->ack_slot = slot;
			if (netif_queue_stopped(dev->ndev) &&
			    dev->tx_cnt < EMAC_TX_WAKEUP_THRESH)
				netif_wake_queue(dev->ndev);

			DBG2(dev, "tx %d pkts" NL, n);
		}
	}
	netif_tx_unlock_bh(dev->ndev);
}
1528
/* Rearm RX BD @slot with its existing skb.  If @len is non-zero the
 * CPU touched the data, so that much of the buffer is re-mapped for
 * the device (unmap calls are intentionally omitted in this driver -
 * see the comment at the top of the file).  The wmb() orders the
 * data_len clear before the BD is marked EMPTY for the MAL.
 */
static inline void emac_recycle_rx_skb(struct emac_instance *dev, int slot,
				       int len)
{
	struct sk_buff *skb = dev->rx_skb[slot];

	DBG2(dev, "recycle %d %d" NL, slot, len);

	if (len)
		dma_map_single(&dev->ofdev->dev, skb->data - 2,
			       EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE);

	dev->rx_desc[slot].data_len = 0;
	wmb();
	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
}
1545
1546 static void emac_parse_rx_error(struct emac_instance *dev, u16 ctrl)
1547 {
1548         struct emac_error_stats *st = &dev->estats;
1549
1550         DBG(dev, "BD RX error %04x" NL, ctrl);
1551
1552         ++st->rx_bd_errors;
1553         if (ctrl & EMAC_RX_ST_OE)
1554                 ++st->rx_bd_overrun;
1555         if (ctrl & EMAC_RX_ST_BP)
1556                 ++st->rx_bd_bad_packet;
1557         if (ctrl & EMAC_RX_ST_RP)
1558                 ++st->rx_bd_runt_packet;
1559         if (ctrl & EMAC_RX_ST_SE)
1560                 ++st->rx_bd_short_event;
1561         if (ctrl & EMAC_RX_ST_AE)
1562                 ++st->rx_bd_alignment_error;
1563         if (ctrl & EMAC_RX_ST_BFCS)
1564                 ++st->rx_bd_bad_fcs;
1565         if (ctrl & EMAC_RX_ST_PTL)
1566                 ++st->rx_bd_packet_too_long;
1567         if (ctrl & EMAC_RX_ST_ORE)
1568                 ++st->rx_bd_out_of_range;
1569         if (ctrl & EMAC_RX_ST_IRE)
1570                 ++st->rx_bd_in_range;
1571 }
1572
1573 static inline void emac_rx_csum(struct emac_instance *dev,
1574                                 struct sk_buff *skb, u16 ctrl)
1575 {
1576 #ifdef CONFIG_IBM_NEW_EMAC_TAH
1577         if (!ctrl && dev->tah_dev) {
1578                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1579                 ++dev->stats.rx_packets_csum;
1580         }
1581 #endif
1582 }
1583
/* Append the data of RX BD @slot to the pending scatter/gather packet
 * (dev->rx_sg_skb).  If the combined length would overflow the skb
 * buffer, the whole packet is dropped (rx_dropped_mtu).  The BD is
 * recycled in every case.  Returns 0 when data was appended, -1 when
 * the packet was dropped or no SG packet was pending.
 */
static inline int emac_rx_sg_append(struct emac_instance *dev, int slot)
{
	if (likely(dev->rx_sg_skb != NULL)) {
		int len = dev->rx_desc[slot].data_len;
		int tot_len = dev->rx_sg_skb->len + len;

		/* +2 accounts for the alignment offset used by the RX
		 * buffers */
		if (unlikely(tot_len + 2 > dev->rx_skb_size)) {
			++dev->estats.rx_dropped_mtu;
			dev_kfree_skb(dev->rx_sg_skb);
			dev->rx_sg_skb = NULL;
		} else {
			cacheable_memcpy(skb_tail_pointer(dev->rx_sg_skb),
					 dev->rx_skb[slot]->data, len);
			skb_put(dev->rx_sg_skb, len);
			emac_recycle_rx_skb(dev, slot, len);
			return 0;
		}
	}
	emac_recycle_rx_skb(dev, slot, 0);
	return -1;
}
1605
1606 /* NAPI poll context */
1607 static int emac_poll_rx(void *param, int budget)
1608 {
1609         struct emac_instance *dev = param;
1610         int slot = dev->rx_slot, received = 0;
1611
1612         DBG2(dev, "poll_rx(%d)" NL, budget);
1613
1614  again:
1615         while (budget > 0) {
1616                 int len;
1617                 struct sk_buff *skb;
1618                 u16 ctrl = dev->rx_desc[slot].ctrl;
1619
1620                 if (ctrl & MAL_RX_CTRL_EMPTY)
1621                         break;
1622
1623                 skb = dev->rx_skb[slot];
1624                 mb();
1625                 len = dev->rx_desc[slot].data_len;
1626
1627                 if (unlikely(!MAL_IS_SINGLE_RX(ctrl)))
1628                         goto sg;
1629
1630                 ctrl &= EMAC_BAD_RX_MASK;
1631                 if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
1632                         emac_parse_rx_error(dev, ctrl);
1633                         ++dev->estats.rx_dropped_error;
1634                         emac_recycle_rx_skb(dev, slot, 0);
1635                         len = 0;
1636                         goto next;
1637                 }
1638
1639                 if (len < ETH_HLEN) {
1640                         ++dev->estats.rx_dropped_stack;
1641                         emac_recycle_rx_skb(dev, slot, len);
1642                         goto next;
1643                 }
1644
1645                 if (len && len < EMAC_RX_COPY_THRESH) {
1646                         struct sk_buff *copy_skb =
1647                             alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2, GFP_ATOMIC);
1648                         if (unlikely(!copy_skb))
1649                                 goto oom;
1650
1651                         skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2);
1652                         cacheable_memcpy(copy_skb->data - 2, skb->data - 2,
1653                                          len + 2);
1654                         emac_recycle_rx_skb(dev, slot, len);
1655                         skb = copy_skb;
1656                 } else if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC)))
1657                         goto oom;
1658
1659                 skb_put(skb, len);
1660         push_packet:
1661                 skb->dev = dev->ndev;
1662                 skb->protocol = eth_type_trans(skb, dev->ndev);
1663                 emac_rx_csum(dev, skb, ctrl);
1664
1665                 if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
1666                         ++dev->estats.rx_dropped_stack;
1667         next:
1668                 ++dev->stats.rx_packets;
1669         skip:
1670                 dev->stats.rx_bytes += len;
1671                 slot = (slot + 1) % NUM_RX_BUFF;
1672                 --budget;
1673                 ++received;
1674                 continue;
1675         sg:
1676                 if (ctrl & MAL_RX_CTRL_FIRST) {
1677                         BUG_ON(dev->rx_sg_skb);
1678                         if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) {
1679                                 DBG(dev, "rx OOM %d" NL, slot);
1680                                 ++dev->estats.rx_dropped_oom;
1681                                 emac_recycle_rx_skb(dev, slot, 0);
1682                         } else {
1683                                 dev->rx_sg_skb = skb;
1684                                 skb_put(skb, len);
1685                         }
1686                 } else if (!emac_rx_sg_append(dev, slot) &&
1687                            (ctrl & MAL_RX_CTRL_LAST)) {
1688
1689                         skb = dev->rx_sg_skb;
1690                         dev->rx_sg_skb = NULL;
1691
1692                         ctrl &= EMAC_BAD_RX_MASK;
1693                         if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
1694                                 emac_parse_rx_error(dev, ctrl);
1695                                 ++dev->estats.rx_dropped_error;
1696                                 dev_kfree_skb(skb);
1697                                 len = 0;
1698                         } else
1699                                 goto push_packet;
1700                 }
1701                 goto skip;
1702         oom:
1703                 DBG(dev, "rx OOM %d" NL, slot);
1704                 /* Drop the packet and recycle skb */
1705                 ++dev->estats.rx_dropped_oom;
1706                 emac_recycle_rx_skb(dev, slot, 0);
1707                 goto next;
1708         }
1709
1710         if (received) {
1711                 DBG2(dev, "rx %d BDs" NL, received);
1712                 dev->rx_slot = slot;
1713         }
1714
1715         if (unlikely(budget && test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags))) {
1716                 mb();
1717                 if (!(dev->rx_desc[slot].ctrl & MAL_RX_CTRL_EMPTY)) {
1718                         DBG2(dev, "rx restart" NL);
1719                         received = 0;
1720                         goto again;
1721                 }
1722
1723                 if (dev->rx_sg_skb) {
1724                         DBG2(dev, "dropping partial rx packet" NL);
1725                         ++dev->estats.rx_dropped_error;
1726                         dev_kfree_skb(dev->rx_sg_skb);
1727                         dev->rx_sg_skb = NULL;
1728                 }
1729
1730                 clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
1731                 mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
1732                 emac_rx_enable(dev);
1733                 dev->rx_slot = 0;
1734         }
1735         return received;
1736 }
1737
1738 /* NAPI poll context */
1739 static int emac_peek_rx(void *param)
1740 {
1741         struct emac_instance *dev = param;
1742
1743         return !(dev->rx_desc[dev->rx_slot].ctrl & MAL_RX_CTRL_EMPTY);
1744 }
1745
1746 /* NAPI poll context */
1747 static int emac_peek_rx_sg(void *param)
1748 {
1749         struct emac_instance *dev = param;
1750
1751         int slot = dev->rx_slot;
1752         while (1) {
1753                 u16 ctrl = dev->rx_desc[slot].ctrl;
1754                 if (ctrl & MAL_RX_CTRL_EMPTY)
1755                         return 0;
1756                 else if (ctrl & MAL_RX_CTRL_LAST)
1757                         return 1;
1758
1759                 slot = (slot + 1) % NUM_RX_BUFF;
1760
1761                 /* I'm just being paranoid here :) */
1762                 if (unlikely(slot == dev->rx_slot))
1763                         return 0;
1764         }
1765 }
1766
1767 /* Hard IRQ */
1768 static void emac_rxde(void *param)
1769 {
1770         struct emac_instance *dev = param;
1771
1772         ++dev->estats.rx_stopped;
1773         emac_rx_disable_async(dev);
1774 }
1775
1776 /* Hard IRQ */
1777 static irqreturn_t emac_irq(int irq, void *dev_instance)
1778 {
1779         struct emac_instance *dev = dev_instance;
1780         struct emac_regs __iomem *p = dev->emacp;
1781         struct emac_error_stats *st = &dev->estats;
1782         u32 isr;
1783
1784         spin_lock(&dev->lock);
1785
1786         isr = in_be32(&p->isr);
1787         out_be32(&p->isr, isr);
1788
1789         DBG(dev, "isr = %08x" NL, isr);
1790
1791         if (isr & EMAC4_ISR_TXPE)
1792                 ++st->tx_parity;
1793         if (isr & EMAC4_ISR_RXPE)
1794                 ++st->rx_parity;
1795         if (isr & EMAC4_ISR_TXUE)
1796                 ++st->tx_underrun;
1797         if (isr & EMAC4_ISR_RXOE)
1798                 ++st->rx_fifo_overrun;
1799         if (isr & EMAC_ISR_OVR)
1800                 ++st->rx_overrun;
1801         if (isr & EMAC_ISR_BP)
1802                 ++st->rx_bad_packet;
1803         if (isr & EMAC_ISR_RP)
1804                 ++st->rx_runt_packet;
1805         if (isr & EMAC_ISR_SE)
1806                 ++st->rx_short_event;
1807         if (isr & EMAC_ISR_ALE)
1808                 ++st->rx_alignment_error;
1809         if (isr & EMAC_ISR_BFCS)
1810                 ++st->rx_bad_fcs;
1811         if (isr & EMAC_ISR_PTLE)
1812                 ++st->rx_packet_too_long;
1813         if (isr & EMAC_ISR_ORE)
1814                 ++st->rx_out_of_range;
1815         if (isr & EMAC_ISR_IRE)
1816                 ++st->rx_in_range;
1817         if (isr & EMAC_ISR_SQE)
1818                 ++st->tx_sqe;
1819         if (isr & EMAC_ISR_TE)
1820                 ++st->tx_errors;
1821
1822         spin_unlock(&dev->lock);
1823
1824         return IRQ_HANDLED;
1825 }
1826
1827 static struct net_device_stats *emac_stats(struct net_device *ndev)
1828 {
1829         struct emac_instance *dev = netdev_priv(ndev);
1830         struct emac_stats *st = &dev->stats;
1831         struct emac_error_stats *est = &dev->estats;
1832         struct net_device_stats *nst = &dev->nstats;
1833         unsigned long flags;
1834
1835         DBG2(dev, "stats" NL);
1836
1837         /* Compute "legacy" statistics */
1838         spin_lock_irqsave(&dev->lock, flags);
1839         nst->rx_packets = (unsigned long)st->rx_packets;
1840         nst->rx_bytes = (unsigned long)st->rx_bytes;
1841         nst->tx_packets = (unsigned long)st->tx_packets;
1842         nst->tx_bytes = (unsigned long)st->tx_bytes;
1843         nst->rx_dropped = (unsigned long)(est->rx_dropped_oom +
1844                                           est->rx_dropped_error +
1845                                           est->rx_dropped_resize +
1846                                           est->rx_dropped_mtu);
1847         nst->tx_dropped = (unsigned long)est->tx_dropped;
1848
1849         nst->rx_errors = (unsigned long)est->rx_bd_errors;
1850         nst->rx_fifo_errors = (unsigned long)(est->rx_bd_overrun +
1851                                               est->rx_fifo_overrun +
1852                                               est->rx_overrun);
1853         nst->rx_frame_errors = (unsigned long)(est->rx_bd_alignment_error +
1854                                                est->rx_alignment_error);
1855         nst->rx_crc_errors = (unsigned long)(est->rx_bd_bad_fcs +
1856                                              est->rx_bad_fcs);
1857         nst->rx_length_errors = (unsigned long)(est->rx_bd_runt_packet +
1858                                                 est->rx_bd_short_event +
1859                                                 est->rx_bd_packet_too_long +
1860                                                 est->rx_bd_out_of_range +
1861                                                 est->rx_bd_in_range +
1862                                                 est->rx_runt_packet +
1863                                                 est->rx_short_event +
1864                                                 est->rx_packet_too_long +
1865                                                 est->rx_out_of_range +
1866                                                 est->rx_in_range);
1867
1868         nst->tx_errors = (unsigned long)(est->tx_bd_errors + est->tx_errors);
1869         nst->tx_fifo_errors = (unsigned long)(est->tx_bd_underrun +
1870                                               est->tx_underrun);
1871         nst->tx_carrier_errors = (unsigned long)est->tx_bd_carrier_loss;
1872         nst->collisions = (unsigned long)(est->tx_bd_excessive_deferral +
1873                                           est->tx_bd_excessive_collisions +
1874                                           est->tx_bd_late_collision +
1875                                           est->tx_bd_multple_collisions);
1876         spin_unlock_irqrestore(&dev->lock, flags);
1877         return nst;
1878 }
1879
1880 static struct mal_commac_ops emac_commac_ops = {
1881         .poll_tx = &emac_poll_tx,
1882         .poll_rx = &emac_poll_rx,
1883         .peek_rx = &emac_peek_rx,
1884         .rxde = &emac_rxde,
1885 };
1886
1887 static struct mal_commac_ops emac_commac_sg_ops = {
1888         .poll_tx = &emac_poll_tx,
1889         .poll_rx = &emac_poll_rx,
1890         .peek_rx = &emac_peek_rx_sg,
1891         .rxde = &emac_rxde,
1892 };
1893
1894 /* Ethtool support */
1895 static int emac_ethtool_get_settings(struct net_device *ndev,
1896                                      struct ethtool_cmd *cmd)
1897 {
1898         struct emac_instance *dev = netdev_priv(ndev);
1899
1900         cmd->supported = dev->phy.features;
1901         cmd->port = PORT_MII;
1902         cmd->phy_address = dev->phy.address;
1903         cmd->transceiver =
1904             dev->phy.address >= 0 ? XCVR_EXTERNAL : XCVR_INTERNAL;
1905
1906         mutex_lock(&dev->link_lock);
1907         cmd->advertising = dev->phy.advertising;
1908         cmd->autoneg = dev->phy.autoneg;
1909         cmd->speed = dev->phy.speed;
1910         cmd->duplex = dev->phy.duplex;
1911         mutex_unlock(&dev->link_lock);
1912
1913         return 0;
1914 }
1915
1916 static int emac_ethtool_set_settings(struct net_device *ndev,
1917                                      struct ethtool_cmd *cmd)
1918 {
1919         struct emac_instance *dev = netdev_priv(ndev);
1920         u32 f = dev->phy.features;
1921
1922         DBG(dev, "set_settings(%d, %d, %d, 0x%08x)" NL,
1923             cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising);
1924
1925         /* Basic sanity checks */
1926         if (dev->phy.address < 0)
1927                 return -EOPNOTSUPP;
1928         if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
1929                 return -EINVAL;
1930         if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
1931                 return -EINVAL;
1932         if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL)
1933                 return -EINVAL;
1934
1935         if (cmd->autoneg == AUTONEG_DISABLE) {
1936                 switch (cmd->speed) {
1937                 case SPEED_10:
1938                         if (cmd->duplex == DUPLEX_HALF
1939                             && !(f & SUPPORTED_10baseT_Half))
1940                                 return -EINVAL;
1941                         if (cmd->duplex == DUPLEX_FULL
1942                             && !(f & SUPPORTED_10baseT_Full))
1943                                 return -EINVAL;
1944                         break;
1945                 case SPEED_100:
1946                         if (cmd->duplex == DUPLEX_HALF
1947                             && !(f & SUPPORTED_100baseT_Half))
1948                                 return -EINVAL;
1949                         if (cmd->duplex == DUPLEX_FULL
1950                             && !(f & SUPPORTED_100baseT_Full))
1951                                 return -EINVAL;
1952                         break;
1953                 case SPEED_1000:
1954                         if (cmd->duplex == DUPLEX_HALF
1955                             && !(f & SUPPORTED_1000baseT_Half))
1956                                 return -EINVAL;
1957                         if (cmd->duplex == DUPLEX_FULL
1958                             && !(f & SUPPORTED_1000baseT_Full))
1959                                 return -EINVAL;
1960                         break;
1961                 default:
1962                         return -EINVAL;
1963                 }
1964
1965                 mutex_lock(&dev->link_lock);
1966                 dev->phy.def->ops->setup_forced(&dev->phy, cmd->speed,
1967                                                 cmd->duplex);
1968                 mutex_unlock(&dev->link_lock);
1969
1970         } else {
1971                 if (!(f & SUPPORTED_Autoneg))
1972                         return -EINVAL;
1973
1974                 mutex_lock(&dev->link_lock);
1975                 dev->phy.def->ops->setup_aneg(&dev->phy,
1976                                               (cmd->advertising & f) |
1977                                               (dev->phy.advertising &
1978                                                (ADVERTISED_Pause |
1979                                                 ADVERTISED_Asym_Pause)));
1980                 mutex_unlock(&dev->link_lock);
1981         }
1982         emac_force_link_update(dev);
1983
1984         return 0;
1985 }
1986
1987 static void emac_ethtool_get_ringparam(struct net_device *ndev,
1988                                        struct ethtool_ringparam *rp)
1989 {
1990         rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF;
1991         rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF;
1992 }
1993
1994 static void emac_ethtool_get_pauseparam(struct net_device *ndev,
1995                                         struct ethtool_pauseparam *pp)
1996 {
1997         struct emac_instance *dev = netdev_priv(ndev);
1998
1999         mutex_lock(&dev->link_lock);
2000         if ((dev->phy.features & SUPPORTED_Autoneg) &&
2001             (dev->phy.advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause)))
2002                 pp->autoneg = 1;
2003
2004         if (dev->phy.duplex == DUPLEX_FULL) {
2005                 if (dev->phy.pause)
2006                         pp->rx_pause = pp->tx_pause = 1;
2007                 else if (dev->phy.asym_pause)
2008                         pp->tx_pause = 1;
2009         }
2010         mutex_unlock(&dev->link_lock);
2011 }
2012
2013 static u32 emac_ethtool_get_rx_csum(struct net_device *ndev)
2014 {
2015         struct emac_instance *dev = netdev_priv(ndev);
2016
2017         return dev->tah_dev != NULL;
2018 }
2019
2020 static int emac_get_regs_len(struct emac_instance *dev)
2021 {
2022         if (emac_has_feature(dev, EMAC_FTR_EMAC4))
2023                 return sizeof(struct emac_ethtool_regs_subhdr) +
2024                         EMAC4_ETHTOOL_REGS_SIZE;
2025         else
2026                 return sizeof(struct emac_ethtool_regs_subhdr) +
2027                         EMAC_ETHTOOL_REGS_SIZE;
2028 }
2029
2030 static int emac_ethtool_get_regs_len(struct net_device *ndev)
2031 {
2032         struct emac_instance *dev = netdev_priv(ndev);
2033         int size;
2034
2035         size = sizeof(struct emac_ethtool_regs_hdr) +
2036                 emac_get_regs_len(dev) + mal_get_regs_len(dev->mal);
2037         if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2038                 size += zmii_get_regs_len(dev->zmii_dev);
2039         if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2040                 size += rgmii_get_regs_len(dev->rgmii_dev);
2041         if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2042                 size += tah_get_regs_len(dev->tah_dev);
2043
2044         return size;
2045 }
2046
2047 static void *emac_dump_regs(struct emac_instance *dev, void *buf)
2048 {
2049         struct emac_ethtool_regs_subhdr *hdr = buf;
2050
2051         hdr->index = dev->cell_index;
2052         if (emac_has_feature(dev, EMAC_FTR_EMAC4)) {
2053                 hdr->version = EMAC4_ETHTOOL_REGS_VER;
2054                 memcpy_fromio(hdr + 1, dev->emacp, EMAC4_ETHTOOL_REGS_SIZE);
2055                 return ((void *)(hdr + 1) + EMAC4_ETHTOOL_REGS_SIZE);
2056         } else {
2057                 hdr->version = EMAC_ETHTOOL_REGS_VER;
2058                 memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE);
2059                 return ((void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE);
2060         }
2061 }
2062
2063 static void emac_ethtool_get_regs(struct net_device *ndev,
2064                                   struct ethtool_regs *regs, void *buf)
2065 {
2066         struct emac_instance *dev = netdev_priv(ndev);
2067         struct emac_ethtool_regs_hdr *hdr = buf;
2068
2069         hdr->components = 0;
2070         buf = hdr + 1;
2071
2072         buf = mal_dump_regs(dev->mal, buf);
2073         buf = emac_dump_regs(dev, buf);
2074         if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) {
2075                 hdr->components |= EMAC_ETHTOOL_REGS_ZMII;
2076                 buf = zmii_dump_regs(dev->zmii_dev, buf);
2077         }
2078         if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) {
2079                 hdr->components |= EMAC_ETHTOOL_REGS_RGMII;
2080                 buf = rgmii_dump_regs(dev->rgmii_dev, buf);
2081         }
2082         if (emac_has_feature(dev, EMAC_FTR_HAS_TAH)) {
2083                 hdr->components |= EMAC_ETHTOOL_REGS_TAH;
2084                 buf = tah_dump_regs(dev->tah_dev, buf);
2085         }
2086 }
2087
2088 static int emac_ethtool_nway_reset(struct net_device *ndev)
2089 {
2090         struct emac_instance *dev = netdev_priv(ndev);
2091         int res = 0;
2092
2093         DBG(dev, "nway_reset" NL);
2094
2095         if (dev->phy.address < 0)
2096                 return -EOPNOTSUPP;
2097
2098         mutex_lock(&dev->link_lock);
2099         if (!dev->phy.autoneg) {
2100                 res = -EINVAL;
2101                 goto out;
2102         }
2103
2104         dev->phy.def->ops->setup_aneg(&dev->phy, dev->phy.advertising);
2105  out:
2106         mutex_unlock(&dev->link_lock);
2107         emac_force_link_update(dev);
2108         return res;
2109 }
2110
/* Number of u64 counters exposed via "ethtool -S" (device + error stats) */
static int emac_ethtool_get_stats_count(struct net_device *ndev)
{
	return EMAC_ETHTOOL_STATS_COUNT;
}
2115
2116 static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset,
2117                                      u8 * buf)
2118 {
2119         if (stringset == ETH_SS_STATS)
2120                 memcpy(buf, &emac_stats_keys, sizeof(emac_stats_keys));
2121 }
2122
2123 static void emac_ethtool_get_ethtool_stats(struct net_device *ndev,
2124                                            struct ethtool_stats *estats,
2125                                            u64 * tmp_stats)
2126 {
2127         struct emac_instance *dev = netdev_priv(ndev);
2128
2129         memcpy(tmp_stats, &dev->stats, sizeof(dev->stats));
2130         tmp_stats += sizeof(dev->stats) / sizeof(u64);
2131         memcpy(tmp_stats, &dev->estats, sizeof(dev->estats));
2132 }
2133
2134 static void emac_ethtool_get_drvinfo(struct net_device *ndev,
2135                                      struct ethtool_drvinfo *info)
2136 {
2137         struct emac_instance *dev = netdev_priv(ndev);
2138
2139         strcpy(info->driver, "ibm_emac");
2140         strcpy(info->version, DRV_VERSION);
2141         info->fw_version[0] = '\0';
2142         sprintf(info->bus_info, "PPC 4xx EMAC-%d %s",
2143                 dev->cell_index, dev->ofdev->node->full_name);
2144         info->n_stats = emac_ethtool_get_stats_count(ndev);
2145         info->regdump_len = emac_ethtool_get_regs_len(ndev);
2146 }
2147
2148 static const struct ethtool_ops emac_ethtool_ops = {
2149         .get_settings = emac_ethtool_get_settings,
2150         .set_settings = emac_ethtool_set_settings,
2151         .get_drvinfo = emac_ethtool_get_drvinfo,
2152
2153         .get_regs_len = emac_ethtool_get_regs_len,
2154         .get_regs = emac_ethtool_get_regs,
2155
2156         .nway_reset = emac_ethtool_nway_reset,
2157
2158         .get_ringparam = emac_ethtool_get_ringparam,
2159         .get_pauseparam = emac_ethtool_get_pauseparam,
2160
2161         .get_rx_csum = emac_ethtool_get_rx_csum,
2162
2163         .get_strings = emac_ethtool_get_strings,
2164         .get_stats_count = emac_ethtool_get_stats_count,
2165         .get_ethtool_stats = emac_ethtool_get_ethtool_stats,
2166
2167         .get_link = ethtool_op_get_link,
2168         .get_tx_csum = ethtool_op_get_tx_csum,
2169         .get_sg = ethtool_op_get_sg,
2170 };
2171
2172 static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
2173 {
2174         struct emac_instance *dev = netdev_priv(ndev);
2175         uint16_t *data = (uint16_t *) & rq->ifr_ifru;
2176
2177         DBG(dev, "ioctl %08x" NL, cmd);
2178
2179         if (dev->phy.address < 0)
2180                 return -EOPNOTSUPP;
2181
2182         switch (cmd) {
2183         case SIOCGMIIPHY:
2184         case SIOCDEVPRIVATE:
2185                 data[0] = dev->phy.address;
2186                 /* Fall through */
2187         case SIOCGMIIREG:
2188         case SIOCDEVPRIVATE + 1:
2189                 data[3] = emac_mdio_read(ndev, dev->phy.address, data[1]);
2190                 return 0;
2191
2192         case SIOCSMIIREG:
2193         case SIOCDEVPRIVATE + 2:
2194                 if (!capable(CAP_NET_ADMIN))
2195                         return -EPERM;
2196                 emac_mdio_write(ndev, dev->phy.address, data[1], data[2]);
2197                 return 0;
2198         default:
2199                 return -EOPNOTSUPP;
2200         }
2201 }
2202
/* Bookkeeping for one probe-order dependency of an EMAC instance;
 * filled in incrementally by emac_check_deps(). */
struct emac_depentry {
	u32                     phandle;	/* DT phandle of the dependency, 0 = no dependency */
	struct device_node      *node;		/* resolved device-tree node (holds a reference) */
	struct of_device        *ofdev;		/* resolved OF device (holds a reference) */
	void                    *drvdata;	/* driver data; non-NULL once the dependency probed */
};
2209
/* Indices into the emac_depentry array built by emac_wait_deps() */
#define EMAC_DEP_MAL_IDX        0
#define EMAC_DEP_ZMII_IDX       1
#define EMAC_DEP_RGMII_IDX      2
#define EMAC_DEP_TAH_IDX        3
#define EMAC_DEP_MDIO_IDX       4
#define EMAC_DEP_PREV_IDX       5	/* previous EMAC in the boot list */
#define EMAC_DEP_COUNT          6
2217
/*
 * Check whether every dependency in @deps has been probed and has driver
 * data attached.  Returns non-zero once all EMAC_DEP_COUNT entries are
 * satisfied, so it can serve as the wait_event_timeout() condition in
 * emac_wait_deps().
 *
 * Each call makes incremental progress: node/ofdev/drvdata lookups that
 * succeeded on a previous call are cached in the entry and not redone.
 * The references taken here are handed over or dropped by the caller.
 */
static int __devinit emac_check_deps(struct emac_instance *dev,
                                     struct emac_depentry *deps)
{
        int i, there = 0;
        struct device_node *np;

        for (i = 0; i < EMAC_DEP_COUNT; i++) {
                /* no dependency on that item, all right */
                if (deps[i].phandle == 0) {
                        there++;
                        continue;
                }
                /* special case for blist as the dependency might go away */
                if (i == EMAC_DEP_PREV_IDX) {
                        np = *(dev->blist - 1);
                        if (np == NULL) {
                                /* predecessor finished and removed itself
                                 * from the boot list: nothing to wait for */
                                deps[i].phandle = 0;
                                there++;
                                continue;
                        }
                        if (deps[i].node == NULL)
                                deps[i].node = of_node_get(np);
                }
                if (deps[i].node == NULL)
                        deps[i].node = of_find_node_by_phandle(deps[i].phandle);
                if (deps[i].node == NULL)
                        continue;
                if (deps[i].ofdev == NULL)
                        deps[i].ofdev = of_find_device_by_node(deps[i].node);
                if (deps[i].ofdev == NULL)
                        continue;
                /* non-NULL drvdata means the dependency's driver bound */
                if (deps[i].drvdata == NULL)
                        deps[i].drvdata = dev_get_drvdata(&deps[i].ofdev->dev);
                if (deps[i].drvdata != NULL)
                        there++;
        }
        return (there == EMAC_DEP_COUNT);
}
2256
2257 static void emac_put_deps(struct emac_instance *dev)
2258 {
2259         if (dev->mal_dev)
2260                 of_dev_put(dev->mal_dev);
2261         if (dev->zmii_dev)
2262                 of_dev_put(dev->zmii_dev);
2263         if (dev->rgmii_dev)
2264                 of_dev_put(dev->rgmii_dev);
2265         if (dev->mdio_dev)
2266                 of_dev_put(dev->mdio_dev);
2267         if (dev->tah_dev)
2268                 of_dev_put(dev->tah_dev);
2269 }
2270
2271 static int __devinit emac_of_bus_notify(struct notifier_block *nb,
2272                                         unsigned long action, void *data)
2273 {
2274         /* We are only intereted in device addition */
2275         if (action == BUS_NOTIFY_BOUND_DRIVER)
2276                 wake_up_all(&emac_probe_wait);
2277         return 0;
2278 }
2279
/* Bus notifier used by emac_wait_deps() to re-evaluate its wait
 * condition whenever a driver binds on the OF platform bus. */
static struct notifier_block emac_of_bus_notifier __devinitdata = {
	.notifier_call = emac_of_bus_notify
};
2283
/*
 * Wait (with timeout) until everything this EMAC depends on -- MAL,
 * optional ZMII/RGMII bridges, TAH, MDIO provider and the previous EMAC
 * in the boot list -- has been probed, then take references to them.
 *
 * Returns 0 on success with the dev->*_dev pointers filled in (each
 * holding a device reference released later by emac_put_deps()), or
 * -ENODEV if the dependencies did not all appear in time.
 */
static int __devinit emac_wait_deps(struct emac_instance *dev)
{
        struct emac_depentry deps[EMAC_DEP_COUNT];
        int i, err;

        memset(&deps, 0, sizeof(deps));

        deps[EMAC_DEP_MAL_IDX].phandle = dev->mal_ph;
        deps[EMAC_DEP_ZMII_IDX].phandle = dev->zmii_ph;
        deps[EMAC_DEP_RGMII_IDX].phandle = dev->rgmii_ph;
        if (dev->tah_ph)
                deps[EMAC_DEP_TAH_IDX].phandle = dev->tah_ph;
        if (dev->mdio_ph)
                deps[EMAC_DEP_MDIO_IDX].phandle = dev->mdio_ph;
        if (dev->blist && dev->blist > emac_boot_list)
                /* dummy non-zero phandle; emac_check_deps() resolves the
                 * actual node from the boot list, not from this value */
                deps[EMAC_DEP_PREV_IDX].phandle = 0xffffffffu;
        /* Re-check the dependencies every time a driver binds */
        bus_register_notifier(&of_platform_bus_type, &emac_of_bus_notifier);
        wait_event_timeout(emac_probe_wait,
                           emac_check_deps(dev, deps),
                           EMAC_PROBE_DEP_TIMEOUT);
        bus_unregister_notifier(&of_platform_bus_type, &emac_of_bus_notifier);
        err = emac_check_deps(dev, deps) ? 0 : -ENODEV;
        for (i = 0; i < EMAC_DEP_COUNT; i++) {
                /* node references are never kept; ofdev references are
                 * kept on success (handed over below) and dropped on error */
                if (deps[i].node)
                        of_node_put(deps[i].node);
                if (err && deps[i].ofdev)
                        of_dev_put(deps[i].ofdev);
        }
        if (err == 0) {
                dev->mal_dev = deps[EMAC_DEP_MAL_IDX].ofdev;
                dev->zmii_dev = deps[EMAC_DEP_ZMII_IDX].ofdev;
                dev->rgmii_dev = deps[EMAC_DEP_RGMII_IDX].ofdev;
                dev->tah_dev = deps[EMAC_DEP_TAH_IDX].ofdev;
                dev->mdio_dev = deps[EMAC_DEP_MDIO_IDX].ofdev;
        }
        /* the previous-EMAC reference is only needed for ordering */
        if (deps[EMAC_DEP_PREV_IDX].ofdev)
                of_dev_put(deps[EMAC_DEP_PREV_IDX].ofdev);
        return err;
}
2323
2324 static int __devinit emac_read_uint_prop(struct device_node *np, const char *name,
2325                                          u32 *val, int fatal)
2326 {
2327         int len;
2328         const u32 *prop = of_get_property(np, name, &len);
2329         if (prop == NULL || len < sizeof(u32)) {
2330                 if (fatal)
2331                         printk(KERN_ERR "%s: missing %s property\n",
2332                                np->full_name, name);
2333                 return -ENODEV;
2334         }
2335         *val = *prop;
2336         return 0;
2337 }
2338
/*
 * Locate and initialize the PHY for this EMAC instance.
 *
 * Honors three configurations from the device tree:
 *  - no PHY at all (both phy_address and phy_map are 0xffffffff):
 *    run a fixed 100/Full MII link;
 *  - an explicit phy_address: probe only that address;
 *  - otherwise: scan all 32 MDIO addresses not masked out by phy_map
 *    or already claimed in the global busy_phy_map.
 *
 * Returns 0 on success, -ENXIO if no PHY answers.
 */
static int __devinit emac_init_phy(struct emac_instance *dev)
{
        struct device_node *np = dev->ofdev->node;
        struct net_device *ndev = dev->ndev;
        u32 phy_map, adv;
        int i;

        dev->phy.dev = ndev;
        dev->phy.mode = dev->phy_mode;

        /* PHY-less configuration: assume a fixed 100Mbit full-duplex MII
         * link.
         * XXX I probably should move these settings to the dev tree
         */
        if (dev->phy_address == 0xffffffff && dev->phy_map == 0xffffffff) {
                emac_reset(dev);

                dev->phy.address = -1;
                dev->phy.features = SUPPORTED_100baseT_Full | SUPPORTED_MII;
                dev->phy.pause = 1;

                return 0;
        }

        /* busy_phy_map is global, so serialize the whole scan */
        mutex_lock(&emac_phy_map_lock);
        phy_map = dev->phy_map | busy_phy_map;

        DBG(dev, "PHY maps %08x %08x" NL, dev->phy_map, busy_phy_map);

        dev->phy.mdio_read = emac_mdio_read;
        dev->phy.mdio_write = emac_mdio_write;

        /* Enable internal clock source */
#ifdef CONFIG_PPC_DCR_NATIVE
        if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
                dcri_clrset(SDR0, SDR0_MFR, 0, SDR0_MFR_ECS);
#endif
        /* PHY clock workaround */
        emac_rx_clk_tx(dev);

        /* Enable internal clock source on 440GX
         * NOTE(review): this repeats the identical dcri_clrset() above;
         * setting the same bit twice is harmless but one of the two
         * looks redundant -- candidate for cleanup. */
#ifdef CONFIG_PPC_DCR_NATIVE
        if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
                dcri_clrset(SDR0, SDR0_MFR, 0, SDR0_MFR_ECS);
#endif
        /* Configure EMAC with defaults so we can at least use MDIO
         * This is needed mostly for 440GX
         */
        if (emac_phy_gpcs(dev->phy.mode)) {
                /* XXX
                 * Make GPCS PHY address equal to EMAC index.
                 * We probably should take into account busy_phy_map
                 * and/or phy_map here.
                 *
                 * Note that the busy_phy_map is currently global
                 * while it should probably be per-ASIC...
                 */
                dev->phy.address = dev->cell_index;
        }

        emac_configure(dev);

        /* An explicit PHY address restricts the scan below to just that
         * address (every other bit is treated as busy) */
        if (dev->phy_address != 0xffffffff)
                phy_map = ~(1 << dev->phy_address);

        for (i = 0; i < 0x20; phy_map >>= 1, ++i)
                if (!(phy_map & 1)) {
                        int r;
                        busy_phy_map |= 1 << i;

                        /* Quick check if there is a PHY at the address */
                        r = emac_mdio_read(dev->ndev, i, MII_BMCR);
                        if (r == 0xffff || r < 0)
                                continue;
                        if (!emac_mii_phy_probe(&dev->phy, i))
                                break;
                }

        /* Enable external clock source */
#ifdef CONFIG_PPC_DCR_NATIVE
        if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
                dcri_clrset(SDR0, SDR0_MFR, SDR0_MFR_ECS, 0);
#endif
        mutex_unlock(&emac_phy_map_lock);
        /* i == 0x20 means the scan above exhausted all addresses */
        if (i == 0x20) {
                printk(KERN_WARNING "%s: can't find PHY!\n", np->full_name);
                return -ENXIO;
        }

        /* Init PHY */
        if (dev->phy.def->ops->init)
                dev->phy.def->ops->init(&dev->phy);

        /* Disable any PHY features not supported by the platform */
        dev->phy.def->features &= ~dev->phy_feat_exc;

        /* Setup initial link parameters */
        if (dev->phy.features & SUPPORTED_Autoneg) {
                adv = dev->phy.features;
                if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x))
                        adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
                /* Restart autonegotiation */
                dev->phy.def->ops->setup_aneg(&dev->phy, adv);
        } else {
                u32 f = dev->phy.def->features;
                int speed = SPEED_10, fd = DUPLEX_HALF;

                /* Select highest supported speed/duplex */
                if (f & SUPPORTED_1000baseT_Full) {
                        speed = SPEED_1000;
                        fd = DUPLEX_FULL;
                } else if (f & SUPPORTED_1000baseT_Half)
                        speed = SPEED_1000;
                else if (f & SUPPORTED_100baseT_Full) {
                        speed = SPEED_100;
                        fd = DUPLEX_FULL;
                } else if (f & SUPPORTED_100baseT_Half)
                        speed = SPEED_100;
                else if (f & SUPPORTED_10baseT_Full)
                        fd = DUPLEX_FULL;

                /* Force link parameters */
                dev->phy.def->ops->setup_forced(&dev->phy, speed, fd);
        }
        return 0;
}
2467
2468 static int __devinit emac_init_config(struct emac_instance *dev)
2469 {
2470         struct device_node *np = dev->ofdev->node;
2471         const void *p;
2472         unsigned int plen;
2473         const char *pm, *phy_modes[] = {
2474                 [PHY_MODE_NA] = "",
2475                 [PHY_MODE_MII] = "mii",
2476                 [PHY_MODE_RMII] = "rmii",
2477                 [PHY_MODE_SMII] = "smii",
2478                 [PHY_MODE_RGMII] = "rgmii",
2479                 [PHY_MODE_TBI] = "tbi",
2480                 [PHY_MODE_GMII] = "gmii",
2481                 [PHY_MODE_RTBI] = "rtbi",
2482                 [PHY_MODE_SGMII] = "sgmii",
2483         };
2484
2485         /* Read config from device-tree */
2486         if (emac_read_uint_prop(np, "mal-device", &dev->mal_ph, 1))
2487                 return -ENXIO;
2488         if (emac_read_uint_prop(np, "mal-tx-channel", &dev->mal_tx_chan, 1))
2489                 return -ENXIO;
2490         if (emac_read_uint_prop(np, "mal-rx-channel", &dev->mal_rx_chan, 1))
2491                 return -ENXIO;
2492         if (emac_read_uint_prop(np, "cell-index", &dev->cell_index, 1))
2493                 return -ENXIO;
2494         if (emac_read_uint_prop(np, "max-frame-size", &dev->max_mtu, 0))
2495                 dev->max_mtu = 1500;
2496         if (emac_read_uint_prop(np, "rx-fifo-size", &dev->rx_fifo_size, 0))
2497                 dev->rx_fifo_size = 2048;
2498         if (emac_read_uint_prop(np, "tx-fifo-size", &dev->tx_fifo_size, 0))
2499                 dev->tx_fifo_size = 2048;
2500         if (emac_read_uint_prop(np, "rx-fifo-size-gige", &dev->rx_fifo_size_gige, 0))
2501                 dev->rx_fifo_size_gige = dev->rx_fifo_size;
2502         if (emac_read_uint_prop(np, "tx-fifo-size-gige", &dev->tx_fifo_size_gige, 0))
2503                 dev->tx_fifo_size_gige = dev->tx_fifo_size;
2504         if (emac_read_uint_prop(np, "phy-address", &dev->phy_address, 0))
2505                 dev->phy_address = 0xffffffff;
2506         if (emac_read_uint_prop(np, "phy-map", &dev->phy_map, 0))
2507                 dev->phy_map = 0xffffffff;
2508         if (emac_read_uint_prop(np->parent, "clock-frequency", &dev->opb_bus_freq, 1))
2509                 return -ENXIO;
2510         if (emac_read_uint_prop(np, "tah-device", &dev->tah_ph, 0))
2511                 dev->tah_ph = 0;
2512         if (emac_read_uint_prop(np, "tah-channel", &dev->tah_port, 0))
2513                 dev->tah_port = 0;
2514         if (emac_read_uint_prop(np, "mdio-device", &dev->mdio_ph, 0))
2515                 dev->mdio_ph = 0;
2516         if (emac_read_uint_prop(np, "zmii-device", &dev->zmii_ph, 0))
2517                 dev->zmii_ph = 0;;
2518         if (emac_read_uint_prop(np, "zmii-channel", &dev->zmii_port, 0))
2519                 dev->zmii_port = 0xffffffff;;
2520         if (emac_read_uint_prop(np, "rgmii-device", &dev->rgmii_ph, 0))
2521                 dev->rgmii_ph = 0;;
2522         if (emac_read_uint_prop(np, "rgmii-channel", &dev->rgmii_port, 0))
2523                 dev->rgmii_port = 0xffffffff;;
2524         if (emac_read_uint_prop(np, "fifo-entry-size", &dev->fifo_entry_size, 0))
2525                 dev->fifo_entry_size = 16;
2526         if (emac_read_uint_prop(np, "mal-burst-size", &dev->mal_burst_size, 0))
2527                 dev->mal_burst_size = 256;
2528
2529         /* PHY mode needs some decoding */
2530         dev->phy_mode = PHY_MODE_NA;
2531         pm = of_get_property(np, "phy-mode", &plen);
2532         if (pm != NULL) {
2533                 int i;
2534                 for (i = 0; i < ARRAY_SIZE(phy_modes); i++)
2535                         if (!strcasecmp(pm, phy_modes[i])) {
2536                                 dev->phy_mode = i;
2537                                 break;
2538                         }
2539         }
2540
2541         /* Backward compat with non-final DT */
2542         if (dev->phy_mode == PHY_MODE_NA && pm != NULL && plen == 4) {
2543                 u32 nmode = *(const u32 *)pm;
2544                 if (nmode > PHY_MODE_NA && nmode <= PHY_MODE_SGMII)
2545                         dev->phy_mode = nmode;
2546         }
2547
2548         /* Check EMAC version */
2549         if (of_device_is_compatible(np, "ibm,emac4")) {
2550                 dev->features |= EMAC_FTR_EMAC4;
2551                 if (of_device_is_compatible(np, "ibm,emac-440gx"))
2552                         dev->features |= EMAC_FTR_440GX_PHY_CLK_FIX;
2553         } else {
2554                 if (of_device_is_compatible(np, "ibm,emac-440ep") ||
2555                     of_device_is_compatible(np, "ibm,emac-440gr"))
2556                         dev->features |= EMAC_FTR_440EP_PHY_CLK_FIX;
2557         }
2558
2559         /* Fixup some feature bits based on the device tree */
2560         if (of_get_property(np, "has-inverted-stacr-oc", NULL))
2561                 dev->features |= EMAC_FTR_STACR_OC_INVERT;
2562         if (of_get_property(np, "has-new-stacr-staopc", NULL))
2563                 dev->features |= EMAC_FTR_HAS_NEW_STACR;
2564
2565         /* CAB lacks the appropriate properties */
2566         if (of_device_is_compatible(np, "ibm,emac-axon"))
2567                 dev->features |= EMAC_FTR_HAS_NEW_STACR |
2568                         EMAC_FTR_STACR_OC_INVERT;
2569
2570         /* Enable TAH/ZMII/RGMII features as found */
2571         if (dev->tah_ph != 0) {
2572 #ifdef CONFIG_IBM_NEW_EMAC_TAH
2573                 dev->features |= EMAC_FTR_HAS_TAH;
2574 #else
2575                 printk(KERN_ERR "%s: TAH support not enabled !\n",
2576                        np->full_name);
2577                 return -ENXIO;
2578 #endif
2579         }
2580
2581         if (dev->zmii_ph != 0) {
2582 #ifdef CONFIG_IBM_NEW_EMAC_ZMII
2583                 dev->features |= EMAC_FTR_HAS_ZMII;
2584 #else
2585                 printk(KERN_ERR "%s: ZMII support not enabled !\n",
2586                        np->full_name);
2587                 return -ENXIO;
2588 #endif
2589         }
2590
2591         if (dev->rgmii_ph != 0) {
2592 #ifdef CONFIG_IBM_NEW_EMAC_RGMII
2593                 dev->features |= EMAC_FTR_HAS_RGMII;
2594 #else
2595                 printk(KERN_ERR "%s: RGMII support not enabled !\n",
2596                        np->full_name);
2597                 return -ENXIO;
2598 #endif
2599         }
2600
2601         /* Read MAC-address */
2602         p = of_get_property(np, "local-mac-address", NULL);
2603         if (p == NULL) {
2604                 printk(KERN_ERR "%s: Can't find local-mac-address property\n",
2605                        np->full_name);
2606                 return -ENXIO;
2607         }
2608         memcpy(dev->ndev->dev_addr, p, 6);
2609
2610         DBG(dev, "features     : 0x%08x / 0x%08x\n", dev->features, EMAC_FTRS_POSSIBLE);
2611         DBG(dev, "tx_fifo_size : %d (%d gige)\n", dev->tx_fifo_size, dev->tx_fifo_size_gige);
2612         DBG(dev, "rx_fifo_size : %d (%d gige)\n", dev->rx_fifo_size, dev->rx_fifo_size_gige);
2613         DBG(dev, "max_mtu      : %d\n", dev->max_mtu);
2614         DBG(dev, "OPB freq     : %d\n", dev->opb_bus_freq);
2615
2616         return 0;
2617 }
2618
2619 static int __devinit emac_probe(struct of_device *ofdev,
2620                                 const struct of_device_id *match)
2621 {
2622         struct net_device *ndev;
2623         struct emac_instance *dev;
2624         struct device_node *np = ofdev->node;
2625         struct device_node **blist = NULL;
2626         int err, i;
2627
2628         /* Skip unused/unwired EMACS.  We leave the check for an unused
2629          * property here for now, but new flat device trees should set a
2630          * status property to "disabled" instead.
2631          */
2632         if (of_get_property(np, "unused", NULL) || !of_device_is_available(np))
2633                 return -ENODEV;
2634
2635         /* Find ourselves in the bootlist if we are there */
2636         for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
2637                 if (emac_boot_list[i] == np)
2638                         blist = &emac_boot_list[i];
2639
2640         /* Allocate our net_device structure */
2641         err = -ENOMEM;
2642         ndev = alloc_etherdev(sizeof(struct emac_instance));
2643         if (!ndev) {
2644                 printk(KERN_ERR "%s: could not allocate ethernet device!\n",
2645                        np->full_name);
2646                 goto err_gone;
2647         }
2648         dev = netdev_priv(ndev);
2649         dev->ndev = ndev;
2650         dev->ofdev = ofdev;
2651         dev->blist = blist;
2652         SET_NETDEV_DEV(ndev, &ofdev->dev);
2653
2654         /* Initialize some embedded data structures */
2655         mutex_init(&dev->mdio_lock);
2656         mutex_init(&dev->link_lock);
2657         spin_lock_init(&dev->lock);
2658         INIT_WORK(&dev->reset_work, emac_reset_work);
2659
2660         /* Init various config data based on device-tree */
2661         err = emac_init_config(dev);
2662         if (err != 0)
2663                 goto err_free;
2664
2665         /* Get interrupts. EMAC irq is mandatory, WOL irq is optional */
2666         dev->emac_irq = irq_of_parse_and_map(np, 0);
2667         dev->wol_irq = irq_of_parse_and_map(np, 1);
2668         if (dev->emac_irq == NO_IRQ) {
2669                 printk(KERN_ERR "%s: Can't map main interrupt\n", np->full_name);
2670                 goto err_free;
2671         }
2672         ndev->irq = dev->emac_irq;
2673
2674         /* Map EMAC regs */
2675         if (of_address_to_resource(np, 0, &dev->rsrc_regs)) {
2676                 printk(KERN_ERR "%s: Can't get registers address\n",
2677                        np->full_name);
2678                 goto err_irq_unmap;
2679         }
2680         // TODO : request_mem_region
2681         dev->emacp = ioremap(dev->rsrc_regs.start, sizeof(struct emac_regs));
2682         if (dev->emacp == NULL) {
2683                 printk(KERN_ERR "%s: Can't map device registers!\n",
2684                        np->full_name);
2685                 err = -ENOMEM;
2686                 goto err_irq_unmap;
2687         }
2688
2689         /* Wait for dependent devices */
2690         err = emac_wait_deps(dev);
2691         if (err) {
2692                 printk(KERN_ERR
2693                        "%s: Timeout waiting for dependent devices\n",
2694                        np->full_name);
2695                 /*  display more info about what's missing ? */
2696                 goto err_reg_unmap;
2697         }
2698         dev->mal = dev_get_drvdata(&dev->mal_dev->dev);
2699         if (dev->mdio_dev != NULL)
2700                 dev->mdio_instance = dev_get_drvdata(&dev->mdio_dev->dev);
2701
2702         /* Register with MAL */
2703         dev->commac.ops = &emac_commac_ops;
2704         dev->commac.dev = dev;
2705         dev->commac.tx_chan_mask = MAL_CHAN_MASK(dev->mal_tx_chan);
2706         dev->commac.rx_chan_mask = MAL_CHAN_MASK(dev->mal_rx_chan);
2707         err = mal_register_commac(dev->mal, &dev->commac);
2708         if (err) {
2709                 printk(KERN_ERR "%s: failed to register with mal %s!\n",
2710                        np->full_name, dev->mal_dev->node->full_name);
2711                 goto err_rel_deps;
2712         }
2713         dev->rx_skb_size = emac_rx_skb_size(ndev->mtu);
2714         dev->rx_sync_size = emac_rx_sync_size(ndev->mtu);
2715
2716         /* Get pointers to BD rings */
2717         dev->tx_desc =
2718             dev->mal->bd_virt + mal_tx_bd_offset(dev->mal, dev->mal_tx_chan);
2719         dev->rx_desc =
2720             dev->mal->bd_virt + mal_rx_bd_offset(dev->mal, dev->mal_rx_chan);
2721
2722         DBG(dev, "tx_desc %p" NL, dev->tx_desc);
2723         DBG(dev, "rx_desc %p" NL, dev->rx_desc);
2724
2725         /* Clean rings */
2726         memset(dev->tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor));
2727         memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor));
2728         memset(dev->tx_skb, 0, NUM_TX_BUFF * sizeof(struct sk_buff *));
2729         memset(dev->rx_skb, 0, NUM_RX_BUFF * sizeof(struct sk_buff *));
2730
2731         /* Attach to ZMII, if needed */
2732         if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII) &&
2733             (err = zmii_attach(dev->zmii_dev, dev->zmii_port, &dev->phy_mode)) != 0)
2734                 goto err_unreg_commac;
2735
2736         /* Attach to RGMII, if needed */
2737         if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII) &&
2738             (err = rgmii_attach(dev->rgmii_dev, dev->rgmii_port, dev->phy_mode)) != 0)
2739                 goto err_detach_zmii;
2740
2741         /* Attach to TAH, if needed */
2742         if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
2743             (err = tah_attach(dev->tah_dev, dev->tah_port)) != 0)
2744                 goto err_detach_rgmii;
2745
2746         /* Set some link defaults before we can find out real parameters */
2747         dev->phy.speed = SPEED_100;
2748         dev->phy.duplex = DUPLEX_FULL;
2749         dev->phy.autoneg = AUTONEG_DISABLE;
2750         dev->phy.pause = dev->phy.asym_pause = 0;
2751         dev->stop_timeout = STOP_TIMEOUT_100;
2752         INIT_DELAYED_WORK(&dev->link_work, emac_link_timer);
2753
2754         /* Find PHY if any */
2755         err = emac_init_phy(dev);
2756         if (err != 0)
2757                 goto err_detach_tah;
2758
2759         /* Fill in the driver function table */
2760         ndev->open = &emac_open;
2761         if (dev->tah_dev)
2762                 ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
2763         ndev->tx_timeout = &emac_tx_timeout;
2764         ndev->watchdog_timeo = 5 * HZ;
2765         ndev->stop = &emac_close;
2766         ndev->get_stats = &emac_stats;
2767         ndev->set_multicast_list = &emac_set_multicast_list;
2768         ndev->do_ioctl = &emac_ioctl;
2769         if (emac_phy_supports_gige(dev->phy_mode)) {
2770                 ndev->hard_start_xmit = &emac_start_xmit_sg;
2771                 ndev->change_mtu = &emac_change_mtu;
2772                 dev->commac.ops = &emac_commac_sg_ops;
2773         } else {
2774                 ndev->hard_start_xmit = &emac_start_xmit;
2775         }
2776         SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);
2777
2778         netif_carrier_off(ndev);
2779         netif_stop_queue(ndev);
2780
2781         err = register_netdev(ndev);
2782         if (err) {
2783                 printk(KERN_ERR "%s: failed to register net device (%d)!\n",
2784                        np->full_name, err);
2785                 goto err_detach_tah;
2786         }
2787
2788         /* Set our drvdata last as we don't want them visible until we are
2789          * fully initialized
2790          */
2791         wmb();
2792         dev_set_drvdata(&ofdev->dev, dev);
2793
2794         /* There's a new kid in town ! Let's tell everybody */
2795         wake_up_all(&emac_probe_wait);
2796
2797
2798         printk(KERN_INFO
2799                "%s: EMAC-%d %s, MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
2800                ndev->name, dev->cell_index, np->full_name,
2801                ndev->dev_addr[0], ndev->dev_addr[1], ndev->dev_addr[2],
2802                ndev->dev_addr[3], ndev->dev_addr[4], ndev->dev_addr[5]);
2803
2804         if (dev->phy.address >= 0)
2805                 printk("%s: found %s PHY (0x%02x)\n", ndev->name,
2806                        dev->phy.def->name, dev->phy.address);
2807
2808         emac_dbg_register(dev);
2809
2810         /* Life is good */
2811         return 0;
2812
2813         /* I have a bad feeling about this ... */
2814
2815  err_detach_tah:
2816         if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2817                 tah_detach(dev->tah_dev, dev->tah_port);
2818  err_detach_rgmii:
2819         if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2820                 rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
2821  err_detach_zmii:
2822         if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2823                 zmii_detach(dev->zmii_dev, dev->zmii_port);
2824  err_unreg_commac:
2825         mal_unregister_commac(dev->mal, &dev->commac);
2826  err_rel_deps:
2827         emac_put_deps(dev);
2828  err_reg_unmap:
2829         iounmap(dev->emacp);
2830  err_irq_unmap:
2831         if (dev->wol_irq != NO_IRQ)
2832                 irq_dispose_mapping(dev->wol_irq);
2833         if (dev->emac_irq != NO_IRQ)
2834                 irq_dispose_mapping(dev->emac_irq);
2835  err_free:
2836         kfree(ndev);
2837  err_gone:
2838         /* if we were on the bootlist, remove us as we won't show up and
2839          * wake up all waiters to notify them in case they were waiting
2840          * on us
2841          */
2842         if (blist) {
2843                 *blist = NULL;
2844                 wake_up_all(&emac_probe_wait);
2845         }
2846         return err;
2847 }
2848
2849 static int __devexit emac_remove(struct of_device *ofdev)
2850 {
2851         struct emac_instance *dev = dev_get_drvdata(&ofdev->dev);
2852
2853         DBG(dev, "remove" NL);
2854
2855         dev_set_drvdata(&ofdev->dev, NULL);
2856
2857         unregister_netdev(dev->ndev);
2858
2859         flush_scheduled_work();
2860
2861         if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2862                 tah_detach(dev->tah_dev, dev->tah_port);
2863         if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2864                 rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
2865         if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2866                 zmii_detach(dev->zmii_dev, dev->zmii_port);
2867
2868         mal_unregister_commac(dev->mal, &dev->commac);
2869         emac_put_deps(dev);
2870
2871         emac_dbg_unregister(dev);
2872         iounmap(dev->emacp);
2873
2874         if (dev->wol_irq != NO_IRQ)
2875                 irq_dispose_mapping(dev->wol_irq);
2876         if (dev->emac_irq != NO_IRQ)
2877                 irq_dispose_mapping(dev->emac_irq);
2878
2879         kfree(dev->ndev);
2880
2881         return 0;
2882 }
2883
/* XXX Features in here should be replaced by properties... */
/* Device-tree match table: this driver binds to "network" type nodes
 * compatible with either the classic EMAC or the EMAC4 core. */
static struct of_device_id emac_match[] =
{
	{
		.type           = "network",
		.compatible     = "ibm,emac",
	},
	{
		.type           = "network",
		.compatible     = "ibm,emac4",
	},
	{},	/* sentinel */
};
2897
/* of_platform driver glue: probe/remove entry points for device-tree
 * nodes matched by emac_match. */
static struct of_platform_driver emac_driver = {
	.name = "emac",
	.match_table = emac_match,

	.probe = emac_probe,
	.remove = emac_remove,
};
2905
2906 static void __init emac_make_bootlist(void)
2907 {
2908         struct device_node *np = NULL;
2909         int j, max, i = 0, k;
2910         int cell_indices[EMAC_BOOT_LIST_SIZE];
2911
2912         /* Collect EMACs */
2913         while((np = of_find_all_nodes(np)) != NULL) {
2914                 const u32 *idx;
2915
2916                 if (of_match_node(emac_match, np) == NULL)
2917                         continue;
2918                 if (of_get_property(np, "unused", NULL))
2919                         continue;
2920                 idx = of_get_property(np, "cell-index", NULL);
2921                 if (idx == NULL)
2922                         continue;
2923                 cell_indices[i] = *idx;
2924                 emac_boot_list[i++] = of_node_get(np);
2925                 if (i >= EMAC_BOOT_LIST_SIZE) {
2926                         of_node_put(np);
2927                         break;
2928                 }
2929         }
2930         max = i;
2931
2932         /* Bubble sort them (doh, what a creative algorithm :-) */
2933         for (i = 0; max > 1 && (i < (max - 1)); i++)
2934                 for (j = i; j < max; j++) {
2935                         if (cell_indices[i] > cell_indices[j]) {
2936                                 np = emac_boot_list[i];
2937                                 emac_boot_list[i] = emac_boot_list[j];
2938                                 emac_boot_list[j] = np;
2939                                 k = cell_indices[i];
2940                                 cell_indices[i] = cell_indices[j];
2941                                 cell_indices[j] = k;
2942                         }
2943                 }
2944 }
2945
2946 static int __init emac_init(void)
2947 {
2948         int rc;
2949
2950         printk(KERN_INFO DRV_DESC ", version " DRV_VERSION "\n");
2951
2952         /* Init debug stuff */
2953         emac_init_debug();
2954
2955         /* Build EMAC boot list */
2956         emac_make_bootlist();
2957
2958         /* Init submodules */
2959         rc = mal_init();
2960         if (rc)
2961                 goto err;
2962         rc = zmii_init();
2963         if (rc)
2964                 goto err_mal;
2965         rc = rgmii_init();
2966         if (rc)
2967                 goto err_zmii;
2968         rc = tah_init();
2969         if (rc)
2970                 goto err_rgmii;
2971         rc = of_register_platform_driver(&emac_driver);
2972         if (rc)
2973                 goto err_tah;
2974
2975         return 0;
2976
2977  err_tah:
2978         tah_exit();
2979  err_rgmii:
2980         rgmii_exit();
2981  err_zmii:
2982         zmii_exit();
2983  err_mal:
2984         mal_exit();
2985  err:
2986         return rc;
2987 }
2988
2989 static void __exit emac_exit(void)
2990 {
2991         int i;
2992
2993         of_unregister_platform_driver(&emac_driver);
2994
2995         tah_exit();
2996         rgmii_exit();
2997         zmii_exit();
2998         mal_exit();
2999         emac_fini_debug();
3000
3001         /* Destroy EMAC boot list */
3002         for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
3003                 if (emac_boot_list[i])
3004                         of_node_put(emac_boot_list[i]);
3005 }
3006
/* Standard module load/unload hooks */
module_init(emac_init);
module_exit(emac_exit);