[PATCH] PPC 44x EMAC driver: add 440SPe support
/*
 * drivers/net/ibm_emac/ibm_emac_core.c
 *
 * Driver for PowerPC 4xx on-chip ethernet controller.
 *
 * Copyright (c) 2004, 2005 Zultys Technologies.
 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
 *
 * Based on original work by
 *      Matt Porter <mporter@kernel.crashing.org>
 *      (c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
 *      Armin Kuster <akuster@mvista.com>
 *      Johnnie Peters <jpeters@mvista.com>
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 *
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/bitops.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/uaccess.h>
#include <asm/ocp.h>

#include "ibm_emac_core.h"
#include "ibm_emac_debug.h"

/*
 * Lack of dma_unmap_???? calls is intentional.
 *
 * API-correct usage requires additional support state information to be
 * maintained for every RX and TX buffer descriptor (BD). Unfortunately, due to
 * EMAC design (e.g. TX buffer passed from network stack can be split into
 * several BDs, dma_map_single/dma_map_page can be used to map particular BD),
 * maintaining such information will add additional overhead.
 * Current DMA API implementation for 4xx processors only ensures cache coherency
 * and dma_unmap_???? routines are empty and are likely to stay this way.
 * I decided to omit dma_unmap_??? calls because I don't want to add additional
 * complexity just for the sake of following some abstract API, when it doesn't
 * add any real benefit to the driver. I understand that this decision may be
 * controversial, but I really tried to make code API-correct and efficient
 * at the same time and didn't come up with code I liked :(.                --ebs
 */

#define DRV_NAME        "emac"
#define DRV_VERSION     "3.53"
#define DRV_DESC        "PPC 4xx OCP EMAC driver"

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR
    ("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>");
MODULE_LICENSE("GPL");

/* minimum number of free TX descriptors required to wake up TX process */
#define EMAC_TX_WAKEUP_THRESH           (NUM_TX_BUFF / 4)

/* If packet size is less than this number, we allocate a small skb and copy
 * the packet contents into it instead of passing the original big skb up
 */
#define EMAC_RX_COPY_THRESH             CONFIG_IBM_EMAC_RX_COPY_THRESHOLD

/* Since multiple EMACs share MDIO lines in various ways, we need
 * to avoid re-using the same PHY ID in cases where the arch didn't
 * set up precise phy_map entries
 */
static u32 busy_phy_map;

#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX) && (defined(CONFIG_405EP) || defined(CONFIG_440EP))
/* 405EP has an "EMAC to PHY Control Register" (CPC0_EPCTL) which can help us
 * with the PHY RX clock problem.
 * 440EP has a saner SDR0_MFR register implementation than 440GX, which
 * also allows controlling each EMAC clock
 */
static inline void EMAC_RX_CLK_TX(int idx)
{
        unsigned long flags;
        local_irq_save(flags);

#if defined(CONFIG_405EP)
        mtdcr(0xf3, mfdcr(0xf3) | (1 << idx));
#else /* CONFIG_440EP */
        SDR_WRITE(DCRN_SDR_MFR, SDR_READ(DCRN_SDR_MFR) | (0x08000000 >> idx));
#endif

        local_irq_restore(flags);
}

static inline void EMAC_RX_CLK_DEFAULT(int idx)
{
        unsigned long flags;
        local_irq_save(flags);

#if defined(CONFIG_405EP)
        mtdcr(0xf3, mfdcr(0xf3) & ~(1 << idx));
#else /* CONFIG_440EP */
        SDR_WRITE(DCRN_SDR_MFR, SDR_READ(DCRN_SDR_MFR) & ~(0x08000000 >> idx));
#endif

        local_irq_restore(flags);
}
#else
#define EMAC_RX_CLK_TX(idx)             ((void)0)
#define EMAC_RX_CLK_DEFAULT(idx)        ((void)0)
#endif

#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX) && defined(CONFIG_440GX)
/* We can switch the Ethernet clock to the internal source through
 * SDR0_MFR[ECS]; unfortunately this is less flexible than the 440EP case,
 * because it's a global setting for all EMACs, therefore we do this clock
 * trick only during probe.
 */
#define EMAC_CLK_INTERNAL               SDR_WRITE(DCRN_SDR_MFR, \
                                            SDR_READ(DCRN_SDR_MFR) | 0x08000000)
#define EMAC_CLK_EXTERNAL               SDR_WRITE(DCRN_SDR_MFR, \
                                            SDR_READ(DCRN_SDR_MFR) & ~0x08000000)
#else
#define EMAC_CLK_INTERNAL               ((void)0)
#define EMAC_CLK_EXTERNAL               ((void)0)
#endif

/* I don't want to litter the system log with timeout errors
 * when we have a brain-damaged PHY.
 */
static inline void emac_report_timeout_error(struct ocp_enet_private *dev,
                                             const char *error)
{
#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX)
        DBG("%d: %s" NL, dev->def->index, error);
#else
        if (net_ratelimit())
                printk(KERN_ERR "emac%d: %s\n", dev->def->index, error);
#endif
}

/* PHY polling intervals */
#define PHY_POLL_LINK_ON        HZ
#define PHY_POLL_LINK_OFF       (HZ / 5)

/* Please keep in sync with struct ibm_emac_stats/ibm_emac_error_stats */
static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = {
        "rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum",
        "tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom",
        "rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu",
        "rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet",
        "rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error",
        "rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range",
        "rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun",
        "rx_bad_packet", "rx_runt_packet", "rx_short_event",
        "rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long",
        "rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors",
        "tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral",
        "tx_bd_excessive_collisions", "tx_bd_late_collision",
        "tx_bd_multple_collisions", "tx_bd_single_collision",
        "tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe",
        "tx_errors"
};

static irqreturn_t emac_irq(int irq, void *dev_instance, struct pt_regs *regs);
static void emac_clean_tx_ring(struct ocp_enet_private *dev);

static inline int emac_phy_supports_gige(int phy_mode)
{
        return  phy_mode == PHY_MODE_GMII ||
                phy_mode == PHY_MODE_RGMII ||
                phy_mode == PHY_MODE_TBI ||
                phy_mode == PHY_MODE_RTBI;
}

static inline int emac_phy_gpcs(int phy_mode)
{
        return  phy_mode == PHY_MODE_TBI ||
                phy_mode == PHY_MODE_RTBI;
}

static inline void emac_tx_enable(struct ocp_enet_private *dev)
{
        struct emac_regs *p = dev->emacp;
        unsigned long flags;
        u32 r;

        local_irq_save(flags);

        DBG("%d: tx_enable" NL, dev->def->index);

        r = in_be32(&p->mr0);
        if (!(r & EMAC_MR0_TXE))
                out_be32(&p->mr0, r | EMAC_MR0_TXE);
        local_irq_restore(flags);
}

static void emac_tx_disable(struct ocp_enet_private *dev)
{
        struct emac_regs *p = dev->emacp;
        unsigned long flags;
        u32 r;

        local_irq_save(flags);

        DBG("%d: tx_disable" NL, dev->def->index);

        r = in_be32(&p->mr0);
        if (r & EMAC_MR0_TXE) {
                int n = 300;
                out_be32(&p->mr0, r & ~EMAC_MR0_TXE);
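                /* Busy-wait for the TX channel to go idle (MR0[TXI] set) */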
                while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n)
                        --n;
                if (unlikely(!n))
                        emac_report_timeout_error(dev, "TX disable timeout");
        }
        local_irq_restore(flags);
}

static void emac_rx_enable(struct ocp_enet_private *dev)
{
        struct emac_regs *p = dev->emacp;
        unsigned long flags;
        u32 r;

        local_irq_save(flags);
        if (unlikely(dev->commac.rx_stopped))
                goto out;

        DBG("%d: rx_enable" NL, dev->def->index);

        r = in_be32(&p->mr0);
        if (!(r & EMAC_MR0_RXE)) {
                if (unlikely(!(r & EMAC_MR0_RXI))) {
                        /* Wait if previous async disable is still in progress.
                         * Note the parentheses around the assignment: r must
                         * keep the full MR0 value, not just the RXI bit, since
                         * it is written back below.
                         */
                        int n = 100;
                        while (!((r = in_be32(&p->mr0)) & EMAC_MR0_RXI) && n)
                                --n;
                        if (unlikely(!n))
                                emac_report_timeout_error(dev,
                                                          "RX disable timeout");
                }
                out_be32(&p->mr0, r | EMAC_MR0_RXE);
        }
      out:
        local_irq_restore(flags);
}

262
263 static void emac_rx_disable(struct ocp_enet_private *dev)
264 {
265         struct emac_regs *p = dev->emacp;
266         unsigned long flags;
267         u32 r;
268
269         local_irq_save(flags);
270
271         DBG("%d: rx_disable" NL, dev->def->index);
272
273         r = in_be32(&p->mr0);
274         if (r & EMAC_MR0_RXE) {
275                 int n = 300;
276                 out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
277                 while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n)
278                         --n;
279                 if (unlikely(!n))
280                         emac_report_timeout_error(dev, "RX disable timeout");
281         }
282         local_irq_restore(flags);
283 }
284
285 static inline void emac_rx_disable_async(struct ocp_enet_private *dev)
286 {
287         struct emac_regs *p = dev->emacp;
288         unsigned long flags;
289         u32 r;
290
291         local_irq_save(flags);
292
293         DBG("%d: rx_disable_async" NL, dev->def->index);
294
295         r = in_be32(&p->mr0);
296         if (r & EMAC_MR0_RXE)
297                 out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
298         local_irq_restore(flags);
299 }
300
static int emac_reset(struct ocp_enet_private *dev)
{
        struct emac_regs *p = dev->emacp;
        unsigned long flags;
        int n = 20;

        DBG("%d: reset" NL, dev->def->index);

        local_irq_save(flags);

        if (!dev->reset_failed) {
                /* A 40x erratum suggests stopping the RX channel before
                 * reset; we stop TX as well
                 */
                emac_rx_disable(dev);
                emac_tx_disable(dev);
        }

        out_be32(&p->mr0, EMAC_MR0_SRST);
        while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n)
                --n;
        local_irq_restore(flags);

        if (n) {
                dev->reset_failed = 0;
                return 0;
        } else {
                emac_report_timeout_error(dev, "reset timeout");
                dev->reset_failed = 1;
                return -ETIMEDOUT;
        }
}

static void emac_hash_mc(struct ocp_enet_private *dev)
{
        struct emac_regs *p = dev->emacp;
        u16 gaht[4] = { 0 };
        struct dev_mc_list *dmi;

        DBG("%d: hash_mc %d" NL, dev->def->index, dev->ndev->mc_count);

        for (dmi = dev->ndev->mc_list; dmi; dmi = dmi->next) {
                int bit;
                DBG2("%d: mc %02x:%02x:%02x:%02x:%02x:%02x" NL,
                     dev->def->index,
                     dmi->dmi_addr[0], dmi->dmi_addr[1], dmi->dmi_addr[2],
                     dmi->dmi_addr[3], dmi->dmi_addr[4], dmi->dmi_addr[5]);

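                /* Hash on the top 6 bits of the Ethernet CRC: bit 0 maps to
                 * the MSB of GAHT1, bit 63 to the LSB of GAHT4. E.g. a CRC
                 * whose top 6 bits are all ones gives bit 0, i.e.
                 * GAHT1 |= 0x8000.
                 */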
                bit = 63 - (ether_crc(ETH_ALEN, dmi->dmi_addr) >> 26);
                gaht[bit >> 4] |= 0x8000 >> (bit & 0x0f);
        }
        out_be32(&p->gaht1, gaht[0]);
        out_be32(&p->gaht2, gaht[1]);
        out_be32(&p->gaht3, gaht[2]);
        out_be32(&p->gaht4, gaht[3]);
}

static inline u32 emac_iff2rmr(struct net_device *ndev)
{
        u32 r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE |
            EMAC_RMR_BASE;

        if (ndev->flags & IFF_PROMISC)
                r |= EMAC_RMR_PME;
        else if (ndev->flags & IFF_ALLMULTI || ndev->mc_count > 32)
                r |= EMAC_RMR_PMME;
        else if (ndev->mc_count > 0)
                r |= EMAC_RMR_MAE;

        return r;
}

static inline int emac_opb_mhz(void)
{
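        /* Round the OPB bus frequency to the nearest MHz */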
        return (ocp_sys_info.opb_bus_freq + 500000) / 1000000;
}

/* BHs disabled */
static int emac_configure(struct ocp_enet_private *dev)
{
        struct emac_regs *p = dev->emacp;
        struct net_device *ndev = dev->ndev;
        int gige;
        u32 r;

        DBG("%d: configure" NL, dev->def->index);

        if (emac_reset(dev) < 0)
                return -ETIMEDOUT;

        tah_reset(dev->tah_dev);

        /* Mode register */
        r = EMAC_MR1_BASE(emac_opb_mhz()) | EMAC_MR1_VLE | EMAC_MR1_IST;
        if (dev->phy.duplex == DUPLEX_FULL)
                r |= EMAC_MR1_FDE;
        switch (dev->phy.speed) {
        case SPEED_1000:
                if (emac_phy_gpcs(dev->phy.mode)) {
                        r |= EMAC_MR1_MF_1000GPCS |
                            EMAC_MR1_MF_IPPA(dev->phy.address);

                        /* Put some arbitrary OUI, Manuf & Rev IDs so we can
                         * identify this GPCS PHY later.
                         */
                        out_be32(&p->ipcr, 0xdeadbeef);
                } else
                        r |= EMAC_MR1_MF_1000;
                r |= EMAC_MR1_RFS_16K;
                gige = 1;

                if (dev->ndev->mtu > ETH_DATA_LEN)
                        r |= EMAC_MR1_JPSM;
                break;
        case SPEED_100:
                r |= EMAC_MR1_MF_100;
                /* Fall through */
        default:
                r |= EMAC_MR1_RFS_4K;
                gige = 0;
                break;
        }

        if (dev->rgmii_dev)
                rgmii_set_speed(dev->rgmii_dev, dev->rgmii_input,
                                dev->phy.speed);
        else
                zmii_set_speed(dev->zmii_dev, dev->zmii_input, dev->phy.speed);

#if !defined(CONFIG_40x)
        /* An erratum forces us to NOT use integrated flow control on 40x;
         * let's hope it works on 44x ;)
         */
        if (dev->phy.duplex == DUPLEX_FULL) {
                if (dev->phy.pause)
                        r |= EMAC_MR1_EIFC | EMAC_MR1_APP;
                else if (dev->phy.asym_pause)
                        r |= EMAC_MR1_APP;
        }
#endif
        out_be32(&p->mr1, r);

        /* Set individual MAC address */
        out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
        out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
                 (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
                 ndev->dev_addr[5]);

        /* VLAN Tag Protocol ID */
        out_be32(&p->vtpid, 0x8100);

        /* Receive mode register */
        r = emac_iff2rmr(ndev);
        if (r & EMAC_RMR_MAE)
                emac_hash_mc(dev);
        out_be32(&p->rmr, r);

        /* FIFOs thresholds */
        r = EMAC_TMR1((EMAC_MAL_BURST_SIZE / EMAC_FIFO_ENTRY_SIZE) + 1,
                      EMAC_TX_FIFO_SIZE / 2 / EMAC_FIFO_ENTRY_SIZE);
        out_be32(&p->tmr1, r);
        out_be32(&p->trtr, EMAC_TRTR(EMAC_TX_FIFO_SIZE / 2));

        /* A PAUSE frame is sent when the RX FIFO reaches its high-water mark;
           there should still be enough space in the FIFO to allow our link
           partner time to process this frame and also time to send a PAUSE
           frame itself.

           Here is the worst case scenario for the RX FIFO "headroom"
           (from "The Switch Book") (100Mbps, without preamble, inter-frame gap):

           1) One maximum-length frame on TX                    1522 bytes
           2) One PAUSE frame time                                64 bytes
           3) PAUSE frame decode time allowance                   64 bytes
           4) One maximum-length frame on RX                    1522 bytes
           5) Round-trip propagation delay of the link (100Mb)    15 bytes
                                                           ----------
                                                           3187 bytes

           I chose to set the high-water mark to RX_FIFO_SIZE / 4 (1024 bytes)
           and the low-water mark to RX_FIFO_SIZE / 8 (512 bytes).
         */
        r = EMAC_RWMR(EMAC_RX_FIFO_SIZE(gige) / 8 / EMAC_FIFO_ENTRY_SIZE,
                      EMAC_RX_FIFO_SIZE(gige) / 4 / EMAC_FIFO_ENTRY_SIZE);
        out_be32(&p->rwmr, r);

        /* Set PAUSE timer to the maximum */
        out_be32(&p->ptr, 0xffff);

        /* IRQ sources */
        out_be32(&p->iser, EMAC_ISR_TXPE | EMAC_ISR_RXPE | /* EMAC_ISR_TXUE |
                 EMAC_ISR_RXOE | */ EMAC_ISR_OVR | EMAC_ISR_BP | EMAC_ISR_SE |
                 EMAC_ISR_ALE | EMAC_ISR_BFCS | EMAC_ISR_PTLE | EMAC_ISR_ORE |
                 EMAC_ISR_IRE | EMAC_ISR_TE);

        /* We need to take GPCS PHY out of isolate mode after EMAC reset */
        if (emac_phy_gpcs(dev->phy.mode))
                mii_reset_phy(&dev->phy);

        return 0;
}

/* BHs disabled */
static void emac_reinitialize(struct ocp_enet_private *dev)
{
        DBG("%d: reinitialize" NL, dev->def->index);

        if (!emac_configure(dev)) {
                emac_tx_enable(dev);
                emac_rx_enable(dev);
        }
}

/* BHs disabled */
static void emac_full_tx_reset(struct net_device *ndev)
{
        struct ocp_enet_private *dev = ndev->priv;
        struct ocp_func_emac_data *emacdata = dev->def->additions;

        DBG("%d: full_tx_reset" NL, dev->def->index);

        emac_tx_disable(dev);
        mal_disable_tx_channel(dev->mal, emacdata->mal_tx_chan);
        emac_clean_tx_ring(dev);
        dev->tx_cnt = dev->tx_slot = dev->ack_slot = 0;

        emac_configure(dev);

        mal_enable_tx_channel(dev->mal, emacdata->mal_tx_chan);
        emac_tx_enable(dev);
        emac_rx_enable(dev);

        netif_wake_queue(ndev);
}

static int __emac_mdio_read(struct ocp_enet_private *dev, u8 id, u8 reg)
{
        struct emac_regs *p = dev->emacp;
        u32 r;
        int n;

        DBG2("%d: mdio_read(%02x,%02x)" NL, dev->def->index, id, reg);

        /* Enable proper MDIO port */
        zmii_enable_mdio(dev->zmii_dev, dev->zmii_input);

        /* Wait for management interface to become idle */
        n = 10;
        while (!emac_phy_done(in_be32(&p->stacr))) {
                udelay(1);
                if (!--n)
                        goto to;
        }

        /* Issue read command */
        out_be32(&p->stacr,
                 EMAC_STACR_BASE(emac_opb_mhz()) | EMAC_STACR_STAC_READ |
                 (reg & EMAC_STACR_PRA_MASK)
                 | ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT)
                 | EMAC_STACR_START);

        /* Wait for read to complete */
        n = 100;
        while (!emac_phy_done(r = in_be32(&p->stacr))) {
                udelay(1);
                if (!--n)
                        goto to;
        }

        if (unlikely(r & EMAC_STACR_PHYE)) {
                DBG("%d: mdio_read(%02x, %02x) failed" NL, dev->def->index,
                    id, reg);
                return -EREMOTEIO;
        }

        r = ((r >> EMAC_STACR_PHYD_SHIFT) & EMAC_STACR_PHYD_MASK);
        DBG2("%d: mdio_read -> %04x" NL, dev->def->index, r);
        return r;
      to:
        DBG("%d: MII management interface timeout (read)" NL, dev->def->index);
        return -ETIMEDOUT;
}

static void __emac_mdio_write(struct ocp_enet_private *dev, u8 id, u8 reg,
                              u16 val)
{
        struct emac_regs *p = dev->emacp;
        int n;

        DBG2("%d: mdio_write(%02x,%02x,%04x)" NL, dev->def->index, id, reg,
             val);

        /* Enable proper MDIO port */
        zmii_enable_mdio(dev->zmii_dev, dev->zmii_input);

        /* Wait for management interface to be idle */
        n = 10;
        while (!emac_phy_done(in_be32(&p->stacr))) {
                udelay(1);
                if (!--n)
                        goto to;
        }

        /* Issue write command */
        out_be32(&p->stacr,
                 EMAC_STACR_BASE(emac_opb_mhz()) | EMAC_STACR_STAC_WRITE |
                 (reg & EMAC_STACR_PRA_MASK) |
                 ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT) |
                 (val << EMAC_STACR_PHYD_SHIFT) | EMAC_STACR_START);

        /* Wait for write to complete */
        n = 100;
        while (!emac_phy_done(in_be32(&p->stacr))) {
                udelay(1);
                if (!--n)
                        goto to;
        }
        return;
      to:
        DBG("%d: MII management interface timeout (write)" NL, dev->def->index);
}

static int emac_mdio_read(struct net_device *ndev, int id, int reg)
{
        struct ocp_enet_private *dev = ndev->priv;
        int res;

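        /* EMACs may share MDIO lines; if mdio_dev is set, route the access
         * through the EMAC that actually owns them.
         */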
        local_bh_disable();
        res = __emac_mdio_read(dev->mdio_dev ? dev->mdio_dev : dev, (u8) id,
                               (u8) reg);
        local_bh_enable();
        return res;
}

static void emac_mdio_write(struct net_device *ndev, int id, int reg, int val)
{
        struct ocp_enet_private *dev = ndev->priv;

        local_bh_disable();
        __emac_mdio_write(dev->mdio_dev ? dev->mdio_dev : dev, (u8) id,
                          (u8) reg, (u16) val);
        local_bh_enable();
}

/* BHs disabled */
static void emac_set_multicast_list(struct net_device *ndev)
{
        struct ocp_enet_private *dev = ndev->priv;
        struct emac_regs *p = dev->emacp;
        u32 rmr = emac_iff2rmr(ndev);

        DBG("%d: multicast %08x" NL, dev->def->index, rmr);
        BUG_ON(!netif_running(dev->ndev));

        /* I decided to relax register access rules here to avoid
         * a full EMAC reset.
         *
         * There is a real problem with the EMAC4 core if we use the MWSW_001
         * bit in the MR1 register and do a full EMAC reset.
         * One TX BD status update is delayed and, after EMAC reset, it
         * never happens, resulting in a TX hang (it'll be recovered by the
         * TX timeout handler eventually, but this is just gross).
         * So we either have to do a full TX reset or try to cheat here :)
         *
         * The only required change is to the RX mode register, so I *think*
         * all we need is just to stop the RX channel. This seems to work on
         * all tested SoCs.                                            --ebs
         */
        emac_rx_disable(dev);
        if (rmr & EMAC_RMR_MAE)
                emac_hash_mc(dev);
        out_be32(&p->rmr, rmr);
        emac_rx_enable(dev);
}

/* BHs disabled */
static int emac_resize_rx_ring(struct ocp_enet_private *dev, int new_mtu)
{
        struct ocp_func_emac_data *emacdata = dev->def->additions;
        int rx_sync_size = emac_rx_sync_size(new_mtu);
        int rx_skb_size = emac_rx_skb_size(new_mtu);
        int i, ret = 0;

        emac_rx_disable(dev);
        mal_disable_rx_channel(dev->mal, emacdata->mal_rx_chan);

        if (dev->rx_sg_skb) {
                ++dev->estats.rx_dropped_resize;
                dev_kfree_skb(dev->rx_sg_skb);
                dev->rx_sg_skb = NULL;
        }

        /* Make a first pass over RX ring and mark BDs ready, dropping
         * non-processed packets on the way. We need this as a separate pass
         * to simplify error recovery in the case of allocation failure later.
         */
        for (i = 0; i < NUM_RX_BUFF; ++i) {
                if (dev->rx_desc[i].ctrl & MAL_RX_CTRL_FIRST)
                        ++dev->estats.rx_dropped_resize;

                dev->rx_desc[i].data_len = 0;
                dev->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY |
                    (i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
        }

        /* Reallocate RX ring only if bigger skb buffers are required */
        if (rx_skb_size <= dev->rx_skb_size)
                goto skip;

        /* Second pass, allocate new skbs */
        for (i = 0; i < NUM_RX_BUFF; ++i) {
                struct sk_buff *skb = alloc_skb(rx_skb_size, GFP_ATOMIC);
                if (!skb) {
                        ret = -ENOMEM;
                        goto oom;
                }

                BUG_ON(!dev->rx_skb[i]);
                dev_kfree_skb(dev->rx_skb[i]);

                skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
                dev->rx_desc[i].data_ptr =
                    dma_map_single(dev->ldev, skb->data - 2, rx_sync_size,
                                   DMA_FROM_DEVICE) + 2;
                dev->rx_skb[i] = skb;
        }
      skip:
        /* Check if we need to change "Jumbo" bit in MR1 */
        if ((new_mtu > ETH_DATA_LEN) ^ (dev->ndev->mtu > ETH_DATA_LEN)) {
                /* This is to prevent starting RX channel in emac_rx_enable() */
                dev->commac.rx_stopped = 1;

                dev->ndev->mtu = new_mtu;
                emac_full_tx_reset(dev->ndev);
        }

        mal_set_rcbs(dev->mal, emacdata->mal_rx_chan, emac_rx_size(new_mtu));
      oom:
        /* Restart RX */
        dev->commac.rx_stopped = dev->rx_slot = 0;
        mal_enable_rx_channel(dev->mal, emacdata->mal_rx_chan);
        emac_rx_enable(dev);

        return ret;
}

/* Process ctx, rtnl_lock semaphore */
static int emac_change_mtu(struct net_device *ndev, int new_mtu)
{
        struct ocp_enet_private *dev = ndev->priv;
        int ret = 0;

        if (new_mtu < EMAC_MIN_MTU || new_mtu > EMAC_MAX_MTU)
                return -EINVAL;

        DBG("%d: change_mtu(%d)" NL, dev->def->index, new_mtu);

        local_bh_disable();
        if (netif_running(ndev)) {
                /* Check if we really need to reinitialize the RX ring */
                if (emac_rx_skb_size(ndev->mtu) != emac_rx_skb_size(new_mtu))
                        ret = emac_resize_rx_ring(dev, new_mtu);
        }

        if (!ret) {
                ndev->mtu = new_mtu;
                dev->rx_skb_size = emac_rx_skb_size(new_mtu);
                dev->rx_sync_size = emac_rx_sync_size(new_mtu);
        }
        local_bh_enable();

        return ret;
}

static void emac_clean_tx_ring(struct ocp_enet_private *dev)
{
        int i;
        for (i = 0; i < NUM_TX_BUFF; ++i) {
                if (dev->tx_skb[i]) {
                        dev_kfree_skb(dev->tx_skb[i]);
                        dev->tx_skb[i] = NULL;
                        if (dev->tx_desc[i].ctrl & MAL_TX_CTRL_READY)
                                ++dev->estats.tx_dropped;
                }
                dev->tx_desc[i].ctrl = 0;
                dev->tx_desc[i].data_ptr = 0;
        }
}

static void emac_clean_rx_ring(struct ocp_enet_private *dev)
{
        int i;
        for (i = 0; i < NUM_RX_BUFF; ++i)
                if (dev->rx_skb[i]) {
                        dev->rx_desc[i].ctrl = 0;
                        dev_kfree_skb(dev->rx_skb[i]);
                        dev->rx_skb[i] = NULL;
                        dev->rx_desc[i].data_ptr = 0;
                }

        if (dev->rx_sg_skb) {
                dev_kfree_skb(dev->rx_sg_skb);
                dev->rx_sg_skb = NULL;
        }
}

static inline int emac_alloc_rx_skb(struct ocp_enet_private *dev, int slot,
                                    int flags)
{
        struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags);
        if (unlikely(!skb))
                return -ENOMEM;

        dev->rx_skb[slot] = skb;
        dev->rx_desc[slot].data_len = 0;

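        /* Reserve 2 extra bytes so the IP header ends up word-aligned behind
         * the 14-byte Ethernet header; the -2/+2 around the DMA mapping below
         * keeps the mapped region aligned while the EMAC writes the frame at
         * skb->data.
         */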
        skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
        dev->rx_desc[slot].data_ptr =
            dma_map_single(dev->ldev, skb->data - 2, dev->rx_sync_size,
                           DMA_FROM_DEVICE) + 2;
        barrier();
        dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
            (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);

        return 0;
}

static void emac_print_link_status(struct ocp_enet_private *dev)
{
        if (netif_carrier_ok(dev->ndev))
                printk(KERN_INFO "%s: link is up, %d %s%s\n",
                       dev->ndev->name, dev->phy.speed,
                       dev->phy.duplex == DUPLEX_FULL ? "FDX" : "HDX",
                       dev->phy.pause ? ", pause enabled" :
                       dev->phy.asym_pause ? ", asymmetric pause enabled" : "");
        else
                printk(KERN_INFO "%s: link is down\n", dev->ndev->name);
}

/* Process ctx, rtnl_lock semaphore */
static int emac_open(struct net_device *ndev)
{
        struct ocp_enet_private *dev = ndev->priv;
        struct ocp_func_emac_data *emacdata = dev->def->additions;
        int err, i;

        DBG("%d: open" NL, dev->def->index);

        /* Setup error IRQ handler */
        err = request_irq(dev->def->irq, emac_irq, 0, "EMAC", dev);
        if (err) {
                printk(KERN_ERR "%s: failed to request IRQ %d\n",
                       ndev->name, dev->def->irq);
                return err;
        }

        /* Allocate RX ring */
        for (i = 0; i < NUM_RX_BUFF; ++i)
                if (emac_alloc_rx_skb(dev, i, GFP_KERNEL)) {
                        printk(KERN_ERR "%s: failed to allocate RX ring\n",
                               ndev->name);
                        goto oom;
                }

        local_bh_disable();
        dev->tx_cnt = dev->tx_slot = dev->ack_slot = dev->rx_slot =
            dev->commac.rx_stopped = 0;
        dev->rx_sg_skb = NULL;

        if (dev->phy.address >= 0) {
                int link_poll_interval;
                if (dev->phy.def->ops->poll_link(&dev->phy)) {
                        dev->phy.def->ops->read_link(&dev->phy);
                        EMAC_RX_CLK_DEFAULT(dev->def->index);
                        netif_carrier_on(dev->ndev);
                        link_poll_interval = PHY_POLL_LINK_ON;
                } else {
                        EMAC_RX_CLK_TX(dev->def->index);
                        netif_carrier_off(dev->ndev);
                        link_poll_interval = PHY_POLL_LINK_OFF;
                }
                mod_timer(&dev->link_timer, jiffies + link_poll_interval);
                emac_print_link_status(dev);
        } else
                netif_carrier_on(dev->ndev);

        emac_configure(dev);
        mal_poll_add(dev->mal, &dev->commac);
        mal_enable_tx_channel(dev->mal, emacdata->mal_tx_chan);
        mal_set_rcbs(dev->mal, emacdata->mal_rx_chan, emac_rx_size(ndev->mtu));
        mal_enable_rx_channel(dev->mal, emacdata->mal_rx_chan);
        emac_tx_enable(dev);
        emac_rx_enable(dev);
        netif_start_queue(ndev);
        local_bh_enable();

        return 0;
      oom:
        emac_clean_rx_ring(dev);
        free_irq(dev->def->irq, dev);
        return -ENOMEM;
}

/* BHs disabled */
static int emac_link_differs(struct ocp_enet_private *dev)
{
        u32 r = in_be32(&dev->emacp->mr1);

        int duplex = r & EMAC_MR1_FDE ? DUPLEX_FULL : DUPLEX_HALF;
        int speed, pause, asym_pause;

        if (r & (EMAC_MR1_MF_1000 | EMAC_MR1_MF_1000GPCS))
                speed = SPEED_1000;
        else if (r & EMAC_MR1_MF_100)
                speed = SPEED_100;
        else
                speed = SPEED_10;

        switch (r & (EMAC_MR1_EIFC | EMAC_MR1_APP)) {
        case (EMAC_MR1_EIFC | EMAC_MR1_APP):
                pause = 1;
                asym_pause = 0;
                break;
        case EMAC_MR1_APP:
                pause = 0;
                asym_pause = 1;
                break;
        default:
                pause = asym_pause = 0;
        }
        return speed != dev->phy.speed || duplex != dev->phy.duplex ||
            pause != dev->phy.pause || asym_pause != dev->phy.asym_pause;
}

/* BHs disabled */
static void emac_link_timer(unsigned long data)
{
        struct ocp_enet_private *dev = (struct ocp_enet_private *)data;
        int link_poll_interval;

        DBG2("%d: link timer" NL, dev->def->index);

        if (dev->phy.def->ops->poll_link(&dev->phy)) {
                if (!netif_carrier_ok(dev->ndev)) {
                        EMAC_RX_CLK_DEFAULT(dev->def->index);

                        /* Get new link parameters */
                        dev->phy.def->ops->read_link(&dev->phy);

                        if (dev->tah_dev || emac_link_differs(dev))
                                emac_full_tx_reset(dev->ndev);

                        netif_carrier_on(dev->ndev);
                        emac_print_link_status(dev);
                }
                link_poll_interval = PHY_POLL_LINK_ON;
        } else {
                if (netif_carrier_ok(dev->ndev)) {
                        EMAC_RX_CLK_TX(dev->def->index);
#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX)
                        emac_reinitialize(dev);
#endif
                        netif_carrier_off(dev->ndev);
                        emac_print_link_status(dev);
                }

                /* Retry reset if the previous attempt failed.
                 * This is needed mostly for the CONFIG_IBM_EMAC_PHY_RX_CLK_FIX
                 * case, but I left it here because it shouldn't trigger for
                 * sane PHYs anyway.
                 */
                if (unlikely(dev->reset_failed))
                        emac_reinitialize(dev);

                link_poll_interval = PHY_POLL_LINK_OFF;
        }
        mod_timer(&dev->link_timer, jiffies + link_poll_interval);
}

/* BHs disabled */
static void emac_force_link_update(struct ocp_enet_private *dev)
{
        netif_carrier_off(dev->ndev);
        if (timer_pending(&dev->link_timer))
                mod_timer(&dev->link_timer, jiffies + PHY_POLL_LINK_OFF);
}

/* Process ctx, rtnl_lock semaphore */
static int emac_close(struct net_device *ndev)
{
        struct ocp_enet_private *dev = ndev->priv;
        struct ocp_func_emac_data *emacdata = dev->def->additions;

        DBG("%d: close" NL, dev->def->index);

        local_bh_disable();

        if (dev->phy.address >= 0)
                del_timer_sync(&dev->link_timer);

        netif_stop_queue(ndev);
        emac_rx_disable(dev);
        emac_tx_disable(dev);
        mal_disable_rx_channel(dev->mal, emacdata->mal_rx_chan);
        mal_disable_tx_channel(dev->mal, emacdata->mal_tx_chan);
        mal_poll_del(dev->mal, &dev->commac);
        local_bh_enable();

        emac_clean_tx_ring(dev);
        emac_clean_rx_ring(dev);
        free_irq(dev->def->irq, dev);

        return 0;
}

static inline u16 emac_tx_csum(struct ocp_enet_private *dev,
                               struct sk_buff *skb)
{
#if defined(CONFIG_IBM_EMAC_TAH)
        if (skb->ip_summed == CHECKSUM_HW) {
                ++dev->stats.tx_packets_csum;
                return EMAC_TX_CTRL_TAH_CSUM;
        }
#endif
        return 0;
}

static inline int emac_xmit_finish(struct ocp_enet_private *dev, int len)
{
        struct emac_regs *p = dev->emacp;
        struct net_device *ndev = dev->ndev;

        /* Send the packet out */
        out_be32(&p->tmr0, EMAC_TMR0_XMIT);

        if (unlikely(++dev->tx_cnt == NUM_TX_BUFF)) {
                netif_stop_queue(ndev);
                DBG2("%d: stopped TX queue" NL, dev->def->index);
        }

        ndev->trans_start = jiffies;
        ++dev->stats.tx_packets;
        dev->stats.tx_bytes += len;

        return 0;
}

/* BHs disabled */
static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
        struct ocp_enet_private *dev = ndev->priv;
        unsigned int len = skb->len;
        int slot;

        u16 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
            MAL_TX_CTRL_LAST | emac_tx_csum(dev, skb);

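        /* Simple case: the whole frame fits into a single BD */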
        slot = dev->tx_slot++;
        if (dev->tx_slot == NUM_TX_BUFF) {
                dev->tx_slot = 0;
                ctrl |= MAL_TX_CTRL_WRAP;
        }

        DBG2("%d: xmit(%u) %d" NL, dev->def->index, len, slot);

        dev->tx_skb[slot] = skb;
        dev->tx_desc[slot].data_ptr = dma_map_single(dev->ldev, skb->data, len,
                                                     DMA_TO_DEVICE);
        dev->tx_desc[slot].data_len = (u16) len;
        barrier();
        dev->tx_desc[slot].ctrl = ctrl;

        return emac_xmit_finish(dev, len);
}

#if defined(CONFIG_IBM_EMAC_TAH)
static inline int emac_xmit_split(struct ocp_enet_private *dev, int slot,
                                  u32 pd, int len, int last, u16 base_ctrl)
{
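        /* Split one contiguous DMA region into MAL_MAX_TX_SIZE-sized BDs;
         * LAST is set on the final chunk (if requested) and WRAP whenever
         * the last ring slot is used. Returns the last slot written.
         */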
        while (1) {
                u16 ctrl = base_ctrl;
                int chunk = min(len, MAL_MAX_TX_SIZE);
                len -= chunk;

                slot = (slot + 1) % NUM_TX_BUFF;

                if (last && !len)
                        ctrl |= MAL_TX_CTRL_LAST;
                if (slot == NUM_TX_BUFF - 1)
                        ctrl |= MAL_TX_CTRL_WRAP;

                dev->tx_skb[slot] = NULL;
                dev->tx_desc[slot].data_ptr = pd;
                dev->tx_desc[slot].data_len = (u16) chunk;
                dev->tx_desc[slot].ctrl = ctrl;
                ++dev->tx_cnt;

                if (!len)
                        break;

                pd += chunk;
        }
        return slot;
}

/* BHs disabled (SG version for TAH equipped EMACs) */
static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
{
        struct ocp_enet_private *dev = ndev->priv;
        int nr_frags = skb_shinfo(skb)->nr_frags;
        int len = skb->len, chunk;
        int slot, i;
        u16 ctrl;
        u32 pd;

        /* This is common "fast" path */
        if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE))
                return emac_start_xmit(skb, ndev);

        len -= skb->data_len;

        /* Note, this is only an *estimation*, we can still run out of empty
         * slots because of the additional fragmentation into
         * MAL_MAX_TX_SIZE-sized chunks
         */
        if (unlikely(dev->tx_cnt + nr_frags + mal_tx_chunks(len) > NUM_TX_BUFF))
                goto stop_queue;

        ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
            emac_tx_csum(dev, skb);
        slot = dev->tx_slot;

        /* skb data */
        dev->tx_skb[slot] = NULL;
        chunk = min(len, MAL_MAX_TX_SIZE);
        dev->tx_desc[slot].data_ptr = pd =
            dma_map_single(dev->ldev, skb->data, len, DMA_TO_DEVICE);
        dev->tx_desc[slot].data_len = (u16) chunk;
        len -= chunk;
        if (unlikely(len))
                slot = emac_xmit_split(dev, slot, pd + chunk, len, !nr_frags,
                                       ctrl);
        /* skb fragments */
        for (i = 0; i < nr_frags; ++i) {
                struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
                len = frag->size;

                if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
                        goto undo_frame;

                pd = dma_map_page(dev->ldev, frag->page, frag->page_offset, len,
                                  DMA_TO_DEVICE);

                slot = emac_xmit_split(dev, slot, pd, len, i == nr_frags - 1,
                                       ctrl);
        }

        DBG2("%d: xmit_sg(%u) %d - %d" NL, dev->def->index, skb->len,
             dev->tx_slot, slot);

        /* Attach skb to the last slot so we don't release it too early */
        dev->tx_skb[slot] = skb;

        /* Send the packet out */
        if (dev->tx_slot == NUM_TX_BUFF - 1)
                ctrl |= MAL_TX_CTRL_WRAP;
        barrier();
        dev->tx_desc[dev->tx_slot].ctrl = ctrl;
        dev->tx_slot = (slot + 1) % NUM_TX_BUFF;

        return emac_xmit_finish(dev, skb->len);

      undo_frame:
        /* Well, too bad. Our previous estimation was overly optimistic.
         * Undo everything.
         */
        while (slot != dev->tx_slot) {
                dev->tx_desc[slot].ctrl = 0;
                --dev->tx_cnt;
                if (--slot < 0)
                        slot = NUM_TX_BUFF - 1;
        }
        ++dev->estats.tx_undo;

      stop_queue:
        netif_stop_queue(ndev);
        DBG2("%d: stopped TX queue" NL, dev->def->index);
        return 1;
}
#else
# define emac_start_xmit_sg     emac_start_xmit
#endif  /* !defined(CONFIG_IBM_EMAC_TAH) */

/* BHs disabled */
static void emac_parse_tx_error(struct ocp_enet_private *dev, u16 ctrl)
{
        struct ibm_emac_error_stats *st = &dev->estats;
        DBG("%d: BD TX error %04x" NL, dev->def->index, ctrl);

        ++st->tx_bd_errors;
        if (ctrl & EMAC_TX_ST_BFCS)
                ++st->tx_bd_bad_fcs;
        if (ctrl & EMAC_TX_ST_LCS)
                ++st->tx_bd_carrier_loss;
        if (ctrl & EMAC_TX_ST_ED)
                ++st->tx_bd_excessive_deferral;
        if (ctrl & EMAC_TX_ST_EC)
                ++st->tx_bd_excessive_collisions;
        if (ctrl & EMAC_TX_ST_LC)
                ++st->tx_bd_late_collision;
        if (ctrl & EMAC_TX_ST_MC)
                ++st->tx_bd_multple_collisions;
        if (ctrl & EMAC_TX_ST_SC)
                ++st->tx_bd_single_collision;
        if (ctrl & EMAC_TX_ST_UR)
                ++st->tx_bd_underrun;
        if (ctrl & EMAC_TX_ST_SQE)
                ++st->tx_bd_sqe;
}

static void emac_poll_tx(void *param)
{
        struct ocp_enet_private *dev = param;
        DBG2("%d: poll_tx, %d %d" NL, dev->def->index, dev->tx_cnt,
             dev->ack_slot);

        if (dev->tx_cnt) {
                u16 ctrl;
                int slot = dev->ack_slot, n = 0;
              again:
                ctrl = dev->tx_desc[slot].ctrl;
                if (!(ctrl & MAL_TX_CTRL_READY)) {
                        struct sk_buff *skb = dev->tx_skb[slot];
                        ++n;

                        if (skb) {
                                dev_kfree_skb(skb);
                                dev->tx_skb[slot] = NULL;
                        }
                        slot = (slot + 1) % NUM_TX_BUFF;

                        if (unlikely(EMAC_IS_BAD_TX(ctrl)))
                                emac_parse_tx_error(dev, ctrl);

                        if (--dev->tx_cnt)
                                goto again;
                }
                if (n) {
                        dev->ack_slot = slot;
                        if (netif_queue_stopped(dev->ndev) &&
                            dev->tx_cnt < EMAC_TX_WAKEUP_THRESH)
                                netif_wake_queue(dev->ndev);

                        DBG2("%d: tx %d pkts" NL, dev->def->index, n);
                }
        }
}

static inline void emac_recycle_rx_skb(struct ocp_enet_private *dev, int slot,
                                       int len)
{
        struct sk_buff *skb = dev->rx_skb[slot];
        DBG2("%d: recycle %d %d" NL, dev->def->index, slot, len);

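        /* The dma_map_single() below effectively just invalidates the cache
         * over the area the EMAC wrote; see the DMA API note at the top of
         * this file.
         */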
        if (len)
                dma_map_single(dev->ldev, skb->data - 2,
                               EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE);

        dev->rx_desc[slot].data_len = 0;
        barrier();
        dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
            (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
}

static void emac_parse_rx_error(struct ocp_enet_private *dev, u16 ctrl)
{
        struct ibm_emac_error_stats *st = &dev->estats;
        DBG("%d: BD RX error %04x" NL, dev->def->index, ctrl);

        ++st->rx_bd_errors;
        if (ctrl & EMAC_RX_ST_OE)
                ++st->rx_bd_overrun;
        if (ctrl & EMAC_RX_ST_BP)
                ++st->rx_bd_bad_packet;
        if (ctrl & EMAC_RX_ST_RP)
                ++st->rx_bd_runt_packet;
        if (ctrl & EMAC_RX_ST_SE)
                ++st->rx_bd_short_event;
        if (ctrl & EMAC_RX_ST_AE)
                ++st->rx_bd_alignment_error;
        if (ctrl & EMAC_RX_ST_BFCS)
                ++st->rx_bd_bad_fcs;
        if (ctrl & EMAC_RX_ST_PTL)
                ++st->rx_bd_packet_too_long;
        if (ctrl & EMAC_RX_ST_ORE)
                ++st->rx_bd_out_of_range;
        if (ctrl & EMAC_RX_ST_IRE)
                ++st->rx_bd_in_range;
}

static inline void emac_rx_csum(struct ocp_enet_private *dev,
                                struct sk_buff *skb, u16 ctrl)
{
#if defined(CONFIG_IBM_EMAC_TAH)
        if (!ctrl && dev->tah_dev) {
                skb->ip_summed = CHECKSUM_UNNECESSARY;
                ++dev->stats.rx_packets_csum;
        }
#endif
}

static inline int emac_rx_sg_append(struct ocp_enet_private *dev, int slot)
{
        if (likely(dev->rx_sg_skb != NULL)) {
                int len = dev->rx_desc[slot].data_len;
                int tot_len = dev->rx_sg_skb->len + len;

                if (unlikely(tot_len + 2 > dev->rx_skb_size)) {
                        ++dev->estats.rx_dropped_mtu;
                        dev_kfree_skb(dev->rx_sg_skb);
                        dev->rx_sg_skb = NULL;
                } else {
                        cacheable_memcpy(dev->rx_sg_skb->tail,
                                         dev->rx_skb[slot]->data, len);
                        skb_put(dev->rx_sg_skb, len);
                        emac_recycle_rx_skb(dev, slot, len);
                        return 0;
                }
        }
        emac_recycle_rx_skb(dev, slot, 0);
        return -1;
}

/* BHs disabled */
static int emac_poll_rx(void *param, int budget)
{
        struct ocp_enet_private *dev = param;
        int slot = dev->rx_slot, received = 0;

        DBG2("%d: poll_rx(%d)" NL, dev->def->index, budget);

      again:
        while (budget > 0) {
                int len;
                struct sk_buff *skb;
                u16 ctrl = dev->rx_desc[slot].ctrl;

                if (ctrl & MAL_RX_CTRL_EMPTY)
                        break;

                skb = dev->rx_skb[slot];
                barrier();
                len = dev->rx_desc[slot].data_len;

                if (unlikely(!MAL_IS_SINGLE_RX(ctrl)))
                        goto sg;

                ctrl &= EMAC_BAD_RX_MASK;
                if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
                        emac_parse_rx_error(dev, ctrl);
                        ++dev->estats.rx_dropped_error;
                        emac_recycle_rx_skb(dev, slot, 0);
                        len = 0;
                        goto next;
                }

                if (len && len < EMAC_RX_COPY_THRESH) {
                        struct sk_buff *copy_skb =
                            alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2, GFP_ATOMIC);
                        if (unlikely(!copy_skb))
                                goto oom;

                        skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2);
                        cacheable_memcpy(copy_skb->data - 2, skb->data - 2,
                                         len + 2);
                        emac_recycle_rx_skb(dev, slot, len);
                        skb = copy_skb;
                } else if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC)))
                        goto oom;

                skb_put(skb, len);
              push_packet:
                skb->dev = dev->ndev;
                skb->protocol = eth_type_trans(skb, dev->ndev);
                emac_rx_csum(dev, skb, ctrl);

                if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
                        ++dev->estats.rx_dropped_stack;
              next:
                ++dev->stats.rx_packets;
              skip:
                dev->stats.rx_bytes += len;
                slot = (slot + 1) % NUM_RX_BUFF;
                --budget;
                ++received;
                continue;
              sg:
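                /* Scatter-gather: the frame spans several BDs. Accumulate
                 * the pieces in rx_sg_skb until the LAST descriptor arrives.
                 */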
                if (ctrl & MAL_RX_CTRL_FIRST) {
                        BUG_ON(dev->rx_sg_skb);
                        if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) {
                                DBG("%d: rx OOM %d" NL, dev->def->index, slot);
                                ++dev->estats.rx_dropped_oom;
                                emac_recycle_rx_skb(dev, slot, 0);
                        } else {
                                dev->rx_sg_skb = skb;
                                skb_put(skb, len);
                        }
                } else if (!emac_rx_sg_append(dev, slot) &&
                           (ctrl & MAL_RX_CTRL_LAST)) {

                        skb = dev->rx_sg_skb;
                        dev->rx_sg_skb = NULL;

                        ctrl &= EMAC_BAD_RX_MASK;
                        if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
                                emac_parse_rx_error(dev, ctrl);
                                ++dev->estats.rx_dropped_error;
                                dev_kfree_skb(skb);
                                len = 0;
                        } else
                                goto push_packet;
                }
                goto skip;
              oom:
                DBG("%d: rx OOM %d" NL, dev->def->index, slot);
                /* Drop the packet and recycle skb */
                ++dev->estats.rx_dropped_oom;
                emac_recycle_rx_skb(dev, slot, 0);
                goto next;
        }

        if (received) {
                DBG2("%d: rx %d BDs" NL, dev->def->index, received);
                dev->rx_slot = slot;
        }

        if (unlikely(budget && dev->commac.rx_stopped)) {
                struct ocp_func_emac_data *emacdata = dev->def->additions;

                barrier();
                if (!(dev->rx_desc[slot].ctrl & MAL_RX_CTRL_EMPTY)) {
                        DBG2("%d: rx restart" NL, dev->def->index);
                        received = 0;
                        goto again;
                }

                if (dev->rx_sg_skb) {
                        DBG2("%d: dropping partial rx packet" NL,
                             dev->def->index);
                        ++dev->estats.rx_dropped_error;
                        dev_kfree_skb(dev->rx_sg_skb);
                        dev->rx_sg_skb = NULL;
                }

                dev->commac.rx_stopped = 0;
                mal_enable_rx_channel(dev->mal, emacdata->mal_rx_chan);
                emac_rx_enable(dev);
                dev->rx_slot = 0;
        }
        return received;
}
1462
1463 /* BHs disabled */
1464 static int emac_peek_rx(void *param)
1465 {
1466         struct ocp_enet_private *dev = param;
1467         return !(dev->rx_desc[dev->rx_slot].ctrl & MAL_RX_CTRL_EMPTY);
1468 }
1469
1470 /* BHs disabled */
1471 static int emac_peek_rx_sg(void *param)
1472 {
1473         struct ocp_enet_private *dev = param;
1474         int slot = dev->rx_slot;
1475         while (1) {
1476                 u16 ctrl = dev->rx_desc[slot].ctrl;
1477                 if (ctrl & MAL_RX_CTRL_EMPTY)
1478                         return 0;
1479                 else if (ctrl & MAL_RX_CTRL_LAST)
1480                         return 1;
1481
1482                 slot = (slot + 1) % NUM_RX_BUFF;
1483
1484                 /* I'm just being paranoid here :) */
1485                 if (unlikely(slot == dev->rx_slot))
1486                         return 0;
1487         }
1488 }
1489
1490 /* Hard IRQ */
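     /* RX descriptor error callback from MAL: count the event and kick off
      * an asynchronous receiver shutdown; emac_poll_rx() notices rx_stopped
      * and restarts the channel (see above).
      */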
1491 static void emac_rxde(void *param)
1492 {
1493         struct ocp_enet_private *dev = param;
1494         ++dev->estats.rx_stopped;
1495         emac_rx_disable_async(dev);
1496 }
1497
1498 /* Hard IRQ */
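     /* EMAC status interrupt: read ISR and write the value straight back,
      * which acks the latched error bits, then account each condition in
      * the extended statistics.
      */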
1499 static irqreturn_t emac_irq(int irq, void *dev_instance, struct pt_regs *regs)
1500 {
1501         struct ocp_enet_private *dev = dev_instance;
1502         struct emac_regs *p = dev->emacp;
1503         struct ibm_emac_error_stats *st = &dev->estats;
1504
1505         u32 isr = in_be32(&p->isr);
1506         out_be32(&p->isr, isr);
1507
1508         DBG("%d: isr = %08x" NL, dev->def->index, isr);
1509
1510         if (isr & EMAC_ISR_TXPE)
1511                 ++st->tx_parity;
1512         if (isr & EMAC_ISR_RXPE)
1513                 ++st->rx_parity;
1514         if (isr & EMAC_ISR_TXUE)
1515                 ++st->tx_underrun;
1516         if (isr & EMAC_ISR_RXOE)
1517                 ++st->rx_fifo_overrun;
1518         if (isr & EMAC_ISR_OVR)
1519                 ++st->rx_overrun;
1520         if (isr & EMAC_ISR_BP)
1521                 ++st->rx_bad_packet;
1522         if (isr & EMAC_ISR_RP)
1523                 ++st->rx_runt_packet;
1524         if (isr & EMAC_ISR_SE)
1525                 ++st->rx_short_event;
1526         if (isr & EMAC_ISR_ALE)
1527                 ++st->rx_alignment_error;
1528         if (isr & EMAC_ISR_BFCS)
1529                 ++st->rx_bad_fcs;
1530         if (isr & EMAC_ISR_PTLE)
1531                 ++st->rx_packet_too_long;
1532         if (isr & EMAC_ISR_ORE)
1533                 ++st->rx_out_of_range;
1534         if (isr & EMAC_ISR_IRE)
1535                 ++st->rx_in_range;
1536         if (isr & EMAC_ISR_SQE)
1537                 ++st->tx_sqe;
1538         if (isr & EMAC_ISR_TE)
1539                 ++st->tx_errors;
1540
1541         return IRQ_HANDLED;
1542 }
1543
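     /* Fold the 64-bit driver counters into the 32-bit net_device_stats
      * the stack expects.  The counters are updated from hard-IRQ and
      * softirq context, so interrupts are disabled around the snapshot to
      * keep the 64-bit reads consistent on 32-bit PPC.
      */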
1544 static struct net_device_stats *emac_stats(struct net_device *ndev)
1545 {
1546         struct ocp_enet_private *dev = ndev->priv;
1547         struct ibm_emac_stats *st = &dev->stats;
1548         struct ibm_emac_error_stats *est = &dev->estats;
1549         struct net_device_stats *nst = &dev->nstats;
1550
1551         DBG2("%d: stats" NL, dev->def->index);
1552
1553         /* Compute "legacy" statistics */
1554         local_irq_disable();
1555         nst->rx_packets = (unsigned long)st->rx_packets;
1556         nst->rx_bytes = (unsigned long)st->rx_bytes;
1557         nst->tx_packets = (unsigned long)st->tx_packets;
1558         nst->tx_bytes = (unsigned long)st->tx_bytes;
1559         nst->rx_dropped = (unsigned long)(est->rx_dropped_oom +
1560                                           est->rx_dropped_error +
1561                                           est->rx_dropped_resize +
1562                                           est->rx_dropped_mtu);
1563         nst->tx_dropped = (unsigned long)est->tx_dropped;
1564
1565         nst->rx_errors = (unsigned long)est->rx_bd_errors;
1566         nst->rx_fifo_errors = (unsigned long)(est->rx_bd_overrun +
1567                                               est->rx_fifo_overrun +
1568                                               est->rx_overrun);
1569         nst->rx_frame_errors = (unsigned long)(est->rx_bd_alignment_error +
1570                                                est->rx_alignment_error);
1571         nst->rx_crc_errors = (unsigned long)(est->rx_bd_bad_fcs +
1572                                              est->rx_bad_fcs);
1573         nst->rx_length_errors = (unsigned long)(est->rx_bd_runt_packet +
1574                                                 est->rx_bd_short_event +
1575                                                 est->rx_bd_packet_too_long +
1576                                                 est->rx_bd_out_of_range +
1577                                                 est->rx_bd_in_range +
1578                                                 est->rx_runt_packet +
1579                                                 est->rx_short_event +
1580                                                 est->rx_packet_too_long +
1581                                                 est->rx_out_of_range +
1582                                                 est->rx_in_range);
1583
1584         nst->tx_errors = (unsigned long)(est->tx_bd_errors + est->tx_errors);
1585         nst->tx_fifo_errors = (unsigned long)(est->tx_bd_underrun +
1586                                               est->tx_underrun);
1587         nst->tx_carrier_errors = (unsigned long)est->tx_bd_carrier_loss;
1588         nst->collisions = (unsigned long)(est->tx_bd_excessive_deferral +
1589                                           est->tx_bd_excessive_collisions +
1590                                           est->tx_bd_late_collision +
1591                                           est->tx_bd_multple_collisions);
1592         local_irq_enable();
1593         return nst;
1594 }
1595
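     /* Teardown mirrors probe in reverse: detach from the network stack
      * first so no new traffic can arrive, then release the TAH/RGMII/ZMII
      * helpers, the debug hook, the MAL channel, the register mapping and
      * finally the net_device itself.
      */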
1596 static void emac_remove(struct ocp_device *ocpdev)
1597 {
1598         struct ocp_enet_private *dev = ocp_get_drvdata(ocpdev);
1599
1600         DBG("%d: remove" NL, dev->def->index);
1601
1602         ocp_set_drvdata(ocpdev, NULL);
1603         unregister_netdev(dev->ndev);
1604
1605         tah_fini(dev->tah_dev);
1606         rgmii_fini(dev->rgmii_dev, dev->rgmii_input);
1607         zmii_fini(dev->zmii_dev, dev->zmii_input);
1608
1609         emac_dbg_register(dev->def->index, NULL);
1610
1611         mal_unregister_commac(dev->mal, &dev->commac);
1612         iounmap((void *)dev->emacp);
1613         free_netdev(dev->ndev);
1614 }
1615
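     /* MAL callback tables.  The SG variant differs only in peek_rx: when
      * frames may span several RX descriptors we must not signal work until
      * a whole packet (LAST bit) is available.
      */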
1616 static struct mal_commac_ops emac_commac_ops = {
1617         .poll_tx = &emac_poll_tx,
1618         .poll_rx = &emac_poll_rx,
1619         .peek_rx = &emac_peek_rx,
1620         .rxde = &emac_rxde,
1621 };
1622
1623 static struct mal_commac_ops emac_commac_sg_ops = {
1624         .poll_tx = &emac_poll_tx,
1625         .poll_rx = &emac_poll_rx,
1626         .peek_rx = &emac_peek_rx_sg,
1627         .rxde = &emac_rxde,
1628 };
1629
1630 /* Ethtool support */
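     /* Link state (speed/duplex/autoneg) is updated asynchronously by the
      * PHY polling timer, so the handlers below bracket accesses to
      * dev->phy with local_bh_disable() to get a consistent snapshot.
      */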
1631 static int emac_ethtool_get_settings(struct net_device *ndev,
1632                                      struct ethtool_cmd *cmd)
1633 {
1634         struct ocp_enet_private *dev = ndev->priv;
1635
1636         cmd->supported = dev->phy.features;
1637         cmd->port = PORT_MII;
1638         cmd->phy_address = dev->phy.address;
1639         cmd->transceiver =
1640             dev->phy.address >= 0 ? XCVR_EXTERNAL : XCVR_INTERNAL;
1641
1642         local_bh_disable();
1643         cmd->advertising = dev->phy.advertising;
1644         cmd->autoneg = dev->phy.autoneg;
1645         cmd->speed = dev->phy.speed;
1646         cmd->duplex = dev->phy.duplex;
1647         local_bh_enable();
1648
1649         return 0;
1650 }
1651
1652 static int emac_ethtool_set_settings(struct net_device *ndev,
1653                                      struct ethtool_cmd *cmd)
1654 {
1655         struct ocp_enet_private *dev = ndev->priv;
1656         u32 f = dev->phy.features;
1657
1658         DBG("%d: set_settings(%d, %d, %d, 0x%08x)" NL, dev->def->index,
1659             cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising);
1660
1661         /* Basic sanity checks */
1662         if (dev->phy.address < 0)
1663                 return -EOPNOTSUPP;
1664         if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
1665                 return -EINVAL;
1666         if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
1667                 return -EINVAL;
1668         if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL)
1669                 return -EINVAL;
1670
1671         if (cmd->autoneg == AUTONEG_DISABLE) {
1672                 switch (cmd->speed) {
1673                 case SPEED_10:
1674                         if (cmd->duplex == DUPLEX_HALF
1675                             && !(f & SUPPORTED_10baseT_Half))
1676                                 return -EINVAL;
1677                         if (cmd->duplex == DUPLEX_FULL
1678                             && !(f & SUPPORTED_10baseT_Full))
1679                                 return -EINVAL;
1680                         break;
1681                 case SPEED_100:
1682                         if (cmd->duplex == DUPLEX_HALF
1683                             && !(f & SUPPORTED_100baseT_Half))
1684                                 return -EINVAL;
1685                         if (cmd->duplex == DUPLEX_FULL
1686                             && !(f & SUPPORTED_100baseT_Full))
1687                                 return -EINVAL;
1688                         break;
1689                 case SPEED_1000:
1690                         if (cmd->duplex == DUPLEX_HALF
1691                             && !(f & SUPPORTED_1000baseT_Half))
1692                                 return -EINVAL;
1693                         if (cmd->duplex == DUPLEX_FULL
1694                             && !(f & SUPPORTED_1000baseT_Full))
1695                                 return -EINVAL;
1696                         break;
1697                 default:
1698                         return -EINVAL;
1699                 }
1700
1701                 local_bh_disable();
1702                 dev->phy.def->ops->setup_forced(&dev->phy, cmd->speed,
1703                                                 cmd->duplex);
1704
1705         } else {
1706                 if (!(f & SUPPORTED_Autoneg))
1707                         return -EINVAL;
1708
1709                 local_bh_disable();
1710                 dev->phy.def->ops->setup_aneg(&dev->phy,
1711                                               (cmd->advertising & f) |
1712                                               (dev->phy.advertising &
1713                                                (ADVERTISED_Pause |
1714                                                 ADVERTISED_Asym_Pause)));
1715         }
1716         emac_force_link_update(dev);
1717         local_bh_enable();
1718
1719         return 0;
1720 }
1721
1722 static void emac_ethtool_get_ringparam(struct net_device *ndev,
1723                                        struct ethtool_ringparam *rp)
1724 {
1725         rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF;
1726         rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF;
1727 }
1728
1729 static void emac_ethtool_get_pauseparam(struct net_device *ndev,
1730                                         struct ethtool_pauseparam *pp)
1731 {
1732         struct ocp_enet_private *dev = ndev->priv;
1733
1734         local_bh_disable();
1735         if ((dev->phy.features & SUPPORTED_Autoneg) &&
1736             (dev->phy.advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause)))
1737                 pp->autoneg = 1;
1738
1739         if (dev->phy.duplex == DUPLEX_FULL) {
1740                 if (dev->phy.pause)
1741                         pp->rx_pause = pp->tx_pause = 1;
1742                 else if (dev->phy.asym_pause)
1743                         pp->tx_pause = 1;
1744         }
1745         local_bh_enable();
1746 }
1747
1748 static u32 emac_ethtool_get_rx_csum(struct net_device *ndev)
1749 {
1750         struct ocp_enet_private *dev = ndev->priv;
1751         return dev->tah_dev != NULL;
1752 }
1753
1754 static int emac_get_regs_len(struct ocp_enet_private *dev)
1755 {
1756         return sizeof(struct emac_ethtool_regs_subhdr) + EMAC_ETHTOOL_REGS_SIZE;
1757 }
1758
1759 static int emac_ethtool_get_regs_len(struct net_device *ndev)
1760 {
1761         struct ocp_enet_private *dev = ndev->priv;
1762         return sizeof(struct emac_ethtool_regs_hdr) +
1763             emac_get_regs_len(dev) + mal_get_regs_len(dev->mal) +
1764             zmii_get_regs_len(dev->zmii_dev) +
1765             rgmii_get_regs_len(dev->rgmii_dev) +
1766             tah_get_regs_len(dev->tah_dev);
1767 }
1768
1769 static void *emac_dump_regs(struct ocp_enet_private *dev, void *buf)
1770 {
1771         struct emac_ethtool_regs_subhdr *hdr = buf;
1772
1773         hdr->version = EMAC_ETHTOOL_REGS_VER;
1774         hdr->index = dev->def->index;
1775         memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE);
1776         return ((void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE);
1777 }
1778
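     /* The blob handed to ethtool is a header, the MAL registers, our own
      * subheader + EMAC registers, and then one optional block per attached
      * helper; hdr->components tells userspace which of the ZMII/RGMII/TAH
      * blocks are present.
      */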
1779 static void emac_ethtool_get_regs(struct net_device *ndev,
1780                                   struct ethtool_regs *regs, void *buf)
1781 {
1782         struct ocp_enet_private *dev = ndev->priv;
1783         struct emac_ethtool_regs_hdr *hdr = buf;
1784
1785         hdr->components = 0;
1786         buf = hdr + 1;
1787
1788         local_irq_disable();
1789         buf = mal_dump_regs(dev->mal, buf);
1790         buf = emac_dump_regs(dev, buf);
1791         if (dev->zmii_dev) {
1792                 hdr->components |= EMAC_ETHTOOL_REGS_ZMII;
1793                 buf = zmii_dump_regs(dev->zmii_dev, buf);
1794         }
1795         if (dev->rgmii_dev) {
1796                 hdr->components |= EMAC_ETHTOOL_REGS_RGMII;
1797                 buf = rgmii_dump_regs(dev->rgmii_dev, buf);
1798         }
1799         if (dev->tah_dev) {
1800                 hdr->components |= EMAC_ETHTOOL_REGS_TAH;
1801                 buf = tah_dump_regs(dev->tah_dev, buf);
1802         }
1803         local_irq_enable();
1804 }
1805
1806 static int emac_ethtool_nway_reset(struct net_device *ndev)
1807 {
1808         struct ocp_enet_private *dev = ndev->priv;
1809         int res = 0;
1810
1811         DBG("%d: nway_reset" NL, dev->def->index);
1812
1813         if (dev->phy.address < 0)
1814                 return -EOPNOTSUPP;
1815
1816         local_bh_disable();
1817         if (!dev->phy.autoneg) {
1818                 res = -EINVAL;
1819                 goto out;
1820         }
1821
1822         dev->phy.def->ops->setup_aneg(&dev->phy, dev->phy.advertising);
1823         emac_force_link_update(dev);
1824
1825       out:
1826         local_bh_enable();
1827         return res;
1828 }
1829
1830 static int emac_ethtool_get_stats_count(struct net_device *ndev)
1831 {
1832         return EMAC_ETHTOOL_STATS_COUNT;
1833 }
1834
1835 static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset,
1836                                      u8 *buf)
1837 {
1838         if (stringset == ETH_SS_STATS)
1839                 memcpy(buf, &emac_stats_keys, sizeof(emac_stats_keys));
1840 }
1841
1842 static void emac_ethtool_get_ethtool_stats(struct net_device *ndev,
1843                                            struct ethtool_stats *estats,
1844                                            u64 *tmp_stats)
1845 {
1846         struct ocp_enet_private *dev = ndev->priv;
1847         local_irq_disable();
1848         memcpy(tmp_stats, &dev->stats, sizeof(dev->stats));
1849         tmp_stats += sizeof(dev->stats) / sizeof(u64);
1850         memcpy(tmp_stats, &dev->estats, sizeof(dev->estats));
1851         local_irq_enable();
1852 }
1853
1854 static void emac_ethtool_get_drvinfo(struct net_device *ndev,
1855                                      struct ethtool_drvinfo *info)
1856 {
1857         struct ocp_enet_private *dev = ndev->priv;
1858
1859         strcpy(info->driver, "ibm_emac");
1860         strcpy(info->version, DRV_VERSION);
1861         info->fw_version[0] = '\0';
1862         sprintf(info->bus_info, "PPC 4xx EMAC %d", dev->def->index);
1863         info->n_stats = emac_ethtool_get_stats_count(ndev);
1864         info->regdump_len = emac_ethtool_get_regs_len(ndev);
1865 }
1866
1867 static struct ethtool_ops emac_ethtool_ops = {
1868         .get_settings = emac_ethtool_get_settings,
1869         .set_settings = emac_ethtool_set_settings,
1870         .get_drvinfo = emac_ethtool_get_drvinfo,
1871
1872         .get_regs_len = emac_ethtool_get_regs_len,
1873         .get_regs = emac_ethtool_get_regs,
1874
1875         .nway_reset = emac_ethtool_nway_reset,
1876
1877         .get_ringparam = emac_ethtool_get_ringparam,
1878         .get_pauseparam = emac_ethtool_get_pauseparam,
1879
1880         .get_rx_csum = emac_ethtool_get_rx_csum,
1881
1882         .get_strings = emac_ethtool_get_strings,
1883         .get_stats_count = emac_ethtool_get_stats_count,
1884         .get_ethtool_stats = emac_ethtool_get_ethtool_stats,
1885
1886         .get_link = ethtool_op_get_link,
1887         .get_tx_csum = ethtool_op_get_tx_csum,
1888         .get_sg = ethtool_op_get_sg,
1889 };
1890
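     /* MII ioctls.  The u16 overlay of ifr_ifru matches the layout of
      * struct mii_ioctl_data: data[0] = PHY address, data[1] = register,
      * data[2] = value to write, data[3] = value read back.  The
      * SIOCDEVPRIVATE variants are the legacy pre-SIOCGMIIPHY equivalents.
      */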
1891 static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
1892 {
1893         struct ocp_enet_private *dev = ndev->priv;
1894         uint16_t *data = (uint16_t *)&rq->ifr_ifru;
1895
1896         DBG("%d: ioctl %08x" NL, dev->def->index, cmd);
1897
1898         if (dev->phy.address < 0)
1899                 return -EOPNOTSUPP;
1900
1901         switch (cmd) {
1902         case SIOCGMIIPHY:
1903         case SIOCDEVPRIVATE:
1904                 data[0] = dev->phy.address;
1905                 /* Fall through */
1906         case SIOCGMIIREG:
1907         case SIOCDEVPRIVATE + 1:
1908                 data[3] = emac_mdio_read(ndev, dev->phy.address, data[1]);
1909                 return 0;
1910
1911         case SIOCSMIIREG:
1912         case SIOCDEVPRIVATE + 2:
1913                 if (!capable(CAP_NET_ADMIN))
1914                         return -EPERM;
1915                 emac_mdio_write(ndev, dev->phy.address, data[1], data[2]);
1916                 return 0;
1917         default:
1918                 return -EOPNOTSUPP;
1919         }
1920 }
1921
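     /* Probe walks the OCP device tree: locate the MAL we are wired to and
      * register our channel masks with it, resolve the EMAC that owns our
      * MDIO lines if they are shared, attach the optional ZMII/RGMII/TAH
      * helpers, map the registers, find and configure the PHY, and finally
      * register the net_device.  The "outN" labels unwind all of this in
      * reverse order on failure.
      */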
1922 static int __init emac_probe(struct ocp_device *ocpdev)
1923 {
1924         struct ocp_func_emac_data *emacdata = ocpdev->def->additions;
1925         struct net_device *ndev;
1926         struct ocp_device *maldev;
1927         struct ocp_enet_private *dev;
1928         int err, i;
1929
1930         DBG("%d: probe" NL, ocpdev->def->index);
1931
1932         if (!emacdata) {
1933                 printk(KERN_ERR "emac%d: Missing additional data!\n",
1934                        ocpdev->def->index);
1935                 return -ENODEV;
1936         }
1937
1938         /* Allocate our net_device structure */
1939         ndev = alloc_etherdev(sizeof(struct ocp_enet_private));
1940         if (!ndev) {
1941                 printk(KERN_ERR "emac%d: could not allocate ethernet device!\n",
1942                        ocpdev->def->index);
1943                 return -ENOMEM;
1944         }
1945         dev = ndev->priv;
1946         dev->ndev = ndev;
1947         dev->ldev = &ocpdev->dev;
1948         dev->def = ocpdev->def;
1949         SET_MODULE_OWNER(ndev);
1950
1951         /* Find MAL device we are connected to */
1952         maldev =
1953             ocp_find_device(OCP_VENDOR_IBM, OCP_FUNC_MAL, emacdata->mal_idx);
1954         if (!maldev) {
1955                 printk(KERN_ERR "emac%d: unknown mal%d device!\n",
1956                        dev->def->index, emacdata->mal_idx);
1957                 err = -ENODEV;
1958                 goto out;
1959         }
1960         dev->mal = ocp_get_drvdata(maldev);
1961         if (!dev->mal) {
1962                 printk(KERN_ERR "emac%d: mal%d hasn't been initialized yet!\n",
1963                        dev->def->index, emacdata->mal_idx);
1964                 err = -ENODEV;
1965                 goto out;
1966         }
1967
1968         /* Register with MAL */
1969         dev->commac.ops = &emac_commac_ops;
1970         dev->commac.dev = dev;
1971         dev->commac.tx_chan_mask = MAL_CHAN_MASK(emacdata->mal_tx_chan);
1972         dev->commac.rx_chan_mask = MAL_CHAN_MASK(emacdata->mal_rx_chan);
1973         err = mal_register_commac(dev->mal, &dev->commac);
1974         if (err) {
1975                 printk(KERN_ERR "emac%d: failed to register with mal%d!\n",
1976                        dev->def->index, emacdata->mal_idx);
1977                 goto out;
1978         }
1979         dev->rx_skb_size = emac_rx_skb_size(ndev->mtu);
1980         dev->rx_sync_size = emac_rx_sync_size(ndev->mtu);
1981
1982         /* Get pointers to BD rings */
1983         dev->tx_desc =
1984             dev->mal->bd_virt + mal_tx_bd_offset(dev->mal,
1985                                                  emacdata->mal_tx_chan);
1986         dev->rx_desc =
1987             dev->mal->bd_virt + mal_rx_bd_offset(dev->mal,
1988                                                  emacdata->mal_rx_chan);
1989
1990         DBG("%d: tx_desc %p" NL, ocpdev->def->index, dev->tx_desc);
1991         DBG("%d: rx_desc %p" NL, ocpdev->def->index, dev->rx_desc);
1992
1993         /* Clean rings */
1994         memset(dev->tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor));
1995         memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor));
1996
1997         /* If we depend on another EMAC for MDIO, check whether it was probed already */
1998         if (emacdata->mdio_idx >= 0 && emacdata->mdio_idx != ocpdev->def->index) {
1999                 struct ocp_device *mdiodev =
2000                     ocp_find_device(OCP_VENDOR_IBM, OCP_FUNC_EMAC,
2001                                     emacdata->mdio_idx);
2002                 if (!mdiodev) {
2003                         printk(KERN_ERR "emac%d: unknown emac%d device!\n",
2004                                dev->def->index, emacdata->mdio_idx);
2005                         err = -ENODEV;
2006                         goto out2;
2007                 }
2008                 dev->mdio_dev = ocp_get_drvdata(mdiodev);
2009                 if (!dev->mdio_dev) {
2010                         printk(KERN_ERR
2011                                "emac%d: emac%d hasn't been initialized yet!\n",
2012                                dev->def->index, emacdata->mdio_idx);
2013                         err = -ENODEV;
2014                         goto out2;
2015                 }
2016         }
2017
2018         /* Attach to ZMII, if needed */
2019         if ((err = zmii_attach(dev)) != 0)
2020                 goto out2;
2021
2022         /* Attach to RGMII, if needed */
2023         if ((err = rgmii_attach(dev)) != 0)
2024                 goto out3;
2025
2026         /* Attach to TAH, if needed */
2027         if ((err = tah_attach(dev)) != 0)
2028                 goto out4;
2029
2030         /* Map EMAC regs */
2031         dev->emacp =
2032             (struct emac_regs *)ioremap(dev->def->paddr,
2033                                         sizeof(struct emac_regs));
2034         if (!dev->emacp) {
2035                 printk(KERN_ERR "emac%d: could not ioremap device registers!\n",
2036                        dev->def->index);
2037                 err = -ENOMEM;
2038                 goto out5;
2039         }
2040
2041         /* Fill in MAC address */
2042         for (i = 0; i < 6; ++i)
2043                 ndev->dev_addr[i] = emacdata->mac_addr[i];
2044
2045         /* Set some link defaults before we can find out real parameters */
2046         dev->phy.speed = SPEED_100;
2047         dev->phy.duplex = DUPLEX_FULL;
2048         dev->phy.autoneg = AUTONEG_DISABLE;
2049         dev->phy.pause = dev->phy.asym_pause = 0;
2050         init_timer(&dev->link_timer);
2051         dev->link_timer.function = emac_link_timer;
2052         dev->link_timer.data = (unsigned long)dev;
2053
2054         /* Find PHY if any */
2055         dev->phy.dev = ndev;
2056         dev->phy.mode = emacdata->phy_mode;
2057         if (emacdata->phy_map != 0xffffffff) {
2058                 u32 phy_map = emacdata->phy_map | busy_phy_map;
2059                 u32 adv;
2060
2061                 DBG("%d: PHY maps %08x %08x" NL, dev->def->index,
2062                     emacdata->phy_map, busy_phy_map);
2063
2064                 EMAC_RX_CLK_TX(dev->def->index);
2065
2066                 dev->phy.mdio_read = emac_mdio_read;
2067                 dev->phy.mdio_write = emac_mdio_write;
2068
2069                 /* Configure EMAC with defaults so we can at least use MDIO.
2070                  * This is needed mostly for 440GX.
2071                  */
2072                 if (emac_phy_gpcs(dev->phy.mode)) {
2073                         /* XXX
2074                          * Make GPCS PHY address equal to EMAC index.
2075                          * We probably should take into account busy_phy_map
2076                          * and/or phy_map here.
2077                          */
2078                         dev->phy.address = dev->def->index;
2079                 }
2080                 
2081                 emac_configure(dev);
2082
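                     /* Scan all 32 MII addresses, skipping any that is
                      * already claimed (busy_phy_map) or masked out by
                      * phy_map.  A BMCR read of 0xffff (or an error) means
                      * nothing answers there, so the full PHY probe only
                      * runs on addresses that respond.
                      */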
2083                 for (i = 0; i < 0x20; phy_map >>= 1, ++i)
2084                         if (!(phy_map & 1)) {
2085                                 int r;
2086                                 busy_phy_map |= 1 << i;
2087
2088                                 /* Quick check if there is a PHY at the address */
2089                                 r = emac_mdio_read(dev->ndev, i, MII_BMCR);
2090                                 if (r == 0xffff || r < 0)
2091                                         continue;
2092                                 if (!mii_phy_probe(&dev->phy, i))
2093                                         break;
2094                         }
2095                 if (i == 0x20) {
2096                         printk(KERN_WARNING "emac%d: can't find PHY!\n",
2097                                dev->def->index);
                             /* err still holds 0 here; fail the probe explicitly */
                             err = -ENXIO;
2098                         goto out6;
2099                 }
2100
2101                 /* Init PHY */
2102                 if (dev->phy.def->ops->init)
2103                         dev->phy.def->ops->init(&dev->phy);
2104                 
2105                 /* Disable any PHY features not supported by the platform */
2106                 dev->phy.def->features &= ~emacdata->phy_feat_exc;
2107
2108                 /* Setup initial link parameters */
2109                 if (dev->phy.features & SUPPORTED_Autoneg) {
2110                         adv = dev->phy.features;
2111 #if !defined(CONFIG_40x)
2112                         adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
2113 #endif
2114                         /* Restart autonegotiation */
2115                         dev->phy.def->ops->setup_aneg(&dev->phy, adv);
2116                 } else {
2117                         u32 f = dev->phy.def->features;
2118                         int speed = SPEED_10, fd = DUPLEX_HALF;
2119
2120                         /* Select highest supported speed/duplex */
2121                         if (f & SUPPORTED_1000baseT_Full) {
2122                                 speed = SPEED_1000;
2123                                 fd = DUPLEX_FULL;
2124                         } else if (f & SUPPORTED_1000baseT_Half)
2125                                 speed = SPEED_1000;
2126                         else if (f & SUPPORTED_100baseT_Full) {
2127                                 speed = SPEED_100;
2128                                 fd = DUPLEX_FULL;
2129                         } else if (f & SUPPORTED_100baseT_Half)
2130                                 speed = SPEED_100;
2131                         else if (f & SUPPORTED_10baseT_Full)
2132                                 fd = DUPLEX_FULL;
2133
2134                         /* Force link parameters */
2135                         dev->phy.def->ops->setup_forced(&dev->phy, speed, fd);
2136                 }
2137         } else {
2138                 emac_reset(dev);
2139
2140                 /* PHY-less configuration.
2141                  * XXX I probably should move these settings to emacdata
2142                  */
2143                 dev->phy.address = -1;
2144                 dev->phy.features = SUPPORTED_100baseT_Full | SUPPORTED_MII;
2145                 dev->phy.pause = 1;
2146         }
2147
2148         /* Fill in the driver function table */
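             /* A TAH means TX checksumming can be offloaded and the
              * scatter/gather transmit path used; a GiGE-capable PHY mode
              * additionally permits MTU changes and needs the SG-aware MAL
              * RX callbacks.
              */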
2149         ndev->open = &emac_open;
2150         if (dev->tah_dev) {
2151                 ndev->hard_start_xmit = &emac_start_xmit_sg;
2152                 ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
2153         } else
2154                 ndev->hard_start_xmit = &emac_start_xmit;
2155         ndev->tx_timeout = &emac_full_tx_reset;
2156         ndev->watchdog_timeo = 5 * HZ;
2157         ndev->stop = &emac_close;
2158         ndev->get_stats = &emac_stats;
2159         ndev->set_multicast_list = &emac_set_multicast_list;
2160         ndev->do_ioctl = &emac_ioctl;
2161         if (emac_phy_supports_gige(emacdata->phy_mode)) {
2162                 ndev->change_mtu = &emac_change_mtu;
2163                 dev->commac.ops = &emac_commac_sg_ops;
2164         }
2165         SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);
2166
2167         netif_carrier_off(ndev);
2168         netif_stop_queue(ndev);
2169
2170         err = register_netdev(ndev);
2171         if (err) {
2172                 printk(KERN_ERR "emac%d: failed to register net device (%d)!\n",
2173                        dev->def->index, err);
2174                 goto out6;
2175         }
2176
2177         ocp_set_drvdata(ocpdev, dev);
2178
2179         printk(KERN_INFO "%s: emac%d, MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
2180                ndev->name, dev->def->index,
2181                ndev->dev_addr[0], ndev->dev_addr[1], ndev->dev_addr[2],
2182                ndev->dev_addr[3], ndev->dev_addr[4], ndev->dev_addr[5]);
2183
2184         if (dev->phy.address >= 0)
2185                 printk(KERN_INFO "%s: found %s PHY (0x%02x)\n", ndev->name,
2186                        dev->phy.def->name, dev->phy.address);
2187
2188         emac_dbg_register(dev->def->index, dev);
2189
2190         return 0;
2191       out6:
2192         iounmap((void *)dev->emacp);
2193       out5:
2194         tah_fini(dev->tah_dev);
2195       out4:
2196         rgmii_fini(dev->rgmii_dev, dev->rgmii_input);
2197       out3:
2198         zmii_fini(dev->zmii_dev, dev->zmii_input);
2199       out2:
2200         mal_unregister_commac(dev->mal, &dev->commac);
2201       out:
2202         free_netdev(ndev);
2203         return err;
2204 }
2205
2206 static struct ocp_device_id emac_ids[] = {
2207         { .vendor = OCP_VENDOR_IBM, .function = OCP_FUNC_EMAC },
2208         { .vendor = OCP_VENDOR_INVALID }
2209 };
2210
2211 static struct ocp_driver emac_driver = {
2212         .name = "emac",
2213         .id_table = emac_ids,
2214         .probe = emac_probe,
2215         .remove = emac_remove,
2216 };
2217
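     /* Bring up MAL first, then register the OCP driver while the EMACs
      * are fed from the internal clock, switching to the external clock
      * once every instance has been probed.  On chips without the PHY RX
      * clock workaround the EMAC_CLK_* macros are no-ops.
      */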
2218 static int __init emac_init(void)
2219 {
2220         printk(KERN_INFO DRV_DESC ", version " DRV_VERSION "\n");
2221
2222         DBG(": init" NL);
2223
2224         if (mal_init())
2225                 return -ENODEV;
2226
2227         EMAC_CLK_INTERNAL;
2228         if (ocp_register_driver(&emac_driver)) {
2229                 EMAC_CLK_EXTERNAL;
2230                 ocp_unregister_driver(&emac_driver);
2231                 mal_exit();
2232                 return -ENODEV;
2233         }
2234         EMAC_CLK_EXTERNAL;
2235
2236         emac_init_debug();
2237         return 0;
2238 }
2239
2240 static void __exit emac_exit(void)
2241 {
2242         DBG(": exit" NL);
2243         ocp_unregister_driver(&emac_driver);
2244         mal_exit();
2245         emac_fini_debug();
2246 }
2247
2248 module_init(emac_init);
2249 module_exit(emac_exit);