Merge git://git.infradead.org/mtd-2.6
[linux-2.6] / drivers / net / ibm_emac / ibm_emac_core.c
1 /*
2  * drivers/net/ibm_emac/ibm_emac_core.c
3  *
4  * Driver for PowerPC 4xx on-chip ethernet controller.
5  *
6  * Copyright (c) 2004, 2005 Zultys Technologies.
7  * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
8  *
9  * Based on original work by
10  *      Matt Porter <mporter@kernel.crashing.org>
11  *      (c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
12  *      Armin Kuster <akuster@mvista.com>
13  *      Johnnie Peters <jpeters@mvista.com>
14  *
15  * This program is free software; you can redistribute  it and/or modify it
16  * under  the terms of  the GNU General  Public License as published by the
17  * Free Software Foundation;  either version 2 of the  License, or (at your
18  * option) any later version.
19  *
20  */
21
22 #include <linux/module.h>
23 #include <linux/kernel.h>
24 #include <linux/string.h>
25 #include <linux/errno.h>
26 #include <linux/interrupt.h>
27 #include <linux/delay.h>
28 #include <linux/init.h>
29 #include <linux/types.h>
30 #include <linux/pci.h>
31 #include <linux/netdevice.h>
32 #include <linux/etherdevice.h>
33 #include <linux/skbuff.h>
34 #include <linux/crc32.h>
35 #include <linux/ethtool.h>
36 #include <linux/mii.h>
37 #include <linux/bitops.h>
38
39 #include <asm/processor.h>
40 #include <asm/io.h>
41 #include <asm/dma.h>
42 #include <asm/uaccess.h>
43 #include <asm/ocp.h>
44
45 #include "ibm_emac_core.h"
46 #include "ibm_emac_debug.h"
47
48 /*
49  * Lack of dma_unmap_???? calls is intentional.
50  *
51  * API-correct usage requires additional support state information to be 
52  * maintained for every RX and TX buffer descriptor (BD). Unfortunately, due to
53  * EMAC design (e.g. TX buffer passed from network stack can be split into
54  * several BDs, dma_map_single/dma_map_page can be used to map particular BD),
55  * maintaining such information will add additional overhead.
56  * Current DMA API implementation for 4xx processors only ensures cache coherency
57  * and dma_unmap_???? routines are empty and are likely to stay this way.
58  * I decided to omit dma_unmap_??? calls because I don't want to add additional
59  * complexity just for the sake of following some abstract API, when it doesn't
 60  * add any real benefit to the driver. I understand that this decision may be
61  * controversial, but I really tried to make code API-correct and efficient 
62  * at the same time and didn't come up with code I liked :(.                --ebs
63  */
64
65 #define DRV_NAME        "emac"
66 #define DRV_VERSION     "3.54"
67 #define DRV_DESC        "PPC 4xx OCP EMAC driver"
68
69 MODULE_DESCRIPTION(DRV_DESC);
70 MODULE_AUTHOR
71     ("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>");
72 MODULE_LICENSE("GPL");
73
74 /* minimum number of free TX descriptors required to wake up TX process */
75 #define EMAC_TX_WAKEUP_THRESH           (NUM_TX_BUFF / 4)
76
77 /* If packet size is less than this number, we allocate small skb and copy packet 
78  * contents into it instead of just sending original big skb up
79  */
80 #define EMAC_RX_COPY_THRESH             CONFIG_IBM_EMAC_RX_COPY_THRESHOLD
81
82 /* Since multiple EMACs share MDIO lines in various ways, we need
83  * to avoid re-using the same PHY ID in cases where the arch didn't
84  * setup precise phy_map entries
85  */
86 static u32 busy_phy_map;
87
#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX) && \
    (defined(CONFIG_405EP) || defined(CONFIG_440EP) || defined(CONFIG_440GR))
/* 405EP has "EMAC to PHY Control Register" (CPC0_EPCTL) which can help us
 * with PHY RX clock problem.
 * 440EP/440GR has more sane SDR0_MFR register implementation than 440GX, which
 * also allows controlling each EMAC clock
 */

/* Switch EMAC `idx` RX clock to the internal (TX) clock source.
 * Done under local_irq_save() because the read-modify-write of the shared
 * DCR/SDR register must not be interleaved with another EMAC doing the same.
 */
static inline void EMAC_RX_CLK_TX(int idx)
{
        unsigned long flags;
        local_irq_save(flags);

#if defined(CONFIG_405EP)
        /* CPC0_EPCTL (DCR 0xf3): one clock-select bit per EMAC */
        mtdcr(0xf3, mfdcr(0xf3) | (1 << idx));
#else /* CONFIG_440EP || CONFIG_440GR */
        /* SDR0_MFR: per-EMAC clock-select bits, MSB-first from 0x08000000 */
        SDR_WRITE(DCRN_SDR_MFR, SDR_READ(DCRN_SDR_MFR) | (0x08000000 >> idx));
#endif

        local_irq_restore(flags);
}

/* Restore EMAC `idx` RX clock to the default (external PHY) source.
 * Mirror image of EMAC_RX_CLK_TX(): clears the same clock-select bit.
 */
static inline void EMAC_RX_CLK_DEFAULT(int idx)
{
        unsigned long flags;
        local_irq_save(flags);

#if defined(CONFIG_405EP)
        mtdcr(0xf3, mfdcr(0xf3) & ~(1 << idx));
#else /* CONFIG_440EP */
        SDR_WRITE(DCRN_SDR_MFR, SDR_READ(DCRN_SDR_MFR) & ~(0x08000000 >> idx));
#endif

        local_irq_restore(flags);
}
#else
/* No RX clock workaround needed/possible on this SoC: compile to no-ops */
#define EMAC_RX_CLK_TX(idx)             ((void)0)
#define EMAC_RX_CLK_DEFAULT(idx)        ((void)0)
#endif
126
#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX) && defined(CONFIG_440GX)
/* We can switch Ethernet clock to the internal source through SDR0_MFR[ECS],
 * unfortunately this is less flexible than 440EP case, because it's a global 
 * setting for all EMACs, therefore we do this clock trick only during probe.
 */
/* NOTE: global toggle, affects every EMAC on the chip at once */
#define EMAC_CLK_INTERNAL               SDR_WRITE(DCRN_SDR_MFR, \
                                            SDR_READ(DCRN_SDR_MFR) | 0x08000000)
#define EMAC_CLK_EXTERNAL               SDR_WRITE(DCRN_SDR_MFR, \
                                            SDR_READ(DCRN_SDR_MFR) & ~0x08000000)
#else
/* Workaround not applicable: compile to no-ops */
#define EMAC_CLK_INTERNAL               ((void)0)
#define EMAC_CLK_EXTERNAL               ((void)0)
#endif
140
/* I don't want to litter system log with timeout errors 
 * when we have brain-damaged PHY.
 */
/* Report a hardware wait timeout for EMAC `dev`.
 * With the PHY RX clock workaround enabled these timeouts are expected and
 * would flood the log, so they are demoted to debug output; otherwise they
 * are printed rate-limited at KERN_ERR.
 */
static inline void emac_report_timeout_error(struct ocp_enet_private *dev,
                                             const char *error)
{
#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX)
        DBG("%d: %s" NL, dev->def->index, error);
#else
        if (net_ratelimit())
                printk(KERN_ERR "emac%d: %s\n", dev->def->index, error);
#endif
}
154
155 /* PHY polling intervals */
156 #define PHY_POLL_LINK_ON        HZ
157 #define PHY_POLL_LINK_OFF       (HZ / 5)
158
159 /* Graceful stop timeouts in us. 
160  * We should allow up to 1 frame time (full-duplex, ignoring collisions) 
161  */
162 #define STOP_TIMEOUT_10         1230    
163 #define STOP_TIMEOUT_100        124
164 #define STOP_TIMEOUT_1000       13
165 #define STOP_TIMEOUT_1000_JUMBO 73
166
/* Please, keep in sync with struct ibm_emac_stats/ibm_emac_error_stats */
/* ethtool -S counter names; the order must match the field order of the
 * two stats structures because they are dumped as one flat u64 array.
 */
static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = {
        "rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum",
        "tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom",
        "rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu",
        "rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet",
        "rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error",
        "rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range",
        "rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun",
        "rx_bad_packet", "rx_runt_packet", "rx_short_event",
        "rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long",
        "rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors",
        "tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral",
        "tx_bd_excessive_collisions", "tx_bd_late_collision",
        "tx_bd_multple_collisions", "tx_bd_single_collision",
        "tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe",
        "tx_errors"
};
185
186 static irqreturn_t emac_irq(int irq, void *dev_instance);
187 static void emac_clean_tx_ring(struct ocp_enet_private *dev);
188
189 static inline int emac_phy_supports_gige(int phy_mode)
190 {
191         return  phy_mode == PHY_MODE_GMII ||
192                 phy_mode == PHY_MODE_RGMII ||
193                 phy_mode == PHY_MODE_TBI ||
194                 phy_mode == PHY_MODE_RTBI;
195 }
196
197 static inline int emac_phy_gpcs(int phy_mode)
198 {
199         return  phy_mode == PHY_MODE_TBI ||
200                 phy_mode == PHY_MODE_RTBI;
201 }
202
203 static inline void emac_tx_enable(struct ocp_enet_private *dev)
204 {
205         struct emac_regs __iomem *p = dev->emacp;
206         unsigned long flags;
207         u32 r;
208
209         local_irq_save(flags);
210
211         DBG("%d: tx_enable" NL, dev->def->index);
212
213         r = in_be32(&p->mr0);
214         if (!(r & EMAC_MR0_TXE))
215                 out_be32(&p->mr0, r | EMAC_MR0_TXE);
216         local_irq_restore(flags);
217 }
218
219 static void emac_tx_disable(struct ocp_enet_private *dev)
220 {
221         struct emac_regs __iomem *p = dev->emacp;
222         unsigned long flags;
223         u32 r;
224
225         local_irq_save(flags);
226
227         DBG("%d: tx_disable" NL, dev->def->index);
228
229         r = in_be32(&p->mr0);
230         if (r & EMAC_MR0_TXE) {
231                 int n = dev->stop_timeout;
232                 out_be32(&p->mr0, r & ~EMAC_MR0_TXE);
233                 while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n) {
234                         udelay(1);
235                         --n;
236                 }       
237                 if (unlikely(!n))
238                         emac_report_timeout_error(dev, "TX disable timeout");
239         }
240         local_irq_restore(flags);
241 }
242
243 static void emac_rx_enable(struct ocp_enet_private *dev)
244 {
245         struct emac_regs __iomem *p = dev->emacp;
246         unsigned long flags;
247         u32 r;
248
249         local_irq_save(flags);
250         if (unlikely(dev->commac.rx_stopped))
251                 goto out;
252
253         DBG("%d: rx_enable" NL, dev->def->index);
254
255         r = in_be32(&p->mr0);
256         if (!(r & EMAC_MR0_RXE)) {
257                 if (unlikely(!(r & EMAC_MR0_RXI))) {
258                         /* Wait if previous async disable is still in progress */
259                         int n = dev->stop_timeout;
260                         while (!(r = in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
261                                 udelay(1);
262                                 --n;
263                         }       
264                         if (unlikely(!n))
265                                 emac_report_timeout_error(dev,
266                                                           "RX disable timeout");
267                 }
268                 out_be32(&p->mr0, r | EMAC_MR0_RXE);
269         }
270       out:
271         local_irq_restore(flags);
272 }
273
274 static void emac_rx_disable(struct ocp_enet_private *dev)
275 {
276         struct emac_regs __iomem *p = dev->emacp;
277         unsigned long flags;
278         u32 r;
279
280         local_irq_save(flags);
281
282         DBG("%d: rx_disable" NL, dev->def->index);
283
284         r = in_be32(&p->mr0);
285         if (r & EMAC_MR0_RXE) {
286                 int n = dev->stop_timeout;
287                 out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
288                 while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
289                         udelay(1);
290                         --n;
291                 }       
292                 if (unlikely(!n))
293                         emac_report_timeout_error(dev, "RX disable timeout");
294         }
295         local_irq_restore(flags);
296 }
297
298 static inline void emac_rx_disable_async(struct ocp_enet_private *dev)
299 {
300         struct emac_regs __iomem *p = dev->emacp;
301         unsigned long flags;
302         u32 r;
303
304         local_irq_save(flags);
305
306         DBG("%d: rx_disable_async" NL, dev->def->index);
307
308         r = in_be32(&p->mr0);
309         if (r & EMAC_MR0_RXE)
310                 out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
311         local_irq_restore(flags);
312 }
313
314 static int emac_reset(struct ocp_enet_private *dev)
315 {
316         struct emac_regs __iomem *p = dev->emacp;
317         unsigned long flags;
318         int n = 20;
319
320         DBG("%d: reset" NL, dev->def->index);
321
322         local_irq_save(flags);
323
324         if (!dev->reset_failed) {
325                 /* 40x erratum suggests stopping RX channel before reset,
326                  * we stop TX as well
327                  */
328                 emac_rx_disable(dev);
329                 emac_tx_disable(dev);
330         }
331
332         out_be32(&p->mr0, EMAC_MR0_SRST);
333         while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n)
334                 --n;
335         local_irq_restore(flags);
336
337         if (n) {
338                 dev->reset_failed = 0;
339                 return 0;
340         } else {
341                 emac_report_timeout_error(dev, "reset timeout");
342                 dev->reset_failed = 1;
343                 return -ETIMEDOUT;
344         }
345 }
346
/* Program the four 16-bit Group Address Hash Table registers (GAHT1-4)
 * from the device's current multicast list. Each address is hashed with
 * the Ethernet CRC; the top 6 CRC bits select one of 64 hash-table bits.
 */
static void emac_hash_mc(struct ocp_enet_private *dev)
{
        struct emac_regs __iomem *p = dev->emacp;
        u16 gaht[4] = { 0 };
        struct dev_mc_list *dmi;

        DBG("%d: hash_mc %d" NL, dev->def->index, dev->ndev->mc_count);

        for (dmi = dev->ndev->mc_list; dmi; dmi = dmi->next) {
                int bit;
                DBG2("%d: mc %02x:%02x:%02x:%02x:%02x:%02x" NL,
                     dev->def->index,
                     dmi->dmi_addr[0], dmi->dmi_addr[1], dmi->dmi_addr[2],
                     dmi->dmi_addr[3], dmi->dmi_addr[4], dmi->dmi_addr[5]);

                /* Top 6 bits of the CRC, reflected, index bit 0..63 */
                bit = 63 - (ether_crc(ETH_ALEN, dmi->dmi_addr) >> 26);
                /* Bit 0 is the MSB of gaht[0], bit 63 the LSB of gaht[3] */
                gaht[bit >> 4] |= 0x8000 >> (bit & 0x0f);
        }
        out_be32(&p->gaht1, gaht[0]);
        out_be32(&p->gaht2, gaht[1]);
        out_be32(&p->gaht3, gaht[2]);
        out_be32(&p->gaht4, gaht[3]);
}
370
371 static inline u32 emac_iff2rmr(struct net_device *ndev)
372 {
373         u32 r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE |
374             EMAC_RMR_BASE;
375
376         if (ndev->flags & IFF_PROMISC)
377                 r |= EMAC_RMR_PME;
378         else if (ndev->flags & IFF_ALLMULTI || ndev->mc_count > 32)
379                 r |= EMAC_RMR_PMME;
380         else if (ndev->mc_count > 0)
381                 r |= EMAC_RMR_MAE;
382
383         return r;
384 }
385
386 static inline int emac_opb_mhz(void)
387 {
388         return (ocp_sys_info.opb_bus_freq + 500000) / 1000000;
389 }
390
/* Fully (re)program the EMAC from the current PHY state: reset the core,
 * then set up MR1 (duplex/speed/FIFO sizes), the MAC address, VLAN TPID,
 * receive mode, FIFO thresholds, PAUSE watermarks and the interrupt mask.
 * Called with BHs disabled. Returns 0 or -ETIMEDOUT if the reset fails.
 */
static int emac_configure(struct ocp_enet_private *dev)
{
        struct emac_regs __iomem *p = dev->emacp;
        struct net_device *ndev = dev->ndev;
        int gige;
        u32 r;

        DBG("%d: configure" NL, dev->def->index);

        if (emac_reset(dev) < 0)
                return -ETIMEDOUT;

        tah_reset(dev->tah_dev);

        /* Mode register */
        r = EMAC_MR1_BASE(emac_opb_mhz()) | EMAC_MR1_VLE | EMAC_MR1_IST;
        if (dev->phy.duplex == DUPLEX_FULL)
                r |= EMAC_MR1_FDE | EMAC_MR1_MWSW_001;
        /* Also selects the per-speed graceful-stop timeout used by
         * emac_tx_disable()/emac_rx_disable()
         */
        dev->stop_timeout = STOP_TIMEOUT_10;
        switch (dev->phy.speed) {
        case SPEED_1000:
                if (emac_phy_gpcs(dev->phy.mode)) {
                        r |= EMAC_MR1_MF_1000GPCS |
                            EMAC_MR1_MF_IPPA(dev->phy.address);

                        /* Put some arbitrary OUI, Manuf & Rev IDs so we can
                         * identify this GPCS PHY later.
                         */
                        out_be32(&p->ipcr, 0xdeadbeef);
                } else
                        r |= EMAC_MR1_MF_1000;
                r |= EMAC_MR1_RFS_16K;
                gige = 1;

                if (dev->ndev->mtu > ETH_DATA_LEN) {
                        r |= EMAC_MR1_JPSM;
                        dev->stop_timeout = STOP_TIMEOUT_1000_JUMBO;
                } else
                        dev->stop_timeout = STOP_TIMEOUT_1000;
                break;
        case SPEED_100:
                r |= EMAC_MR1_MF_100;
                dev->stop_timeout = STOP_TIMEOUT_100;
                /* Fall through */
        default:
                r |= EMAC_MR1_RFS_4K;
                gige = 0;
                break;
        }

        if (dev->rgmii_dev)
                rgmii_set_speed(dev->rgmii_dev, dev->rgmii_input,
                                dev->phy.speed);
        else
                zmii_set_speed(dev->zmii_dev, dev->zmii_input, dev->phy.speed);

#if !defined(CONFIG_40x)
        /* on 40x erratum forces us to NOT use integrated flow control, 
         * let's hope it works on 44x ;)
         */
        if (dev->phy.duplex == DUPLEX_FULL) {
                if (dev->phy.pause)
                        r |= EMAC_MR1_EIFC | EMAC_MR1_APP;
                else if (dev->phy.asym_pause)
                        r |= EMAC_MR1_APP;
        }
#endif
        out_be32(&p->mr1, r);

        /* Set individual MAC address */
        out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
        out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
                 (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
                 ndev->dev_addr[5]);

        /* VLAN Tag Protocol ID */
        out_be32(&p->vtpid, 0x8100);

        /* Receive mode register */
        r = emac_iff2rmr(ndev);
        if (r & EMAC_RMR_MAE)
                emac_hash_mc(dev);
        out_be32(&p->rmr, r);

        /* FIFOs thresholds */
        r = EMAC_TMR1((EMAC_MAL_BURST_SIZE / EMAC_FIFO_ENTRY_SIZE) + 1,
                      EMAC_TX_FIFO_SIZE / 2 / EMAC_FIFO_ENTRY_SIZE);
        out_be32(&p->tmr1, r);
        out_be32(&p->trtr, EMAC_TRTR(EMAC_TX_FIFO_SIZE / 2));

        /* PAUSE frame is sent when RX FIFO reaches its high-water mark,
           there should be still enough space in FIFO to allow the our link
           partner time to process this frame and also time to send PAUSE 
           frame itself.

           Here is the worst case scenario for the RX FIFO "headroom"
           (from "The Switch Book") (100Mbps, without preamble, inter-frame gap):

           1) One maximum-length frame on TX                    1522 bytes
           2) One PAUSE frame time                                64 bytes
           3) PAUSE frame decode time allowance                   64 bytes
           4) One maximum-length frame on RX                    1522 bytes
           5) Round-trip propagation delay of the link (100Mb)    15 bytes
           ----------       
           3187 bytes

           I chose to set high-water mark to RX_FIFO_SIZE / 4 (1024 bytes)
           low-water mark  to RX_FIFO_SIZE / 8 (512 bytes)
         */
        r = EMAC_RWMR(EMAC_RX_FIFO_SIZE(gige) / 8 / EMAC_FIFO_ENTRY_SIZE,
                      EMAC_RX_FIFO_SIZE(gige) / 4 / EMAC_FIFO_ENTRY_SIZE);
        out_be32(&p->rwmr, r);

        /* Set PAUSE timer to the maximum */
        out_be32(&p->ptr, 0xffff);

        /* IRQ sources */
        out_be32(&p->iser, EMAC_ISR_TXPE | EMAC_ISR_RXPE | /* EMAC_ISR_TXUE |
                 EMAC_ISR_RXOE | */ EMAC_ISR_OVR | EMAC_ISR_BP | EMAC_ISR_SE |
                 EMAC_ISR_ALE | EMAC_ISR_BFCS | EMAC_ISR_PTLE | EMAC_ISR_ORE |
                 EMAC_ISR_IRE | EMAC_ISR_TE);

        /* We need to take GPCS PHY out of isolate mode after EMAC reset */
        if (emac_phy_gpcs(dev->phy.mode)) 
                mii_reset_phy(&dev->phy);

        return 0;
}
520
521 /* BHs disabled */
522 static void emac_reinitialize(struct ocp_enet_private *dev)
523 {
524         DBG("%d: reinitialize" NL, dev->def->index);
525
526         if (!emac_configure(dev)) {
527                 emac_tx_enable(dev);
528                 emac_rx_enable(dev);
529         }
530 }
531
/* Heavyweight TX recovery: stop the TX MAC and its MAL channel, drop every
 * queued TX skb, reprogram the whole EMAC, then bring TX (and RX) back up
 * and wake the netdev queue. Called with BHs disabled.
 */
static void emac_full_tx_reset(struct net_device *ndev)
{
        struct ocp_enet_private *dev = ndev->priv;
        struct ocp_func_emac_data *emacdata = dev->def->additions;

        DBG("%d: full_tx_reset" NL, dev->def->index);

        /* Quiesce: MAC first, then the MAL DMA channel feeding it */
        emac_tx_disable(dev);
        mal_disable_tx_channel(dev->mal, emacdata->mal_tx_chan);
        emac_clean_tx_ring(dev);
        dev->tx_cnt = dev->tx_slot = dev->ack_slot = 0;

        emac_configure(dev);

        /* Restart in the reverse order */
        mal_enable_tx_channel(dev->mal, emacdata->mal_tx_chan);
        emac_tx_enable(dev);
        emac_rx_enable(dev);

        netif_wake_queue(ndev);
}
553
/* Low-level MDIO read through this EMAC's STA controller.
 * @dev: the EMAC that owns the MDIO lines (may differ from the caller's EMAC)
 * @id:  PHY address, @reg: PHY register number
 * Returns the 16-bit register value, -EREMOTEIO if the PHY flagged an
 * error, or -ETIMEDOUT if the management interface never became idle /
 * never completed. Caller must serialize (see emac_mdio_read()).
 */
static int __emac_mdio_read(struct ocp_enet_private *dev, u8 id, u8 reg)
{
        struct emac_regs __iomem *p = dev->emacp;
        u32 r;
        int n;

        DBG2("%d: mdio_read(%02x,%02x)" NL, dev->def->index, id, reg);

        /* Enable proper MDIO port */
        zmii_enable_mdio(dev->zmii_dev, dev->zmii_input);

        /* Wait for management interface to become idle */
        n = 10;
        while (!emac_phy_done(in_be32(&p->stacr))) {
                udelay(1);
                if (!--n)
                        goto to;
        }

        /* Issue read command */
        out_be32(&p->stacr,
                 EMAC_STACR_BASE(emac_opb_mhz()) | EMAC_STACR_STAC_READ |
                 (reg & EMAC_STACR_PRA_MASK)
                 | ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT)
                 | EMAC_STACR_START);

        /* Wait for read to complete */
        n = 100;
        while (!emac_phy_done(r = in_be32(&p->stacr))) {
                udelay(1);
                if (!--n)
                        goto to;
        }

        if (unlikely(r & EMAC_STACR_PHYE)) {
                DBG("%d: mdio_read(%02x, %02x) failed" NL, dev->def->index,
                    id, reg);
                return -EREMOTEIO;
        }

        /* Data is returned in the PHYD field of STACR */
        r = ((r >> EMAC_STACR_PHYD_SHIFT) & EMAC_STACR_PHYD_MASK);
        DBG2("%d: mdio_read -> %04x" NL, dev->def->index, r);
        return r;
      to:
        DBG("%d: MII management interface timeout (read)" NL, dev->def->index);
        return -ETIMEDOUT;
}
601
/* Low-level MDIO write through this EMAC's STA controller.
 * @dev: the EMAC that owns the MDIO lines, @id: PHY address,
 * @reg: PHY register number, @val: 16-bit value to write.
 * Timeouts are logged but not reported to the caller (write is
 * fire-and-forget). Caller must serialize (see emac_mdio_write()).
 */
static void __emac_mdio_write(struct ocp_enet_private *dev, u8 id, u8 reg,
                              u16 val)
{
        struct emac_regs __iomem *p = dev->emacp;
        int n;

        DBG2("%d: mdio_write(%02x,%02x,%04x)" NL, dev->def->index, id, reg,
             val);

        /* Enable proper MDIO port */
        zmii_enable_mdio(dev->zmii_dev, dev->zmii_input);

        /* Wait for management interface to be idle */
        n = 10;
        while (!emac_phy_done(in_be32(&p->stacr))) {
                udelay(1);
                if (!--n)
                        goto to;
        }

        /* Issue write command */
        out_be32(&p->stacr,
                 EMAC_STACR_BASE(emac_opb_mhz()) | EMAC_STACR_STAC_WRITE |
                 (reg & EMAC_STACR_PRA_MASK) |
                 ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT) |
                 (val << EMAC_STACR_PHYD_SHIFT) | EMAC_STACR_START);

        /* Wait for write to complete */
        n = 100;
        while (!emac_phy_done(in_be32(&p->stacr))) {
                udelay(1);
                if (!--n)
                        goto to;
        }
        return;
      to:
        DBG("%d: MII management interface timeout (write)" NL, dev->def->index);
}
640
641 static int emac_mdio_read(struct net_device *ndev, int id, int reg)
642 {
643         struct ocp_enet_private *dev = ndev->priv;
644         int res;
645
646         local_bh_disable();
647         res = __emac_mdio_read(dev->mdio_dev ? dev->mdio_dev : dev, (u8) id,
648                                (u8) reg);
649         local_bh_enable();
650         return res;
651 }
652
653 static void emac_mdio_write(struct net_device *ndev, int id, int reg, int val)
654 {
655         struct ocp_enet_private *dev = ndev->priv;
656
657         local_bh_disable();
658         __emac_mdio_write(dev->mdio_dev ? dev->mdio_dev : dev, (u8) id,
659                           (u8) reg, (u16) val);
660         local_bh_enable();
661 }
662
/* ndo set_multicast_list hook: reprogram RMR (and the multicast hash
 * table when needed) around a brief RX-only stop. Called with BHs
 * disabled; the device must be running.
 */
static void emac_set_multicast_list(struct net_device *ndev)
{
        struct ocp_enet_private *dev = ndev->priv;
        struct emac_regs __iomem *p = dev->emacp;
        u32 rmr = emac_iff2rmr(ndev);

        DBG("%d: multicast %08x" NL, dev->def->index, rmr);
        BUG_ON(!netif_running(dev->ndev));

        /* I decided to relax register access rules here to avoid
         * full EMAC reset.
         *
         * There is a real problem with EMAC4 core if we use MWSW_001 bit 
         * in MR1 register and do a full EMAC reset.
         * One TX BD status update is delayed and, after EMAC reset, it 
         * never happens, resulting in TX hung (it'll be recovered by TX 
         * timeout handler eventually, but this is just gross).
         * So we either have to do full TX reset or try to cheat here :)
         *
         * The only required change is to RX mode register, so I *think* all
         * we need is just to stop RX channel. This seems to work on all
         * tested SoCs.                                                --ebs
         */
        emac_rx_disable(dev);
        if (rmr & EMAC_RMR_MAE)
                emac_hash_mc(dev);
        out_be32(&p->rmr, rmr);
        emac_rx_enable(dev);
}
693
/* Rebuild the RX ring for a new MTU. Called with BHs disabled.
 * Strategy: stop RX, drop any partially assembled SG packet, reset every
 * BD in a first pass (so a later allocation failure leaves a consistent
 * ring), then grow the skbs only if the new MTU actually needs bigger
 * buffers. Returns 0 or -ENOMEM; RX is always restarted before returning.
 */
static int emac_resize_rx_ring(struct ocp_enet_private *dev, int new_mtu)
{
        struct ocp_func_emac_data *emacdata = dev->def->additions;
        int rx_sync_size = emac_rx_sync_size(new_mtu);
        int rx_skb_size = emac_rx_skb_size(new_mtu);
        int i, ret = 0;

        emac_rx_disable(dev);
        mal_disable_rx_channel(dev->mal, emacdata->mal_rx_chan);

        /* Any half-assembled scatter-gather packet is now meaningless */
        if (dev->rx_sg_skb) {
                ++dev->estats.rx_dropped_resize;
                dev_kfree_skb(dev->rx_sg_skb);
                dev->rx_sg_skb = NULL;
        }

        /* Make a first pass over RX ring and mark BDs ready, dropping 
         * non-processed packets on the way. We need this as a separate pass
         * to simplify error recovery in the case of allocation failure later.
         */
        for (i = 0; i < NUM_RX_BUFF; ++i) {
                if (dev->rx_desc[i].ctrl & MAL_RX_CTRL_FIRST)
                        ++dev->estats.rx_dropped_resize;

                dev->rx_desc[i].data_len = 0;
                dev->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY |
                    (i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
        }

        /* Reallocate RX ring only if bigger skb buffers are required */
        if (rx_skb_size <= dev->rx_skb_size)
                goto skip;

        /* Second pass, allocate new skbs */
        for (i = 0; i < NUM_RX_BUFF; ++i) {
                struct sk_buff *skb = alloc_skb(rx_skb_size, GFP_ATOMIC);
                if (!skb) {
                        /* Ring BDs are already consistent (first pass),
                         * so we can bail out and just restart RX */
                        ret = -ENOMEM;
                        goto oom;
                }

                BUG_ON(!dev->rx_skb[i]);
                dev_kfree_skb(dev->rx_skb[i]);

                /* +2 keeps the IP header word-aligned after the 14-byte
                 * Ethernet header; the DMA mapping compensates with -2/+2 */
                skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
                dev->rx_desc[i].data_ptr =
                    dma_map_single(dev->ldev, skb->data - 2, rx_sync_size,
                                   DMA_FROM_DEVICE) + 2;
                dev->rx_skb[i] = skb;
        }
      skip:
        /* Check if we need to change "Jumbo" bit in MR1 */
        if ((new_mtu > ETH_DATA_LEN) ^ (dev->ndev->mtu > ETH_DATA_LEN)) {
                /* This is to prevent starting RX channel in emac_rx_enable() */
                dev->commac.rx_stopped = 1;

                dev->ndev->mtu = new_mtu;
                emac_full_tx_reset(dev->ndev);
        }

        mal_set_rcbs(dev->mal, emacdata->mal_rx_chan, emac_rx_size(new_mtu));
      oom:
        /* Restart RX */
        dev->commac.rx_stopped = dev->rx_slot = 0;
        mal_enable_rx_channel(dev->mal, emacdata->mal_rx_chan);
        emac_rx_enable(dev);

        return ret;
}
764
765 /* Process ctx, rtnl_lock semaphore */
766 static int emac_change_mtu(struct net_device *ndev, int new_mtu)
767 {
768         struct ocp_enet_private *dev = ndev->priv;
769         int ret = 0;
770
771         if (new_mtu < EMAC_MIN_MTU || new_mtu > EMAC_MAX_MTU)
772                 return -EINVAL;
773
774         DBG("%d: change_mtu(%d)" NL, dev->def->index, new_mtu);
775
776         local_bh_disable();
777         if (netif_running(ndev)) {
778                 /* Check if we really need to reinitalize RX ring */
779                 if (emac_rx_skb_size(ndev->mtu) != emac_rx_skb_size(new_mtu))
780                         ret = emac_resize_rx_ring(dev, new_mtu);
781         }
782
783         if (!ret) {
784                 ndev->mtu = new_mtu;
785                 dev->rx_skb_size = emac_rx_skb_size(new_mtu);
786                 dev->rx_sync_size = emac_rx_sync_size(new_mtu);
787         }       
788         local_bh_enable();
789
790         return ret;
791 }
792
793 static void emac_clean_tx_ring(struct ocp_enet_private *dev)
794 {
795         int i;
796         for (i = 0; i < NUM_TX_BUFF; ++i) {
797                 if (dev->tx_skb[i]) {
798                         dev_kfree_skb(dev->tx_skb[i]);
799                         dev->tx_skb[i] = NULL;
800                         if (dev->tx_desc[i].ctrl & MAL_TX_CTRL_READY)
801                                 ++dev->estats.tx_dropped;
802                 }
803                 dev->tx_desc[i].ctrl = 0;
804                 dev->tx_desc[i].data_ptr = 0;
805         }
806 }
807
808 static void emac_clean_rx_ring(struct ocp_enet_private *dev)
809 {
810         int i;
811         for (i = 0; i < NUM_RX_BUFF; ++i)
812                 if (dev->rx_skb[i]) {
813                         dev->rx_desc[i].ctrl = 0;
814                         dev_kfree_skb(dev->rx_skb[i]);
815                         dev->rx_skb[i] = NULL;
816                         dev->rx_desc[i].data_ptr = 0;
817                 }
818
819         if (dev->rx_sg_skb) {
820                 dev_kfree_skb(dev->rx_sg_skb);
821                 dev->rx_sg_skb = NULL;
822         }
823 }
824
/* Allocate, DMA-map and install one RX skb into ring position `slot`.
 * Returns 0 or -ENOMEM. The skb data pointer is offset by 2 so the IP
 * header lands word-aligned after the 14-byte Ethernet header.
 */
static inline int emac_alloc_rx_skb(struct ocp_enet_private *dev, int slot,
                                    gfp_t flags)
{
        struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags);
        if (unlikely(!skb))
                return -ENOMEM;

        dev->rx_skb[slot] = skb;
        dev->rx_desc[slot].data_len = 0;

        skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
        /* Map from data-2 and re-add 2 so the mapped region covers the
         * whole sync window while data_ptr stays at the aligned payload */
        dev->rx_desc[slot].data_ptr = 
            dma_map_single(dev->ldev, skb->data - 2, dev->rx_sync_size, 
                           DMA_FROM_DEVICE) + 2;
        /* data_ptr must be visible before the BD is marked EMPTY for HW */
        barrier();
        dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
            (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);

        return 0;
}
845
846 static void emac_print_link_status(struct ocp_enet_private *dev)
847 {
848         if (netif_carrier_ok(dev->ndev))
849                 printk(KERN_INFO "%s: link is up, %d %s%s\n",
850                        dev->ndev->name, dev->phy.speed,
851                        dev->phy.duplex == DUPLEX_FULL ? "FDX" : "HDX",
852                        dev->phy.pause ? ", pause enabled" :
853                        dev->phy.asym_pause ? ", assymetric pause enabled" : "");
854         else
855                 printk(KERN_INFO "%s: link is down\n", dev->ndev->name);
856 }
857
858 /* Process ctx, rtnl_lock semaphore */
/* Bring the interface up: request the error IRQ, populate the RX ring,
 * sample the PHY link state, configure the MAC and enable the MAL
 * channels.  On RX-ring allocation failure the partially built ring and
 * the IRQ are torn down via the 'oom' label.  Returns 0 or -errno.
 */
static int emac_open(struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	struct ocp_func_emac_data *emacdata = dev->def->additions;
	int err, i;

	DBG("%d: open" NL, dev->def->index);

	/* Setup error IRQ handler */
	err = request_irq(dev->def->irq, emac_irq, 0, "EMAC", dev);
	if (err) {
		printk(KERN_ERR "%s: failed to request IRQ %d\n",
		       ndev->name, dev->def->irq);
		return err;
	}

	/* Allocate RX ring */
	for (i = 0; i < NUM_RX_BUFF; ++i)
		if (emac_alloc_rx_skb(dev, i, GFP_KERNEL)) {
			printk(KERN_ERR "%s: failed to allocate RX ring\n",
			       ndev->name);
			goto oom;
		}

	/* BHs disabled while ring indices and PHY state are initialized so
	 * the MAL poll callbacks cannot run concurrently.
	 */
	local_bh_disable();
	dev->tx_cnt = dev->tx_slot = dev->ack_slot = dev->rx_slot =
	    dev->commac.rx_stopped = 0;
	dev->rx_sg_skb = NULL;

	if (dev->phy.address >= 0) {
		int link_poll_interval;
		/* Sample the current link state and arm the link poll timer
		 * with the matching (on/off) poll interval.
		 */
		if (dev->phy.def->ops->poll_link(&dev->phy)) {
			dev->phy.def->ops->read_link(&dev->phy);
			EMAC_RX_CLK_DEFAULT(dev->def->index);
			netif_carrier_on(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_ON;
		} else {
			EMAC_RX_CLK_TX(dev->def->index);
			netif_carrier_off(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_OFF;
		}
		mod_timer(&dev->link_timer, jiffies + link_poll_interval);
		emac_print_link_status(dev);
	} else
		/* No PHY attached: report the carrier as always present */
		netif_carrier_on(dev->ndev);

	emac_configure(dev);
	mal_poll_add(dev->mal, &dev->commac);
	mal_enable_tx_channel(dev->mal, emacdata->mal_tx_chan);
	mal_set_rcbs(dev->mal, emacdata->mal_rx_chan, emac_rx_size(ndev->mtu));
	mal_enable_rx_channel(dev->mal, emacdata->mal_rx_chan);
	emac_tx_enable(dev);
	emac_rx_enable(dev);
	netif_start_queue(ndev);
	local_bh_enable();

	return 0;
      oom:
	emac_clean_rx_ring(dev);
	free_irq(dev->def->irq, dev);
	return -ENOMEM;
}
921
922 /* BHs disabled */
923 static int emac_link_differs(struct ocp_enet_private *dev)
924 {
925         u32 r = in_be32(&dev->emacp->mr1);
926
927         int duplex = r & EMAC_MR1_FDE ? DUPLEX_FULL : DUPLEX_HALF;
928         int speed, pause, asym_pause;
929
930         if (r & (EMAC_MR1_MF_1000 | EMAC_MR1_MF_1000GPCS))
931                 speed = SPEED_1000;
932         else if (r & EMAC_MR1_MF_100)
933                 speed = SPEED_100;
934         else
935                 speed = SPEED_10;
936
937         switch (r & (EMAC_MR1_EIFC | EMAC_MR1_APP)) {
938         case (EMAC_MR1_EIFC | EMAC_MR1_APP):
939                 pause = 1;
940                 asym_pause = 0;
941                 break;
942         case EMAC_MR1_APP:
943                 pause = 0;
944                 asym_pause = 1;
945                 break;
946         default:
947                 pause = asym_pause = 0;
948         }
949         return speed != dev->phy.speed || duplex != dev->phy.duplex ||
950             pause != dev->phy.pause || asym_pause != dev->phy.asym_pause;
951 }
952
953 /* BHs disabled */
/* Periodic link poll timer.  Detects link up/down transitions, reloads
 * the link parameters from the PHY and performs a full TX reset when
 * needed; reschedules itself with an interval that depends on the link
 * state.
 */
static void emac_link_timer(unsigned long data)
{
	struct ocp_enet_private *dev = (struct ocp_enet_private *)data;
	int link_poll_interval;

	DBG2("%d: link timer" NL, dev->def->index);

	if (dev->phy.def->ops->poll_link(&dev->phy)) {
		if (!netif_carrier_ok(dev->ndev)) {
			EMAC_RX_CLK_DEFAULT(dev->def->index);

			/* Get new link parameters */
			dev->phy.def->ops->read_link(&dev->phy);

			/* With a TAH attached the reset is done
			 * unconditionally; otherwise only when the link
			 * parameters actually changed.
			 */
			if (dev->tah_dev || emac_link_differs(dev))
				emac_full_tx_reset(dev->ndev);

			netif_carrier_on(dev->ndev);
			emac_print_link_status(dev);
		}
		link_poll_interval = PHY_POLL_LINK_ON;
	} else {
		if (netif_carrier_ok(dev->ndev)) {
			EMAC_RX_CLK_TX(dev->def->index);
#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX)
			emac_reinitialize(dev);
#endif
			netif_carrier_off(dev->ndev);
			emac_print_link_status(dev);
		}

		/* Retry reset if the previous attempt failed.
		 * This is needed mostly for CONFIG_IBM_EMAC_PHY_RX_CLK_FIX
		 * case, but I left it here because it shouldn't trigger for
		 * sane PHYs anyway.
		 */
		if (unlikely(dev->reset_failed))
			emac_reinitialize(dev);

		link_poll_interval = PHY_POLL_LINK_OFF;
	}
	mod_timer(&dev->link_timer, jiffies + link_poll_interval);
}
997
998 /* BHs disabled */
999 static void emac_force_link_update(struct ocp_enet_private *dev)
1000 {
1001         netif_carrier_off(dev->ndev);
1002         if (timer_pending(&dev->link_timer))
1003                 mod_timer(&dev->link_timer, jiffies + PHY_POLL_LINK_OFF);
1004 }
1005
1006 /* Process ctx, rtnl_lock semaphore */
/* Bring the interface down.  Teardown order matters: stop the link
 * timer, quiesce the queue and the MAC/MAL channels under BH protection,
 * then (with BHs re-enabled) free the rings and the IRQ.
 */
static int emac_close(struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	struct ocp_func_emac_data *emacdata = dev->def->additions;

	DBG("%d: close" NL, dev->def->index);

	local_bh_disable();

	/* Only devices with an external PHY run the link poll timer */
	if (dev->phy.address >= 0)
		del_timer_sync(&dev->link_timer);

	netif_stop_queue(ndev);
	emac_rx_disable(dev);
	emac_tx_disable(dev);
	mal_disable_rx_channel(dev->mal, emacdata->mal_rx_chan);
	mal_disable_tx_channel(dev->mal, emacdata->mal_tx_chan);
	mal_poll_del(dev->mal, &dev->commac);
	local_bh_enable();

	emac_clean_tx_ring(dev);
	emac_clean_rx_ring(dev);
	free_irq(dev->def->irq, dev);

	return 0;
}
1033
/* Return the TX descriptor control bits requesting hardware checksum
 * offload.  Only TAH-equipped EMACs can offload; without
 * CONFIG_IBM_EMAC_TAH this always returns 0.
 */
static inline u16 emac_tx_csum(struct ocp_enet_private *dev,
			       struct sk_buff *skb)
{
#if defined(CONFIG_IBM_EMAC_TAH)
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		++dev->stats.tx_packets_csum;
		return EMAC_TX_CTRL_TAH_CSUM;
	}
#endif
	return 0;
}
1045
/* Common tail of both xmit paths: kick the transmitter, stop the queue
 * when the TX ring became full, and account the packet.  Always returns
 * 0 (packet accepted).
 */
static inline int emac_xmit_finish(struct ocp_enet_private *dev, int len)
{
	struct emac_regs __iomem *p = dev->emacp;
	struct net_device *ndev = dev->ndev;

	/* Send the packet out */
	out_be32(&p->tmr0, EMAC_TMR0_XMIT);

	/* Ring is full once tx_cnt reaches NUM_TX_BUFF; the poll_tx
	 * callback wakes the queue again as descriptors complete.
	 */
	if (unlikely(++dev->tx_cnt == NUM_TX_BUFF)) {
		netif_stop_queue(ndev);
		DBG2("%d: stopped TX queue" NL, dev->def->index);
	}

	ndev->trans_start = jiffies;
	++dev->stats.tx_packets;
	dev->stats.tx_bytes += len;

	return 0;
}
1065
1066 /* BHs disabled */
/* Transmit a single-descriptor (non-SG) packet.  The descriptor control
 * word — including MAL_TX_CTRL_READY — is stored last, after barrier(),
 * so the hardware never sees a half-written descriptor.
 */
static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	unsigned int len = skb->len;
	int slot;

	u16 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
	    MAL_TX_CTRL_LAST | emac_tx_csum(dev, skb);

	/* Claim the next ring slot; the last slot also carries WRAP */
	slot = dev->tx_slot++;
	if (dev->tx_slot == NUM_TX_BUFF) {
		dev->tx_slot = 0;
		ctrl |= MAL_TX_CTRL_WRAP;
	}

	DBG2("%d: xmit(%u) %d" NL, dev->def->index, len, slot);

	dev->tx_skb[slot] = skb;
	dev->tx_desc[slot].data_ptr = dma_map_single(dev->ldev, skb->data, len,
						     DMA_TO_DEVICE);
	dev->tx_desc[slot].data_len = (u16) len;
	/* Publish the descriptor only after data_ptr/data_len are set */
	barrier();
	dev->tx_desc[slot].ctrl = ctrl;

	return emac_xmit_finish(dev, len);
}
1093
1094 #if defined(CONFIG_IBM_EMAC_TAH)
/* Fill the TX descriptors following 'slot' with MAL_MAX_TX_SIZE-sized
 * chunks of the buffer at DMA address 'pd'.  base_ctrl (which already
 * contains MAL_TX_CTRL_READY) is applied to every chunk; the final chunk
 * also gets MAL_TX_CTRL_LAST when 'last' is set.  The frame's first
 * descriptor is armed later by the caller, so the hardware does not
 * start on a partially built frame.  Returns the last slot used.
 */
static inline int emac_xmit_split(struct ocp_enet_private *dev, int slot,
				  u32 pd, int len, int last, u16 base_ctrl)
{
	while (1) {
		u16 ctrl = base_ctrl;
		int chunk = min(len, MAL_MAX_TX_SIZE);
		len -= chunk;

		slot = (slot + 1) % NUM_TX_BUFF;

		if (last && !len)
			ctrl |= MAL_TX_CTRL_LAST;
		if (slot == NUM_TX_BUFF - 1)
			ctrl |= MAL_TX_CTRL_WRAP;

		/* Intermediate chunks carry no skb; the caller attaches the
		 * skb to the frame's final slot.
		 */
		dev->tx_skb[slot] = NULL;
		dev->tx_desc[slot].data_ptr = pd;
		dev->tx_desc[slot].data_len = (u16) chunk;
		dev->tx_desc[slot].ctrl = ctrl;
		++dev->tx_cnt;

		if (!len)
			break;

		pd += chunk;
	}
	return slot;
}
1123
1124 /* BHs disabled (SG version for TAH equipped EMACs) */
/* Scatter/gather transmit path for TAH-equipped EMACs.  Splits the
 * linear part and each page fragment into MAL_MAX_TX_SIZE chunks via
 * emac_xmit_split(), then arms the frame's FIRST descriptor last (after
 * barrier()) so the MAL never sees a partially built frame.  If the ring
 * fills mid-frame, the already-written descriptors are rolled back
 * (undo_frame) and the queue is stopped; returns 1 in that case, the
 * result of emac_xmit_finish() (0) on success.
 */
static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int len = skb->len, chunk;
	int slot, i;
	u16 ctrl;
	u32 pd;

	/* This is common "fast" path */
	if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE))
		return emac_start_xmit(skb, ndev);

	len -= skb->data_len;

	/* Note, this is only an *estimation*, we can still run out of empty
	 * slots because of the additional fragmentation into
	 * MAL_MAX_TX_SIZE-sized chunks
	 */
	if (unlikely(dev->tx_cnt + nr_frags + mal_tx_chunks(len) > NUM_TX_BUFF))
		goto stop_queue;

	ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
	    emac_tx_csum(dev, skb);
	slot = dev->tx_slot;

	/* skb data */
	dev->tx_skb[slot] = NULL;
	chunk = min(len, MAL_MAX_TX_SIZE);
	dev->tx_desc[slot].data_ptr = pd =
	    dma_map_single(dev->ldev, skb->data, len, DMA_TO_DEVICE);
	dev->tx_desc[slot].data_len = (u16) chunk;
	len -= chunk;
	if (unlikely(len))
		slot = emac_xmit_split(dev, slot, pd + chunk, len, !nr_frags,
				       ctrl);
	/* skb fragments */
	for (i = 0; i < nr_frags; ++i) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		len = frag->size;

		/* Re-check ring room per fragment; estimation above may
		 * have been optimistic.
		 */
		if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
			goto undo_frame;

		pd = dma_map_page(dev->ldev, frag->page, frag->page_offset, len,
				  DMA_TO_DEVICE);

		slot = emac_xmit_split(dev, slot, pd, len, i == nr_frags - 1,
				       ctrl);
	}

	DBG2("%d: xmit_sg(%u) %d - %d" NL, dev->def->index, skb->len,
	     dev->tx_slot, slot);

	/* Attach skb to the last slot so we don't release it too early */
	dev->tx_skb[slot] = skb;

	/* Send the packet out */
	if (dev->tx_slot == NUM_TX_BUFF - 1)
		ctrl |= MAL_TX_CTRL_WRAP;
	barrier();
	dev->tx_desc[dev->tx_slot].ctrl = ctrl;
	dev->tx_slot = (slot + 1) % NUM_TX_BUFF;

	return emac_xmit_finish(dev, skb->len);

      undo_frame:
	/* Well, too bad. Our previous estimation was overly optimistic. 
	 * Undo everything.
	 */
	while (slot != dev->tx_slot) {
		dev->tx_desc[slot].ctrl = 0;
		--dev->tx_cnt;
		if (--slot < 0)
			slot = NUM_TX_BUFF - 1;
	}
	++dev->estats.tx_undo;

      stop_queue:
	netif_stop_queue(ndev);
	DBG2("%d: stopped TX queue" NL, dev->def->index);
	return 1;
}
1208 #else
1209 # define emac_start_xmit_sg     emac_start_xmit
1210 #endif  /* !defined(CONFIG_IBM_EMAC_TAH) */
1211
1212 /* BHs disabled */
1213 static void emac_parse_tx_error(struct ocp_enet_private *dev, u16 ctrl)
1214 {
1215         struct ibm_emac_error_stats *st = &dev->estats;
1216         DBG("%d: BD TX error %04x" NL, dev->def->index, ctrl);
1217
1218         ++st->tx_bd_errors;
1219         if (ctrl & EMAC_TX_ST_BFCS)
1220                 ++st->tx_bd_bad_fcs;
1221         if (ctrl & EMAC_TX_ST_LCS)
1222                 ++st->tx_bd_carrier_loss;
1223         if (ctrl & EMAC_TX_ST_ED)
1224                 ++st->tx_bd_excessive_deferral;
1225         if (ctrl & EMAC_TX_ST_EC)
1226                 ++st->tx_bd_excessive_collisions;
1227         if (ctrl & EMAC_TX_ST_LC)
1228                 ++st->tx_bd_late_collision;
1229         if (ctrl & EMAC_TX_ST_MC)
1230                 ++st->tx_bd_multple_collisions;
1231         if (ctrl & EMAC_TX_ST_SC)
1232                 ++st->tx_bd_single_collision;
1233         if (ctrl & EMAC_TX_ST_UR)
1234                 ++st->tx_bd_underrun;
1235         if (ctrl & EMAC_TX_ST_SQE)
1236                 ++st->tx_bd_sqe;
1237 }
1238
/* MAL poll callback: reclaim completed TX descriptors starting at
 * ack_slot.  Frees the skb attached to a frame's final slot, accounts
 * BD-level errors, and wakes the TX queue once occupancy drops below
 * EMAC_TX_WAKEUP_THRESH.
 */
static void emac_poll_tx(void *param)
{
	struct ocp_enet_private *dev = param;
	DBG2("%d: poll_tx, %d %d" NL, dev->def->index, dev->tx_cnt,
	     dev->ack_slot);

	if (dev->tx_cnt) {
		u16 ctrl;
		int slot = dev->ack_slot, n = 0;
	      again:
		ctrl = dev->tx_desc[slot].ctrl;
		/* READY still set means the hardware owns this descriptor */
		if (!(ctrl & MAL_TX_CTRL_READY)) {
			struct sk_buff *skb = dev->tx_skb[slot];
			++n;

			/* Only a frame's last slot has an skb attached;
			 * split chunks carry NULL (see emac_xmit_split).
			 */
			if (skb) {
				dev_kfree_skb(skb);
				dev->tx_skb[slot] = NULL;
			}
			slot = (slot + 1) % NUM_TX_BUFF;

			if (unlikely(EMAC_IS_BAD_TX(ctrl)))
				emac_parse_tx_error(dev, ctrl);

			if (--dev->tx_cnt)
				goto again;
		}
		if (n) {
			dev->ack_slot = slot;
			if (netif_queue_stopped(dev->ndev) &&
			    dev->tx_cnt < EMAC_TX_WAKEUP_THRESH)
				netif_wake_queue(dev->ndev);

			DBG2("%d: tx %d pkts" NL, dev->def->index, n);
		}
	}
}
1276
/* Hand the existing skb in 'slot' back to the hardware without
 * reallocating it.  When 'len' is non-zero the data area (including the
 * 2-byte alignment prefix) is re-mapped for DMA first.  barrier() makes
 * sure data_len is cleared before the EMPTY bit is published.
 */
static inline void emac_recycle_rx_skb(struct ocp_enet_private *dev, int slot,
				       int len)
{
	struct sk_buff *skb = dev->rx_skb[slot];
	DBG2("%d: recycle %d %d" NL, dev->def->index, slot, len);

	if (len)
		dma_map_single(dev->ldev, skb->data - 2,
			       EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE);

	dev->rx_desc[slot].data_len = 0;
	barrier();
	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
}
1292
1293 static void emac_parse_rx_error(struct ocp_enet_private *dev, u16 ctrl)
1294 {
1295         struct ibm_emac_error_stats *st = &dev->estats;
1296         DBG("%d: BD RX error %04x" NL, dev->def->index, ctrl);
1297
1298         ++st->rx_bd_errors;
1299         if (ctrl & EMAC_RX_ST_OE)
1300                 ++st->rx_bd_overrun;
1301         if (ctrl & EMAC_RX_ST_BP)
1302                 ++st->rx_bd_bad_packet;
1303         if (ctrl & EMAC_RX_ST_RP)
1304                 ++st->rx_bd_runt_packet;
1305         if (ctrl & EMAC_RX_ST_SE)
1306                 ++st->rx_bd_short_event;
1307         if (ctrl & EMAC_RX_ST_AE)
1308                 ++st->rx_bd_alignment_error;
1309         if (ctrl & EMAC_RX_ST_BFCS)
1310                 ++st->rx_bd_bad_fcs;
1311         if (ctrl & EMAC_RX_ST_PTL)
1312                 ++st->rx_bd_packet_too_long;
1313         if (ctrl & EMAC_RX_ST_ORE)
1314                 ++st->rx_bd_out_of_range;
1315         if (ctrl & EMAC_RX_ST_IRE)
1316                 ++st->rx_bd_in_range;
1317 }
1318
/* Mark the skb checksum-verified when a TAH is present and the masked
 * descriptor status ('ctrl', already ANDed with EMAC_BAD_RX_MASK by the
 * caller) is zero, i.e. no errors and a good hardware checksum.
 * No-op without CONFIG_IBM_EMAC_TAH.
 */
static inline void emac_rx_csum(struct ocp_enet_private *dev,
				struct sk_buff *skb, u16 ctrl)
{
#if defined(CONFIG_IBM_EMAC_TAH)
	if (!ctrl && dev->tah_dev) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		++dev->stats.rx_packets_csum;
	}
#endif
}
1329
1330 static inline int emac_rx_sg_append(struct ocp_enet_private *dev, int slot)
1331 {
1332         if (likely(dev->rx_sg_skb != NULL)) {
1333                 int len = dev->rx_desc[slot].data_len;
1334                 int tot_len = dev->rx_sg_skb->len + len;
1335
1336                 if (unlikely(tot_len + 2 > dev->rx_skb_size)) {
1337                         ++dev->estats.rx_dropped_mtu;
1338                         dev_kfree_skb(dev->rx_sg_skb);
1339                         dev->rx_sg_skb = NULL;
1340                 } else {
1341                         cacheable_memcpy(dev->rx_sg_skb->tail,
1342                                          dev->rx_skb[slot]->data, len);
1343                         skb_put(dev->rx_sg_skb, len);
1344                         emac_recycle_rx_skb(dev, slot, len);
1345                         return 0;
1346                 }
1347         }
1348         emac_recycle_rx_skb(dev, slot, 0);
1349         return -1;
1350 }
1351
1352 /* BHs disabled */
/* MAL poll callback: process up to 'budget' received descriptors
 * starting at rx_slot.  Small frames are copied into a fresh skb and the
 * ring skb recycled; larger ones are passed up directly and replaced.
 * Multi-descriptor (SG) frames are assembled in rx_sg_skb.  If RX was
 * stopped (rxde) and the ring has drained, the channel is restarted.
 * Returns the number of descriptors consumed.
 */
static int emac_poll_rx(void *param, int budget)
{
	struct ocp_enet_private *dev = param;
	int slot = dev->rx_slot, received = 0;

	DBG2("%d: poll_rx(%d)" NL, dev->def->index, budget);

      again:
	while (budget > 0) {
		int len;
		struct sk_buff *skb;
		u16 ctrl = dev->rx_desc[slot].ctrl;

		/* EMPTY means the hardware still owns this descriptor */
		if (ctrl & MAL_RX_CTRL_EMPTY)
			break;

		skb = dev->rx_skb[slot];
		/* Read data_len only after seeing the non-EMPTY ctrl */
		barrier();
		len = dev->rx_desc[slot].data_len;

		if (unlikely(!MAL_IS_SINGLE_RX(ctrl)))
			goto sg;

		ctrl &= EMAC_BAD_RX_MASK;
		if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
			emac_parse_rx_error(dev, ctrl);
			++dev->estats.rx_dropped_error;
			emac_recycle_rx_skb(dev, slot, 0);
			len = 0;
			goto next;
		}

		/* Copy small frames so the large ring skb can be reused */
		if (len && len < EMAC_RX_COPY_THRESH) {
			struct sk_buff *copy_skb =
			    alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2, GFP_ATOMIC);
			if (unlikely(!copy_skb))
				goto oom;

			skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2);
			/* Include the 2-byte alignment prefix in the copy */
			cacheable_memcpy(copy_skb->data - 2, skb->data - 2,
					 len + 2);
			emac_recycle_rx_skb(dev, slot, len);
			skb = copy_skb;
		} else if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC)))
			goto oom;

		skb_put(skb, len);
	      push_packet:
		skb->dev = dev->ndev;
		skb->protocol = eth_type_trans(skb, dev->ndev);
		emac_rx_csum(dev, skb, ctrl);

		if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
			++dev->estats.rx_dropped_stack;
	      next:
		++dev->stats.rx_packets;
	      skip:
		dev->stats.rx_bytes += len;
		slot = (slot + 1) % NUM_RX_BUFF;
		--budget;
		++received;
		continue;
	      sg:
		/* Multi-descriptor frame handling */
		if (ctrl & MAL_RX_CTRL_FIRST) {
			BUG_ON(dev->rx_sg_skb);
			if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) {
				DBG("%d: rx OOM %d" NL, dev->def->index, slot);
				++dev->estats.rx_dropped_oom;
				emac_recycle_rx_skb(dev, slot, 0);
			} else {
				dev->rx_sg_skb = skb;
				skb_put(skb, len);
			}
		} else if (!emac_rx_sg_append(dev, slot) &&
			   (ctrl & MAL_RX_CTRL_LAST)) {

			skb = dev->rx_sg_skb;
			dev->rx_sg_skb = NULL;

			ctrl &= EMAC_BAD_RX_MASK;
			if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
				emac_parse_rx_error(dev, ctrl);
				++dev->estats.rx_dropped_error;
				dev_kfree_skb(skb);
				len = 0;
			} else
				goto push_packet;
		}
		goto skip;
	      oom:
		DBG("%d: rx OOM %d" NL, dev->def->index, slot);
		/* Drop the packet and recycle skb */
		++dev->estats.rx_dropped_oom;
		emac_recycle_rx_skb(dev, slot, 0);
		goto next;
	}

	if (received) {
		DBG2("%d: rx %d BDs" NL, dev->def->index, received);
		dev->rx_slot = slot;
	}

	/* RX channel was stopped by rxde: restart it once the ring is
	 * fully drained, discarding any half-assembled SG packet.
	 */
	if (unlikely(budget && dev->commac.rx_stopped)) {
		struct ocp_func_emac_data *emacdata = dev->def->additions;

		barrier();
		if (!(dev->rx_desc[slot].ctrl & MAL_RX_CTRL_EMPTY)) {
			DBG2("%d: rx restart" NL, dev->def->index);
			received = 0;
			goto again;
		}

		if (dev->rx_sg_skb) {
			DBG2("%d: dropping partial rx packet" NL,
			     dev->def->index);
			++dev->estats.rx_dropped_error;
			dev_kfree_skb(dev->rx_sg_skb);
			dev->rx_sg_skb = NULL;
		}

		dev->commac.rx_stopped = 0;
		mal_enable_rx_channel(dev->mal, emacdata->mal_rx_chan);
		emac_rx_enable(dev);
		dev->rx_slot = 0;
	}
	return received;
}
1480
1481 /* BHs disabled */
1482 static int emac_peek_rx(void *param)
1483 {
1484         struct ocp_enet_private *dev = param;
1485         return !(dev->rx_desc[dev->rx_slot].ctrl & MAL_RX_CTRL_EMPTY);
1486 }
1487
1488 /* BHs disabled */
1489 static int emac_peek_rx_sg(void *param)
1490 {
1491         struct ocp_enet_private *dev = param;
1492         int slot = dev->rx_slot;
1493         while (1) {
1494                 u16 ctrl = dev->rx_desc[slot].ctrl;
1495                 if (ctrl & MAL_RX_CTRL_EMPTY)
1496                         return 0;
1497                 else if (ctrl & MAL_RX_CTRL_LAST)
1498                         return 1;
1499
1500                 slot = (slot + 1) % NUM_RX_BUFF;
1501
1502                 /* I'm just being paranoid here :) */
1503                 if (unlikely(slot == dev->rx_slot))
1504                         return 0;
1505         }
1506 }
1507
1508 /* Hard IRQ */
/* MAL "RX descriptor error" callback: count the stall and stop RX
 * asynchronously; emac_poll_rx() restarts the channel once the ring has
 * been drained.
 */
static void emac_rxde(void *param)
{
	struct ocp_enet_private *dev = param;
	++dev->estats.rx_stopped;
	emac_rx_disable_async(dev);
}
1515
1516 /* Hard IRQ */
1517 static irqreturn_t emac_irq(int irq, void *dev_instance)
1518 {
1519         struct ocp_enet_private *dev = dev_instance;
1520         struct emac_regs __iomem *p = dev->emacp;
1521         struct ibm_emac_error_stats *st = &dev->estats;
1522
1523         u32 isr = in_be32(&p->isr);
1524         out_be32(&p->isr, isr);
1525
1526         DBG("%d: isr = %08x" NL, dev->def->index, isr);
1527
1528         if (isr & EMAC_ISR_TXPE)
1529                 ++st->tx_parity;
1530         if (isr & EMAC_ISR_RXPE)
1531                 ++st->rx_parity;
1532         if (isr & EMAC_ISR_TXUE)
1533                 ++st->tx_underrun;
1534         if (isr & EMAC_ISR_RXOE)
1535                 ++st->rx_fifo_overrun;
1536         if (isr & EMAC_ISR_OVR)
1537                 ++st->rx_overrun;
1538         if (isr & EMAC_ISR_BP)
1539                 ++st->rx_bad_packet;
1540         if (isr & EMAC_ISR_RP)
1541                 ++st->rx_runt_packet;
1542         if (isr & EMAC_ISR_SE)
1543                 ++st->rx_short_event;
1544         if (isr & EMAC_ISR_ALE)
1545                 ++st->rx_alignment_error;
1546         if (isr & EMAC_ISR_BFCS)
1547                 ++st->rx_bad_fcs;
1548         if (isr & EMAC_ISR_PTLE)
1549                 ++st->rx_packet_too_long;
1550         if (isr & EMAC_ISR_ORE)
1551                 ++st->rx_out_of_range;
1552         if (isr & EMAC_ISR_IRE)
1553                 ++st->rx_in_range;
1554         if (isr & EMAC_ISR_SQE)
1555                 ++st->tx_sqe;
1556         if (isr & EMAC_ISR_TE)
1557                 ++st->tx_errors;
1558
1559         return IRQ_HANDLED;
1560 }
1561
/* net_device get_stats hook: fold the driver's extended 64-bit counters
 * into the "legacy" struct net_device_stats.  IRQs are disabled while
 * copying so the snapshot is consistent with counters updated from the
 * interrupt and poll paths.
 */
static struct net_device_stats *emac_stats(struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	struct ibm_emac_stats *st = &dev->stats;
	struct ibm_emac_error_stats *est = &dev->estats;
	struct net_device_stats *nst = &dev->nstats;

	DBG2("%d: stats" NL, dev->def->index);

	/* Compute "legacy" statistics */
	local_irq_disable();
	nst->rx_packets = (unsigned long)st->rx_packets;
	nst->rx_bytes = (unsigned long)st->rx_bytes;
	nst->tx_packets = (unsigned long)st->tx_packets;
	nst->tx_bytes = (unsigned long)st->tx_bytes;
	nst->rx_dropped = (unsigned long)(est->rx_dropped_oom +
					  est->rx_dropped_error +
					  est->rx_dropped_resize +
					  est->rx_dropped_mtu);
	nst->tx_dropped = (unsigned long)est->tx_dropped;

	nst->rx_errors = (unsigned long)est->rx_bd_errors;
	nst->rx_fifo_errors = (unsigned long)(est->rx_bd_overrun +
					      est->rx_fifo_overrun +
					      est->rx_overrun);
	nst->rx_frame_errors = (unsigned long)(est->rx_bd_alignment_error +
					       est->rx_alignment_error);
	nst->rx_crc_errors = (unsigned long)(est->rx_bd_bad_fcs +
					     est->rx_bad_fcs);
	nst->rx_length_errors = (unsigned long)(est->rx_bd_runt_packet +
						est->rx_bd_short_event +
						est->rx_bd_packet_too_long +
						est->rx_bd_out_of_range +
						est->rx_bd_in_range +
						est->rx_runt_packet +
						est->rx_short_event +
						est->rx_packet_too_long +
						est->rx_out_of_range +
						est->rx_in_range);

	nst->tx_errors = (unsigned long)(est->tx_bd_errors + est->tx_errors);
	nst->tx_fifo_errors = (unsigned long)(est->tx_bd_underrun +
					      est->tx_underrun);
	nst->tx_carrier_errors = (unsigned long)est->tx_bd_carrier_loss;
	nst->collisions = (unsigned long)(est->tx_bd_excessive_deferral +
					  est->tx_bd_excessive_collisions +
					  est->tx_bd_late_collision +
					  est->tx_bd_multple_collisions);
	local_irq_enable();
	return nst;
}
1613
/* OCP device removal: unregister the netdev first (so no new opens),
 * then tear down the attached TAH/RGMII/ZMII helpers, debug hooks and
 * MAL registration before unmapping the registers and freeing the
 * netdev.
 */
static void emac_remove(struct ocp_device *ocpdev)
{
	struct ocp_enet_private *dev = ocp_get_drvdata(ocpdev);

	DBG("%d: remove" NL, dev->def->index);

	ocp_set_drvdata(ocpdev, NULL);
	unregister_netdev(dev->ndev);

	tah_fini(dev->tah_dev);
	rgmii_fini(dev->rgmii_dev, dev->rgmii_input);
	zmii_fini(dev->zmii_dev, dev->zmii_input);

	/* Unregister this index from the debug facility */
	emac_dbg_register(dev->def->index, NULL);

	mal_unregister_commac(dev->mal, &dev->commac);
	iounmap(dev->emacp);
	kfree(dev->ndev);
}
1633
/* MAL callbacks for EMACs receiving each frame in a single buffer */
static struct mal_commac_ops emac_commac_ops = {
	.poll_tx = &emac_poll_tx,
	.poll_rx = &emac_poll_rx,
	.peek_rx = &emac_peek_rx,
	.rxde = &emac_rxde,
};
1640
/* MAL callbacks for the scatter/gather RX case — only peek_rx differs,
 * waiting for a frame's LAST descriptor before reporting work.
 */
static struct mal_commac_ops emac_commac_sg_ops = {
	.poll_tx = &emac_poll_tx,
	.poll_rx = &emac_poll_rx,
	.peek_rx = &emac_peek_rx_sg,
	.rxde = &emac_rxde,
};
1647
1648 /* Ethtool support */
/* ethtool get_settings: report PHY capabilities and current link
 * parameters.  The mutable PHY state is read with BHs disabled since it
 * is updated from the link timer / BH context.
 */
static int emac_ethtool_get_settings(struct net_device *ndev,
				     struct ethtool_cmd *cmd)
{
	struct ocp_enet_private *dev = ndev->priv;

	cmd->supported = dev->phy.features;
	cmd->port = PORT_MII;
	cmd->phy_address = dev->phy.address;
	/* A negative PHY address means there is no external PHY */
	cmd->transceiver =
	    dev->phy.address >= 0 ? XCVR_EXTERNAL : XCVR_INTERNAL;

	local_bh_disable();
	cmd->advertising = dev->phy.advertising;
	cmd->autoneg = dev->phy.autoneg;
	cmd->speed = dev->phy.speed;
	cmd->duplex = dev->phy.duplex;
	local_bh_enable();

	return 0;
}
1669
1670 static int emac_ethtool_set_settings(struct net_device *ndev,
1671                                      struct ethtool_cmd *cmd)
1672 {
1673         struct ocp_enet_private *dev = ndev->priv;
1674         u32 f = dev->phy.features;
1675
1676         DBG("%d: set_settings(%d, %d, %d, 0x%08x)" NL, dev->def->index,
1677             cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising);
1678
1679         /* Basic sanity checks */
1680         if (dev->phy.address < 0)
1681                 return -EOPNOTSUPP;
1682         if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
1683                 return -EINVAL;
1684         if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
1685                 return -EINVAL;
1686         if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL)
1687                 return -EINVAL;
1688
1689         if (cmd->autoneg == AUTONEG_DISABLE) {
1690                 switch (cmd->speed) {
1691                 case SPEED_10:
1692                         if (cmd->duplex == DUPLEX_HALF
1693                             && !(f & SUPPORTED_10baseT_Half))
1694                                 return -EINVAL;
1695                         if (cmd->duplex == DUPLEX_FULL
1696                             && !(f & SUPPORTED_10baseT_Full))
1697                                 return -EINVAL;
1698                         break;
1699                 case SPEED_100:
1700                         if (cmd->duplex == DUPLEX_HALF
1701                             && !(f & SUPPORTED_100baseT_Half))
1702                                 return -EINVAL;
1703                         if (cmd->duplex == DUPLEX_FULL
1704                             && !(f & SUPPORTED_100baseT_Full))
1705                                 return -EINVAL;
1706                         break;
1707                 case SPEED_1000:
1708                         if (cmd->duplex == DUPLEX_HALF
1709                             && !(f & SUPPORTED_1000baseT_Half))
1710                                 return -EINVAL;
1711                         if (cmd->duplex == DUPLEX_FULL
1712                             && !(f & SUPPORTED_1000baseT_Full))
1713                                 return -EINVAL;
1714                         break;
1715                 default:
1716                         return -EINVAL;
1717                 }
1718
1719                 local_bh_disable();
1720                 dev->phy.def->ops->setup_forced(&dev->phy, cmd->speed,
1721                                                 cmd->duplex);
1722
1723         } else {
1724                 if (!(f & SUPPORTED_Autoneg))
1725                         return -EINVAL;
1726
1727                 local_bh_disable();
1728                 dev->phy.def->ops->setup_aneg(&dev->phy,
1729                                               (cmd->advertising & f) |
1730                                               (dev->phy.advertising &
1731                                                (ADVERTISED_Pause |
1732                                                 ADVERTISED_Asym_Pause)));
1733         }
1734         emac_force_link_update(dev);
1735         local_bh_enable();
1736
1737         return 0;
1738 }
1739
1740 static void emac_ethtool_get_ringparam(struct net_device *ndev,
1741                                        struct ethtool_ringparam *rp)
1742 {
1743         rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF;
1744         rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF;
1745 }
1746
1747 static void emac_ethtool_get_pauseparam(struct net_device *ndev,
1748                                         struct ethtool_pauseparam *pp)
1749 {
1750         struct ocp_enet_private *dev = ndev->priv;
1751
1752         local_bh_disable();
1753         if ((dev->phy.features & SUPPORTED_Autoneg) &&
1754             (dev->phy.advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause)))
1755                 pp->autoneg = 1;
1756
1757         if (dev->phy.duplex == DUPLEX_FULL) {
1758                 if (dev->phy.pause)
1759                         pp->rx_pause = pp->tx_pause = 1;
1760                 else if (dev->phy.asym_pause)
1761                         pp->tx_pause = 1;
1762         }
1763         local_bh_enable();
1764 }
1765
1766 static u32 emac_ethtool_get_rx_csum(struct net_device *ndev)
1767 {
1768         struct ocp_enet_private *dev = ndev->priv;
1769         return dev->tah_dev != 0;
1770 }
1771
/* Size of this EMAC's portion of an ethtool register dump: a
 * per-component subheader followed by the raw register image.
 */
static int emac_get_regs_len(struct ocp_enet_private *dev)
{
	return sizeof(struct emac_ethtool_regs_subhdr) + EMAC_ETHTOOL_REGS_SIZE;
}
1776
1777 static int emac_ethtool_get_regs_len(struct net_device *ndev)
1778 {
1779         struct ocp_enet_private *dev = ndev->priv;
1780         return sizeof(struct emac_ethtool_regs_hdr) +
1781             emac_get_regs_len(dev) + mal_get_regs_len(dev->mal) +
1782             zmii_get_regs_len(dev->zmii_dev) +
1783             rgmii_get_regs_len(dev->rgmii_dev) +
1784             tah_get_regs_len(dev->tah_dev);
1785 }
1786
1787 static void *emac_dump_regs(struct ocp_enet_private *dev, void *buf)
1788 {
1789         struct emac_ethtool_regs_subhdr *hdr = buf;
1790
1791         hdr->version = EMAC_ETHTOOL_REGS_VER;
1792         hdr->index = dev->def->index;
1793         memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE);
1794         return ((void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE);
1795 }
1796
/* ethtool: dump registers of the MAL, the EMAC, and any attached
 * helper devices into @buf.  The global header records which optional
 * components are present; the dump order (MAL, EMAC, ZMII, RGMII, TAH)
 * must match what userspace expects from the component flags.
 */
static void emac_ethtool_get_regs(struct net_device *ndev,
				  struct ethtool_regs *regs, void *buf)
{
	struct ocp_enet_private *dev = ndev->priv;
	struct emac_ethtool_regs_hdr *hdr = buf;

	hdr->components = 0;
	buf = hdr + 1;

	/* Registers are read with interrupts off for a consistent snapshot */
	local_irq_disable();
	buf = mal_dump_regs(dev->mal, buf);
	buf = emac_dump_regs(dev, buf);
	if (dev->zmii_dev) {
		hdr->components |= EMAC_ETHTOOL_REGS_ZMII;
		buf = zmii_dump_regs(dev->zmii_dev, buf);
	}
	if (dev->rgmii_dev) {
		hdr->components |= EMAC_ETHTOOL_REGS_RGMII;
		buf = rgmii_dump_regs(dev->rgmii_dev, buf);
	}
	if (dev->tah_dev) {
		hdr->components |= EMAC_ETHTOOL_REGS_TAH;
		buf = tah_dump_regs(dev->tah_dev, buf);
	}
	local_irq_enable();
}
1823
1824 static int emac_ethtool_nway_reset(struct net_device *ndev)
1825 {
1826         struct ocp_enet_private *dev = ndev->priv;
1827         int res = 0;
1828
1829         DBG("%d: nway_reset" NL, dev->def->index);
1830
1831         if (dev->phy.address < 0)
1832                 return -EOPNOTSUPP;
1833
1834         local_bh_disable();
1835         if (!dev->phy.autoneg) {
1836                 res = -EINVAL;
1837                 goto out;
1838         }
1839
1840         dev->phy.def->ops->setup_aneg(&dev->phy, dev->phy.advertising);
1841         emac_force_link_update(dev);
1842
1843       out:
1844         local_bh_enable();
1845         return res;
1846 }
1847
/* ethtool: number of u64 statistics exported by this driver */
static int emac_ethtool_get_stats_count(struct net_device *ndev)
{
	return EMAC_ETHTOOL_STATS_COUNT;
}
1852
1853 static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset,
1854                                      u8 * buf)
1855 {
1856         if (stringset == ETH_SS_STATS)
1857                 memcpy(buf, &emac_stats_keys, sizeof(emac_stats_keys));
1858 }
1859
/* ethtool: export statistics — the generic counters (dev->stats) are
 * copied first, immediately followed by the extended EMAC counters
 * (dev->estats).  The layout must match emac_stats_keys.
 */
static void emac_ethtool_get_ethtool_stats(struct net_device *ndev,
					   struct ethtool_stats *estats,
					   u64 * tmp_stats)
{
	struct ocp_enet_private *dev = ndev->priv;
	/* Interrupts off so the counters can't change mid-copy */
	local_irq_disable();
	memcpy(tmp_stats, &dev->stats, sizeof(dev->stats));
	/* Advance past the generic block before appending the extended one */
	tmp_stats += sizeof(dev->stats) / sizeof(u64);
	memcpy(tmp_stats, &dev->estats, sizeof(dev->estats));
	local_irq_enable();
}
1871
1872 static void emac_ethtool_get_drvinfo(struct net_device *ndev,
1873                                      struct ethtool_drvinfo *info)
1874 {
1875         struct ocp_enet_private *dev = ndev->priv;
1876
1877         strcpy(info->driver, "ibm_emac");
1878         strcpy(info->version, DRV_VERSION);
1879         info->fw_version[0] = '\0';
1880         sprintf(info->bus_info, "PPC 4xx EMAC %d", dev->def->index);
1881         info->n_stats = emac_ethtool_get_stats_count(ndev);
1882         info->regdump_len = emac_ethtool_get_regs_len(ndev);
1883 }
1884
/* ethtool operations table; TX checksum and SG state are reported by
 * the generic helpers based on ndev->features.
 */
static const struct ethtool_ops emac_ethtool_ops = {
	.get_settings = emac_ethtool_get_settings,
	.set_settings = emac_ethtool_set_settings,
	.get_drvinfo = emac_ethtool_get_drvinfo,

	.get_regs_len = emac_ethtool_get_regs_len,
	.get_regs = emac_ethtool_get_regs,

	.nway_reset = emac_ethtool_nway_reset,

	.get_ringparam = emac_ethtool_get_ringparam,
	.get_pauseparam = emac_ethtool_get_pauseparam,

	.get_rx_csum = emac_ethtool_get_rx_csum,

	.get_strings = emac_ethtool_get_strings,
	.get_stats_count = emac_ethtool_get_stats_count,
	.get_ethtool_stats = emac_ethtool_get_ethtool_stats,

	.get_link = ethtool_op_get_link,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.get_sg = ethtool_op_get_sg,
};
1908
/* MII ioctl handler.
 *
 * The ifr_ifru union is interpreted as an array of u16 words following
 * the classic MII ioctl layout: data[0] = PHY address, data[1] =
 * register number, data[2] = value to write, data[3] = value read.
 * Not supported on PHY-less ports (-EOPNOTSUPP).
 */
static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
	struct ocp_enet_private *dev = ndev->priv;
	uint16_t *data = (uint16_t *) & rq->ifr_ifru;

	DBG("%d: ioctl %08x" NL, dev->def->index, cmd);

	if (dev->phy.address < 0)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCDEVPRIVATE:
		/* Report our PHY address, then fall through to read */
		data[0] = dev->phy.address;
		/* Fall through */
	case SIOCGMIIREG:
	case SIOCDEVPRIVATE + 1:
		/* NOTE(review): the read always targets dev->phy.address,
		 * ignoring the caller-supplied data[0] — verify intended.
		 */
		data[3] = emac_mdio_read(ndev, dev->phy.address, data[1]);
		return 0;

	case SIOCSMIIREG:
	case SIOCDEVPRIVATE + 2:
		/* Writing PHY registers is privileged */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		emac_mdio_write(ndev, dev->phy.address, data[1], data[2]);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
1939
1940 static int __init emac_probe(struct ocp_device *ocpdev)
1941 {
1942         struct ocp_func_emac_data *emacdata = ocpdev->def->additions;
1943         struct net_device *ndev;
1944         struct ocp_device *maldev;
1945         struct ocp_enet_private *dev;
1946         int err, i;
1947
1948         DBG("%d: probe" NL, ocpdev->def->index);
1949
1950         if (!emacdata) {
1951                 printk(KERN_ERR "emac%d: Missing additional data!\n",
1952                        ocpdev->def->index);
1953                 return -ENODEV;
1954         }
1955
1956         /* Allocate our net_device structure */
1957         ndev = alloc_etherdev(sizeof(struct ocp_enet_private));
1958         if (!ndev) {
1959                 printk(KERN_ERR "emac%d: could not allocate ethernet device!\n",
1960                        ocpdev->def->index);
1961                 return -ENOMEM;
1962         }
1963         dev = ndev->priv;
1964         dev->ndev = ndev;
1965         dev->ldev = &ocpdev->dev;
1966         dev->def = ocpdev->def;
1967         SET_MODULE_OWNER(ndev);
1968
1969         /* Find MAL device we are connected to */
1970         maldev =
1971             ocp_find_device(OCP_VENDOR_IBM, OCP_FUNC_MAL, emacdata->mal_idx);
1972         if (!maldev) {
1973                 printk(KERN_ERR "emac%d: unknown mal%d device!\n",
1974                        dev->def->index, emacdata->mal_idx);
1975                 err = -ENODEV;
1976                 goto out;
1977         }
1978         dev->mal = ocp_get_drvdata(maldev);
1979         if (!dev->mal) {
1980                 printk(KERN_ERR "emac%d: mal%d hasn't been initialized yet!\n",
1981                        dev->def->index, emacdata->mal_idx);
1982                 err = -ENODEV;
1983                 goto out;
1984         }
1985
1986         /* Register with MAL */
1987         dev->commac.ops = &emac_commac_ops;
1988         dev->commac.dev = dev;
1989         dev->commac.tx_chan_mask = MAL_CHAN_MASK(emacdata->mal_tx_chan);
1990         dev->commac.rx_chan_mask = MAL_CHAN_MASK(emacdata->mal_rx_chan);
1991         err = mal_register_commac(dev->mal, &dev->commac);
1992         if (err) {
1993                 printk(KERN_ERR "emac%d: failed to register with mal%d!\n",
1994                        dev->def->index, emacdata->mal_idx);
1995                 goto out;
1996         }
1997         dev->rx_skb_size = emac_rx_skb_size(ndev->mtu);
1998         dev->rx_sync_size = emac_rx_sync_size(ndev->mtu);
1999
2000         /* Get pointers to BD rings */
2001         dev->tx_desc =
2002             dev->mal->bd_virt + mal_tx_bd_offset(dev->mal,
2003                                                  emacdata->mal_tx_chan);
2004         dev->rx_desc =
2005             dev->mal->bd_virt + mal_rx_bd_offset(dev->mal,
2006                                                  emacdata->mal_rx_chan);
2007
2008         DBG("%d: tx_desc %p" NL, ocpdev->def->index, dev->tx_desc);
2009         DBG("%d: rx_desc %p" NL, ocpdev->def->index, dev->rx_desc);
2010
2011         /* Clean rings */
2012         memset(dev->tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor));
2013         memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor));
2014
2015         /* If we depend on another EMAC for MDIO, check whether it was probed already */
2016         if (emacdata->mdio_idx >= 0 && emacdata->mdio_idx != ocpdev->def->index) {
2017                 struct ocp_device *mdiodev =
2018                     ocp_find_device(OCP_VENDOR_IBM, OCP_FUNC_EMAC,
2019                                     emacdata->mdio_idx);
2020                 if (!mdiodev) {
2021                         printk(KERN_ERR "emac%d: unknown emac%d device!\n",
2022                                dev->def->index, emacdata->mdio_idx);
2023                         err = -ENODEV;
2024                         goto out2;
2025                 }
2026                 dev->mdio_dev = ocp_get_drvdata(mdiodev);
2027                 if (!dev->mdio_dev) {
2028                         printk(KERN_ERR
2029                                "emac%d: emac%d hasn't been initialized yet!\n",
2030                                dev->def->index, emacdata->mdio_idx);
2031                         err = -ENODEV;
2032                         goto out2;
2033                 }
2034         }
2035
2036         /* Attach to ZMII, if needed */
2037         if ((err = zmii_attach(dev)) != 0)
2038                 goto out2;
2039
2040         /* Attach to RGMII, if needed */
2041         if ((err = rgmii_attach(dev)) != 0)
2042                 goto out3;
2043
2044         /* Attach to TAH, if needed */
2045         if ((err = tah_attach(dev)) != 0)
2046                 goto out4;
2047
2048         /* Map EMAC regs */
2049         dev->emacp = ioremap(dev->def->paddr, sizeof(struct emac_regs));
2050         if (!dev->emacp) {
2051                 printk(KERN_ERR "emac%d: could not ioremap device registers!\n",
2052                        dev->def->index);
2053                 err = -ENOMEM;
2054                 goto out5;
2055         }
2056
2057         /* Fill in MAC address */
2058         for (i = 0; i < 6; ++i)
2059                 ndev->dev_addr[i] = emacdata->mac_addr[i];
2060
2061         /* Set some link defaults before we can find out real parameters */
2062         dev->phy.speed = SPEED_100;
2063         dev->phy.duplex = DUPLEX_FULL;
2064         dev->phy.autoneg = AUTONEG_DISABLE;
2065         dev->phy.pause = dev->phy.asym_pause = 0;
2066         dev->stop_timeout = STOP_TIMEOUT_100;
2067         init_timer(&dev->link_timer);
2068         dev->link_timer.function = emac_link_timer;
2069         dev->link_timer.data = (unsigned long)dev;
2070
2071         /* Find PHY if any */
2072         dev->phy.dev = ndev;
2073         dev->phy.mode = emacdata->phy_mode;
2074         if (emacdata->phy_map != 0xffffffff) {
2075                 u32 phy_map = emacdata->phy_map | busy_phy_map;
2076                 u32 adv;
2077
2078                 DBG("%d: PHY maps %08x %08x" NL, dev->def->index,
2079                     emacdata->phy_map, busy_phy_map);
2080
2081                 EMAC_RX_CLK_TX(dev->def->index);
2082
2083                 dev->phy.mdio_read = emac_mdio_read;
2084                 dev->phy.mdio_write = emac_mdio_write;
2085
2086                 /* Configure EMAC with defaults so we can at least use MDIO
2087                  * This is needed mostly for 440GX
2088                  */
2089                 if (emac_phy_gpcs(dev->phy.mode)) {
2090                         /* XXX
2091                          * Make GPCS PHY address equal to EMAC index.
2092                          * We probably should take into account busy_phy_map
2093                          * and/or phy_map here.
2094                          */
2095                         dev->phy.address = dev->def->index;
2096                 }
2097                 
2098                 emac_configure(dev);
2099
2100                 for (i = 0; i < 0x20; phy_map >>= 1, ++i)
2101                         if (!(phy_map & 1)) {
2102                                 int r;
2103                                 busy_phy_map |= 1 << i;
2104
2105                                 /* Quick check if there is a PHY at the address */
2106                                 r = emac_mdio_read(dev->ndev, i, MII_BMCR);
2107                                 if (r == 0xffff || r < 0)
2108                                         continue;
2109                                 if (!mii_phy_probe(&dev->phy, i))
2110                                         break;
2111                         }
2112                 if (i == 0x20) {
2113                         printk(KERN_WARNING "emac%d: can't find PHY!\n",
2114                                dev->def->index);
2115                         goto out6;
2116                 }
2117
2118                 /* Init PHY */
2119                 if (dev->phy.def->ops->init)
2120                         dev->phy.def->ops->init(&dev->phy);
2121                 
2122                 /* Disable any PHY features not supported by the platform */
2123                 dev->phy.def->features &= ~emacdata->phy_feat_exc;
2124
2125                 /* Setup initial link parameters */
2126                 if (dev->phy.features & SUPPORTED_Autoneg) {
2127                         adv = dev->phy.features;
2128 #if !defined(CONFIG_40x)
2129                         adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
2130 #endif
2131                         /* Restart autonegotiation */
2132                         dev->phy.def->ops->setup_aneg(&dev->phy, adv);
2133                 } else {
2134                         u32 f = dev->phy.def->features;
2135                         int speed = SPEED_10, fd = DUPLEX_HALF;
2136
2137                         /* Select highest supported speed/duplex */
2138                         if (f & SUPPORTED_1000baseT_Full) {
2139                                 speed = SPEED_1000;
2140                                 fd = DUPLEX_FULL;
2141                         } else if (f & SUPPORTED_1000baseT_Half)
2142                                 speed = SPEED_1000;
2143                         else if (f & SUPPORTED_100baseT_Full) {
2144                                 speed = SPEED_100;
2145                                 fd = DUPLEX_FULL;
2146                         } else if (f & SUPPORTED_100baseT_Half)
2147                                 speed = SPEED_100;
2148                         else if (f & SUPPORTED_10baseT_Full)
2149                                 fd = DUPLEX_FULL;
2150
2151                         /* Force link parameters */
2152                         dev->phy.def->ops->setup_forced(&dev->phy, speed, fd);
2153                 }
2154         } else {
2155                 emac_reset(dev);
2156
2157                 /* PHY-less configuration.
2158                  * XXX I probably should move these settings to emacdata
2159                  */
2160                 dev->phy.address = -1;
2161                 dev->phy.features = SUPPORTED_100baseT_Full | SUPPORTED_MII;
2162                 dev->phy.pause = 1;
2163         }
2164
2165         /* Fill in the driver function table */
2166         ndev->open = &emac_open;
2167         if (dev->tah_dev) {
2168                 ndev->hard_start_xmit = &emac_start_xmit_sg;
2169                 ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
2170         } else
2171                 ndev->hard_start_xmit = &emac_start_xmit;
2172         ndev->tx_timeout = &emac_full_tx_reset;
2173         ndev->watchdog_timeo = 5 * HZ;
2174         ndev->stop = &emac_close;
2175         ndev->get_stats = &emac_stats;
2176         ndev->set_multicast_list = &emac_set_multicast_list;
2177         ndev->do_ioctl = &emac_ioctl;
2178         if (emac_phy_supports_gige(emacdata->phy_mode)) {
2179                 ndev->change_mtu = &emac_change_mtu;
2180                 dev->commac.ops = &emac_commac_sg_ops;
2181         }
2182         SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);
2183
2184         netif_carrier_off(ndev);
2185         netif_stop_queue(ndev);
2186
2187         err = register_netdev(ndev);
2188         if (err) {
2189                 printk(KERN_ERR "emac%d: failed to register net device (%d)!\n",
2190                        dev->def->index, err);
2191                 goto out6;
2192         }
2193
2194         ocp_set_drvdata(ocpdev, dev);
2195
2196         printk("%s: emac%d, MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
2197                ndev->name, dev->def->index,
2198                ndev->dev_addr[0], ndev->dev_addr[1], ndev->dev_addr[2],
2199                ndev->dev_addr[3], ndev->dev_addr[4], ndev->dev_addr[5]);
2200
2201         if (dev->phy.address >= 0)
2202                 printk("%s: found %s PHY (0x%02x)\n", ndev->name,
2203                        dev->phy.def->name, dev->phy.address);
2204
2205         emac_dbg_register(dev->def->index, dev);
2206
2207         return 0;
2208       out6:
2209         iounmap(dev->emacp);
2210       out5:
2211         tah_fini(dev->tah_dev);
2212       out4:
2213         rgmii_fini(dev->rgmii_dev, dev->rgmii_input);
2214       out3:
2215         zmii_fini(dev->zmii_dev, dev->zmii_input);
2216       out2:
2217         mal_unregister_commac(dev->mal, &dev->commac);
2218       out:
2219         kfree(ndev);
2220         return err;
2221 }
2222
/* OCP match table: bind to every IBM EMAC function; terminated by the
 * OCP_VENDOR_INVALID sentinel entry.
 */
static struct ocp_device_id emac_ids[] = {
	{ .vendor = OCP_VENDOR_IBM, .function = OCP_FUNC_EMAC },
	{ .vendor = OCP_VENDOR_INVALID}
};
2227
/* OCP bus driver glue for the EMAC */
static struct ocp_driver emac_driver = {
	.name = "emac",
	.id_table = emac_ids,
	.probe = emac_probe,
	.remove = emac_remove,
};
2234
/* Module init: bring up the MAL layer first, then register the EMAC
 * driver with the EMAC-to-PHY clock forced internal (required so MDIO
 * works during probe on some platforms), restoring the external clock
 * afterwards in both the success and failure paths.
 */
static int __init emac_init(void)
{
	printk(KERN_INFO DRV_DESC ", version " DRV_VERSION "\n");

	DBG(": init" NL);

	if (mal_init())
		return -ENODEV;

	EMAC_CLK_INTERNAL;
	if (ocp_register_driver(&emac_driver)) {
		EMAC_CLK_EXTERNAL;
		/* Unwind: drop any partial registration, then the MAL */
		ocp_unregister_driver(&emac_driver);
		mal_exit();
		return -ENODEV;
	}
	EMAC_CLK_EXTERNAL;

	emac_init_debug();
	return 0;
}
2256
/* Module exit: unregister in the reverse order of emac_init() */
static void __exit emac_exit(void)
{
	DBG(": exit" NL);
	ocp_unregister_driver(&emac_driver);
	mal_exit();
	emac_fini_debug();
}
2264
2265 module_init(emac_init);
2266 module_exit(emac_exit);