/*
 * drivers/net/ibm_emac/ibm_emac_core.c
 *
 * Driver for PowerPC 4xx on-chip ethernet controller.
 *
 * Copyright (c) 2004, 2005 Zultys Technologies.
 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
 *
 * Based on original work by
 *      Matt Porter <mporter@kernel.crashing.org>
 *      (c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
 *      Armin Kuster <akuster@mvista.com>
 *      Johnnie Peters <jpeters@mvista.com>
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/bitops.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/uaccess.h>
#include <asm/ocp.h>

#include "ibm_emac_core.h"
#include "ibm_emac_debug.h"

/*
 * Lack of dma_unmap_???? calls is intentional.
 *
 * API-correct usage requires additional support state information to be
 * maintained for every RX and TX buffer descriptor (BD). Unfortunately, due to
 * the EMAC design (e.g. a TX buffer passed from the network stack can be split
 * into several BDs, and either dma_map_single or dma_map_page can be used to
 * map a particular BD), maintaining such information would add additional
 * overhead. The current DMA API implementation for 4xx processors only ensures
 * cache coherency, so the dma_unmap_???? routines are empty and are likely to
 * stay this way. I decided to omit the dma_unmap_??? calls because I don't
 * want to add additional complexity just for the sake of following some
 * abstract API, when it doesn't add any real benefit to the driver. I
 * understand that this decision may be controversial, but I really tried to
 * make the code API-correct and efficient at the same time and didn't come up
 * with code I liked :(.                                                --ebs
 */
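
/* (Note: on these non-coherent 4xx parts, dma_map_single()/dma_map_page()
 * essentially perform a cache flush/invalidate plus a virtual-to-physical
 * translation, which is why skipping the unmap is safe in practice.)
 */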

#define DRV_NAME        "emac"
#define DRV_VERSION     "3.54"
#define DRV_DESC        "PPC 4xx OCP EMAC driver"

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR
    ("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>");
MODULE_LICENSE("GPL");

/* wake up the TX queue when fewer than this many TX descriptors are in use */
#define EMAC_TX_WAKEUP_THRESH           (NUM_TX_BUFF / 4)

/* If the packet size is less than this number, we allocate a small skb and
 * copy the packet contents into it instead of sending the original big skb up
 */
#define EMAC_RX_COPY_THRESH             CONFIG_IBM_EMAC_RX_COPY_THRESHOLD

/* Since multiple EMACs share MDIO lines in various ways, we need
 * to avoid re-using the same PHY ID in cases where the arch didn't
 * set up precise phy_map entries
 */
static u32 busy_phy_map;

#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX) && \
    (defined(CONFIG_405EP) || defined(CONFIG_440EP) || defined(CONFIG_440GR))
/* The 405EP has an "EMAC to PHY Control Register" (CPC0_EPCTL) which can help
 * us with the PHY RX clock problem.
 * 440EP/440GR have a saner SDR0_MFR register implementation than the 440GX,
 * which also allows controlling each EMAC clock individually
 */
static inline void EMAC_RX_CLK_TX(int idx)
{
        unsigned long flags;
        local_irq_save(flags);

#if defined(CONFIG_405EP)
        mtdcr(0xf3, mfdcr(0xf3) | (1 << idx));
#else /* CONFIG_440EP || CONFIG_440GR */
        SDR_WRITE(DCRN_SDR_MFR, SDR_READ(DCRN_SDR_MFR) | (0x08000000 >> idx));
#endif

        local_irq_restore(flags);
}

static inline void EMAC_RX_CLK_DEFAULT(int idx)
{
        unsigned long flags;
        local_irq_save(flags);

#if defined(CONFIG_405EP)
        mtdcr(0xf3, mfdcr(0xf3) & ~(1 << idx));
#else /* CONFIG_440EP || CONFIG_440GR */
        SDR_WRITE(DCRN_SDR_MFR, SDR_READ(DCRN_SDR_MFR) & ~(0x08000000 >> idx));
#endif

        local_irq_restore(flags);
}
#else
#define EMAC_RX_CLK_TX(idx)             ((void)0)
#define EMAC_RX_CLK_DEFAULT(idx)        ((void)0)
#endif

#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX) && defined(CONFIG_440GX)
/* We can switch the Ethernet clock to the internal source through
 * SDR0_MFR[ECS]; unfortunately this is less flexible than the 440EP case,
 * because it's a global setting for all EMACs, therefore we do this clock
 * trick only during probe.
 */
#define EMAC_CLK_INTERNAL               SDR_WRITE(DCRN_SDR_MFR, \
                                            SDR_READ(DCRN_SDR_MFR) | 0x08000000)
#define EMAC_CLK_EXTERNAL               SDR_WRITE(DCRN_SDR_MFR, \
                                            SDR_READ(DCRN_SDR_MFR) & ~0x08000000)
#else
#define EMAC_CLK_INTERNAL               ((void)0)
#define EMAC_CLK_EXTERNAL               ((void)0)
#endif

/* I don't want to litter the system log with timeout errors
 * when we have a brain-damaged PHY.
 */
static inline void emac_report_timeout_error(struct ocp_enet_private *dev,
                                             const char *error)
{
#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX)
        DBG("%d: %s" NL, dev->def->index, error);
#else
        if (net_ratelimit())
                printk(KERN_ERR "emac%d: %s\n", dev->def->index, error);
#endif
}

/* PHY polling intervals */
#define PHY_POLL_LINK_ON        HZ
#define PHY_POLL_LINK_OFF       (HZ / 5)

/* Graceful stop timeouts in us.
 * We should allow up to 1 frame time (full-duplex, ignoring collisions)
 */
#define STOP_TIMEOUT_10         1230
#define STOP_TIMEOUT_100        124
#define STOP_TIMEOUT_1000       13
#define STOP_TIMEOUT_1000_JUMBO 73
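/* (Roughly one maximum frame time at each speed: e.g. 1522 bytes * 8 bits
 * at 10 Mbps is ~1218 us, rounded up to 1230; the jumbo value assumes a
 * ~9000-byte frame at 1000 Mbps, ~72 us.)
 */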

/* Please, keep in sync with struct ibm_emac_stats/ibm_emac_error_stats */
static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = {
        "rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum",
        "tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom",
        "rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu",
        "rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet",
        "rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error",
        "rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range",
        "rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun",
        "rx_bad_packet", "rx_runt_packet", "rx_short_event",
        "rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long",
        "rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors",
        "tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral",
        "tx_bd_excessive_collisions", "tx_bd_late_collision",
        "tx_bd_multple_collisions", "tx_bd_single_collision",
        "tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe",
        "tx_errors"
};

static irqreturn_t emac_irq(int irq, void *dev_instance);
static void emac_clean_tx_ring(struct ocp_enet_private *dev);

static inline int emac_phy_supports_gige(int phy_mode)
{
        return  phy_mode == PHY_MODE_GMII ||
                phy_mode == PHY_MODE_RGMII ||
                phy_mode == PHY_MODE_TBI ||
                phy_mode == PHY_MODE_RTBI;
}

static inline int emac_phy_gpcs(int phy_mode)
{
        return  phy_mode == PHY_MODE_TBI ||
                phy_mode == PHY_MODE_RTBI;
}

static inline void emac_tx_enable(struct ocp_enet_private *dev)
{
        struct emac_regs __iomem *p = dev->emacp;
        unsigned long flags;
        u32 r;

        local_irq_save(flags);

        DBG("%d: tx_enable" NL, dev->def->index);

        r = in_be32(&p->mr0);
        if (!(r & EMAC_MR0_TXE))
                out_be32(&p->mr0, r | EMAC_MR0_TXE);
        local_irq_restore(flags);
}

static void emac_tx_disable(struct ocp_enet_private *dev)
{
        struct emac_regs __iomem *p = dev->emacp;
        unsigned long flags;
        u32 r;

        local_irq_save(flags);

        DBG("%d: tx_disable" NL, dev->def->index);

        r = in_be32(&p->mr0);
        if (r & EMAC_MR0_TXE) {
                int n = dev->stop_timeout;
                out_be32(&p->mr0, r & ~EMAC_MR0_TXE);
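                /* Wait for the MAC to report TX idle (MR0[TXI]) */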
                while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n) {
                        udelay(1);
                        --n;
                }
                if (unlikely(!n))
                        emac_report_timeout_error(dev, "TX disable timeout");
        }
        local_irq_restore(flags);
}

static void emac_rx_enable(struct ocp_enet_private *dev)
{
        struct emac_regs __iomem *p = dev->emacp;
        unsigned long flags;
        u32 r;

        local_irq_save(flags);
        if (unlikely(dev->commac.rx_stopped))
                goto out;

        DBG("%d: rx_enable" NL, dev->def->index);

        r = in_be32(&p->mr0);
        if (!(r & EMAC_MR0_RXE)) {
                if (unlikely(!(r & EMAC_MR0_RXI))) {
                        /* Wait if previous async disable is still in progress */
                        int n = dev->stop_timeout;
                        while (!(r = in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
                                udelay(1);
                                --n;
                        }
                        if (unlikely(!n))
                                emac_report_timeout_error(dev,
                                                          "RX disable timeout");
                }
                out_be32(&p->mr0, r | EMAC_MR0_RXE);
        }
      out:
        local_irq_restore(flags);
}

static void emac_rx_disable(struct ocp_enet_private *dev)
{
        struct emac_regs __iomem *p = dev->emacp;
        unsigned long flags;
        u32 r;

        local_irq_save(flags);

        DBG("%d: rx_disable" NL, dev->def->index);

        r = in_be32(&p->mr0);
        if (r & EMAC_MR0_RXE) {
                int n = dev->stop_timeout;
                out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
                while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
                        udelay(1);
                        --n;
                }
                if (unlikely(!n))
                        emac_report_timeout_error(dev, "RX disable timeout");
        }
        local_irq_restore(flags);
}

static inline void emac_rx_disable_async(struct ocp_enet_private *dev)
{
        struct emac_regs __iomem *p = dev->emacp;
        unsigned long flags;
        u32 r;

        local_irq_save(flags);

        DBG("%d: rx_disable_async" NL, dev->def->index);

        r = in_be32(&p->mr0);
        if (r & EMAC_MR0_RXE)
                out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
        local_irq_restore(flags);
}

static int emac_reset(struct ocp_enet_private *dev)
{
        struct emac_regs __iomem *p = dev->emacp;
        unsigned long flags;
        int n = 20;

        DBG("%d: reset" NL, dev->def->index);

        local_irq_save(flags);

        if (!dev->reset_failed) {
                /* A 40x erratum suggests stopping the RX channel before
                 * reset; we stop TX as well
                 */
                emac_rx_disable(dev);
                emac_tx_disable(dev);
        }

        out_be32(&p->mr0, EMAC_MR0_SRST);
        while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n)
                --n;
        local_irq_restore(flags);

        if (n) {
                dev->reset_failed = 0;
                return 0;
        } else {
                emac_report_timeout_error(dev, "reset timeout");
                dev->reset_failed = 1;
                return -ETIMEDOUT;
        }
}

static void emac_hash_mc(struct ocp_enet_private *dev)
{
        struct emac_regs __iomem *p = dev->emacp;
        u16 gaht[4] = { 0 };
        struct dev_mc_list *dmi;

        DBG("%d: hash_mc %d" NL, dev->def->index, dev->ndev->mc_count);

        for (dmi = dev->ndev->mc_list; dmi; dmi = dmi->next) {
                int bit;
                DECLARE_MAC_BUF(mac);
                DBG2("%d: mc %s" NL,
                     dev->def->index, print_mac(mac, dmi->dmi_addr));

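                /* The hash bin is the top 6 bits of the CRC-32 of the MAC
                 * address; the 64 bins are spread across the four 16-bit
                 * GAHT registers.
                 */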
                bit = 63 - (ether_crc(ETH_ALEN, dmi->dmi_addr) >> 26);
                gaht[bit >> 4] |= 0x8000 >> (bit & 0x0f);
        }
        out_be32(&p->gaht1, gaht[0]);
        out_be32(&p->gaht2, gaht[1]);
        out_be32(&p->gaht3, gaht[2]);
        out_be32(&p->gaht4, gaht[3]);
}

static inline u32 emac_iff2rmr(struct net_device *ndev)
{
        u32 r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE |
            EMAC_RMR_BASE;

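        /* With more than 32 multicast addresses the 64-bin hash filter is
         * largely saturated anyway, so just accept all multicast traffic.
         */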
        if (ndev->flags & IFF_PROMISC)
                r |= EMAC_RMR_PME;
        else if (ndev->flags & IFF_ALLMULTI || ndev->mc_count > 32)
                r |= EMAC_RMR_PMME;
        else if (ndev->mc_count > 0)
                r |= EMAC_RMR_MAE;

        return r;
}

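/* Round the OPB bus frequency to the nearest MHz; used below when
 * programming the MR1 and STACR clock settings.
 */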
static inline int emac_opb_mhz(void)
{
        return (ocp_sys_info.opb_bus_freq + 500000) / 1000000;
}

/* BHs disabled */
static int emac_configure(struct ocp_enet_private *dev)
{
        struct emac_regs __iomem *p = dev->emacp;
        struct net_device *ndev = dev->ndev;
        int gige;
        u32 r;

        DBG("%d: configure" NL, dev->def->index);

        if (emac_reset(dev) < 0)
                return -ETIMEDOUT;

        tah_reset(dev->tah_dev);

        /* Mode register */
        r = EMAC_MR1_BASE(emac_opb_mhz()) | EMAC_MR1_VLE | EMAC_MR1_IST;
        if (dev->phy.duplex == DUPLEX_FULL)
                r |= EMAC_MR1_FDE | EMAC_MR1_MWSW_001;
        dev->stop_timeout = STOP_TIMEOUT_10;
        switch (dev->phy.speed) {
        case SPEED_1000:
                if (emac_phy_gpcs(dev->phy.mode)) {
                        r |= EMAC_MR1_MF_1000GPCS |
                            EMAC_MR1_MF_IPPA(dev->phy.address);

                        /* Put some arbitrary OUI, Manuf & Rev IDs so we can
                         * identify this GPCS PHY later.
                         */
                        out_be32(&p->ipcr, 0xdeadbeef);
                } else
                        r |= EMAC_MR1_MF_1000;
                r |= EMAC_MR1_RFS_16K;
                gige = 1;

                if (dev->ndev->mtu > ETH_DATA_LEN) {
                        r |= EMAC_MR1_JPSM;
                        dev->stop_timeout = STOP_TIMEOUT_1000_JUMBO;
                } else
                        dev->stop_timeout = STOP_TIMEOUT_1000;
                break;
        case SPEED_100:
                r |= EMAC_MR1_MF_100;
                dev->stop_timeout = STOP_TIMEOUT_100;
                /* Fall through */
        default:
                r |= EMAC_MR1_RFS_4K;
                gige = 0;
                break;
        }

        if (dev->rgmii_dev)
                rgmii_set_speed(dev->rgmii_dev, dev->rgmii_input,
                                dev->phy.speed);
        else
                zmii_set_speed(dev->zmii_dev, dev->zmii_input, dev->phy.speed);

#if !defined(CONFIG_40x)
        /* An erratum forces us to NOT use integrated flow control on 40x;
         * let's hope it works on 44x ;)
         */
        if (dev->phy.duplex == DUPLEX_FULL) {
                if (dev->phy.pause)
                        r |= EMAC_MR1_EIFC | EMAC_MR1_APP;
                else if (dev->phy.asym_pause)
                        r |= EMAC_MR1_APP;
        }
#endif
        out_be32(&p->mr1, r);

        /* Set individual MAC address */
        out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
        out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
                 (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
                 ndev->dev_addr[5]);

        /* VLAN Tag Protocol ID */
        out_be32(&p->vtpid, 0x8100);

        /* Receive mode register */
        r = emac_iff2rmr(ndev);
        if (r & EMAC_RMR_MAE)
                emac_hash_mc(dev);
        out_be32(&p->rmr, r);

        /* FIFOs thresholds */
        r = EMAC_TMR1((EMAC_MAL_BURST_SIZE / EMAC_FIFO_ENTRY_SIZE) + 1,
                      EMAC_TX_FIFO_SIZE / 2 / EMAC_FIFO_ENTRY_SIZE);
        out_be32(&p->tmr1, r);
        out_be32(&p->trtr, EMAC_TRTR(EMAC_TX_FIFO_SIZE / 2));

        /* A PAUSE frame is sent when the RX FIFO reaches its high-water mark;
           at that point there should still be enough space in the FIFO to
           give our link partner time to process this frame and also time to
           send a PAUSE frame itself.

           Here is the worst-case scenario for the RX FIFO "headroom"
           (from "The Switch Book") (100Mbps, without preamble, inter-frame gap):

           1) One maximum-length frame on TX                    1522 bytes
           2) One PAUSE frame time                                64 bytes
           3) PAUSE frame decode time allowance                   64 bytes
           4) One maximum-length frame on RX                    1522 bytes
           5) Round-trip propagation delay of the link (100Mb)    15 bytes
           ----------------------------------------------------------------
                                                               3187 bytes

           I chose to set the high-water mark to RX_FIFO_SIZE / 4 (1024 bytes)
           and the low-water mark to RX_FIFO_SIZE / 8 (512 bytes)
         */
        r = EMAC_RWMR(EMAC_RX_FIFO_SIZE(gige) / 8 / EMAC_FIFO_ENTRY_SIZE,
                      EMAC_RX_FIFO_SIZE(gige) / 4 / EMAC_FIFO_ENTRY_SIZE);
        out_be32(&p->rwmr, r);

        /* Set PAUSE timer to the maximum */
        out_be32(&p->ptr, 0xffff);

        /* IRQ sources */
        out_be32(&p->iser, EMAC_ISR_TXPE | EMAC_ISR_RXPE | /* EMAC_ISR_TXUE |
                 EMAC_ISR_RXOE | */ EMAC_ISR_OVR | EMAC_ISR_BP | EMAC_ISR_SE |
                 EMAC_ISR_ALE | EMAC_ISR_BFCS | EMAC_ISR_PTLE | EMAC_ISR_ORE |
                 EMAC_ISR_IRE | EMAC_ISR_TE);

        /* We need to take the GPCS PHY out of isolate mode after EMAC reset */
        if (emac_phy_gpcs(dev->phy.mode))
                mii_reset_phy(&dev->phy);

        return 0;
}

/* BHs disabled */
static void emac_reinitialize(struct ocp_enet_private *dev)
{
        DBG("%d: reinitialize" NL, dev->def->index);

        if (!emac_configure(dev)) {
                emac_tx_enable(dev);
                emac_rx_enable(dev);
        }
}

/* BHs disabled */
static void emac_full_tx_reset(struct net_device *ndev)
{
        struct ocp_enet_private *dev = ndev->priv;
        struct ocp_func_emac_data *emacdata = dev->def->additions;

        DBG("%d: full_tx_reset" NL, dev->def->index);

        emac_tx_disable(dev);
        mal_disable_tx_channel(dev->mal, emacdata->mal_tx_chan);
        emac_clean_tx_ring(dev);
        dev->tx_cnt = dev->tx_slot = dev->ack_slot = 0;

        emac_configure(dev);

        mal_enable_tx_channel(dev->mal, emacdata->mal_tx_chan);
        emac_tx_enable(dev);
        emac_rx_enable(dev);

        netif_wake_queue(ndev);
}

static int __emac_mdio_read(struct ocp_enet_private *dev, u8 id, u8 reg)
{
        struct emac_regs __iomem *p = dev->emacp;
        u32 r;
        int n;

        DBG2("%d: mdio_read(%02x,%02x)" NL, dev->def->index, id, reg);

        /* Enable proper MDIO port */
        zmii_enable_mdio(dev->zmii_dev, dev->zmii_input);

        /* Wait for management interface to become idle */
        n = 10;
        while (!emac_phy_done(in_be32(&p->stacr))) {
                udelay(1);
                if (!--n)
                        goto to;
        }

        /* Issue read command */
        out_be32(&p->stacr,
                 EMAC_STACR_BASE(emac_opb_mhz()) | EMAC_STACR_STAC_READ |
                 (reg & EMAC_STACR_PRA_MASK)
                 | ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT)
                 | EMAC_STACR_START);

        /* Wait for read to complete */
        n = 100;
        while (!emac_phy_done(r = in_be32(&p->stacr))) {
                udelay(1);
                if (!--n)
                        goto to;
        }

        if (unlikely(r & EMAC_STACR_PHYE)) {
                DBG("%d: mdio_read(%02x, %02x) failed" NL, dev->def->index,
                    id, reg);
                return -EREMOTEIO;
        }

        r = ((r >> EMAC_STACR_PHYD_SHIFT) & EMAC_STACR_PHYD_MASK);
        DBG2("%d: mdio_read -> %04x" NL, dev->def->index, r);
        return r;
      to:
        DBG("%d: MII management interface timeout (read)" NL, dev->def->index);
        return -ETIMEDOUT;
}

static void __emac_mdio_write(struct ocp_enet_private *dev, u8 id, u8 reg,
                              u16 val)
{
        struct emac_regs __iomem *p = dev->emacp;
        int n;

        DBG2("%d: mdio_write(%02x,%02x,%04x)" NL, dev->def->index, id, reg,
             val);

        /* Enable proper MDIO port */
        zmii_enable_mdio(dev->zmii_dev, dev->zmii_input);

        /* Wait for management interface to be idle */
        n = 10;
        while (!emac_phy_done(in_be32(&p->stacr))) {
                udelay(1);
                if (!--n)
                        goto to;
        }

        /* Issue write command */
        out_be32(&p->stacr,
                 EMAC_STACR_BASE(emac_opb_mhz()) | EMAC_STACR_STAC_WRITE |
                 (reg & EMAC_STACR_PRA_MASK) |
                 ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT) |
                 (val << EMAC_STACR_PHYD_SHIFT) | EMAC_STACR_START);

        /* Wait for write to complete */
        n = 100;
        while (!emac_phy_done(in_be32(&p->stacr))) {
                udelay(1);
                if (!--n)
                        goto to;
        }
        return;
      to:
        DBG("%d: MII management interface timeout (write)" NL, dev->def->index);
}

static int emac_mdio_read(struct net_device *ndev, int id, int reg)
{
        struct ocp_enet_private *dev = ndev->priv;
        int res;

        local_bh_disable();
        res = __emac_mdio_read(dev->mdio_dev ? dev->mdio_dev : dev, (u8) id,
                               (u8) reg);
        local_bh_enable();
        return res;
}

static void emac_mdio_write(struct net_device *ndev, int id, int reg, int val)
{
        struct ocp_enet_private *dev = ndev->priv;

        local_bh_disable();
        __emac_mdio_write(dev->mdio_dev ? dev->mdio_dev : dev, (u8) id,
                          (u8) reg, (u16) val);
        local_bh_enable();
}

/* BHs disabled */
static void emac_set_multicast_list(struct net_device *ndev)
{
        struct ocp_enet_private *dev = ndev->priv;
        struct emac_regs __iomem *p = dev->emacp;
        u32 rmr = emac_iff2rmr(ndev);

        DBG("%d: multicast %08x" NL, dev->def->index, rmr);
        BUG_ON(!netif_running(dev->ndev));

        /* I decided to relax the register access rules here to avoid
         * a full EMAC reset.
         *
         * There is a real problem with the EMAC4 core if we use the MWSW_001
         * bit in the MR1 register and do a full EMAC reset.
         * One TX BD status update is delayed and, after the EMAC reset, it
         * never happens, resulting in a TX hang (it'll be recovered by the TX
         * timeout handler eventually, but this is just gross).
         * So we either have to do a full TX reset or try to cheat here :)
         *
         * The only required change is to the RX mode register, so I *think*
         * all we need is just to stop the RX channel. This seems to work on
         * all tested SoCs.                                            --ebs
         */
        emac_rx_disable(dev);
        if (rmr & EMAC_RMR_MAE)
                emac_hash_mc(dev);
        out_be32(&p->rmr, rmr);
        emac_rx_enable(dev);
}

/* BHs disabled */
static int emac_resize_rx_ring(struct ocp_enet_private *dev, int new_mtu)
{
        struct ocp_func_emac_data *emacdata = dev->def->additions;
        int rx_sync_size = emac_rx_sync_size(new_mtu);
        int rx_skb_size = emac_rx_skb_size(new_mtu);
        int i, ret = 0;

        emac_rx_disable(dev);
        mal_disable_rx_channel(dev->mal, emacdata->mal_rx_chan);

        if (dev->rx_sg_skb) {
                ++dev->estats.rx_dropped_resize;
                dev_kfree_skb(dev->rx_sg_skb);
                dev->rx_sg_skb = NULL;
        }

        /* Make a first pass over the RX ring and mark BDs ready, dropping
         * non-processed packets on the way. We need this as a separate pass
         * to simplify error recovery in the case of allocation failure later.
         */
        for (i = 0; i < NUM_RX_BUFF; ++i) {
                if (dev->rx_desc[i].ctrl & MAL_RX_CTRL_FIRST)
                        ++dev->estats.rx_dropped_resize;

                dev->rx_desc[i].data_len = 0;
                dev->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY |
                    (i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
        }

        /* Reallocate the RX ring only if bigger skb buffers are required */
        if (rx_skb_size <= dev->rx_skb_size)
                goto skip;

        /* Second pass, allocate new skbs */
        for (i = 0; i < NUM_RX_BUFF; ++i) {
                struct sk_buff *skb = alloc_skb(rx_skb_size, GFP_ATOMIC);
                if (!skb) {
                        ret = -ENOMEM;
                        goto oom;
                }

                BUG_ON(!dev->rx_skb[i]);
                dev_kfree_skb(dev->rx_skb[i]);

                skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
                dev->rx_desc[i].data_ptr =
                    dma_map_single(dev->ldev, skb->data - 2, rx_sync_size,
                                   DMA_FROM_DEVICE) + 2;
                dev->rx_skb[i] = skb;
        }
      skip:
        /* Check if we need to change the "Jumbo" bit in MR1 */
        if ((new_mtu > ETH_DATA_LEN) ^ (dev->ndev->mtu > ETH_DATA_LEN)) {
                /* This is to prevent starting RX channel in emac_rx_enable() */
                dev->commac.rx_stopped = 1;

                dev->ndev->mtu = new_mtu;
                emac_full_tx_reset(dev->ndev);
        }

        mal_set_rcbs(dev->mal, emacdata->mal_rx_chan, emac_rx_size(new_mtu));
      oom:
        /* Restart RX */
        dev->commac.rx_stopped = dev->rx_slot = 0;
        mal_enable_rx_channel(dev->mal, emacdata->mal_rx_chan);
        emac_rx_enable(dev);

        return ret;
}

/* Process ctx, rtnl_lock semaphore */
static int emac_change_mtu(struct net_device *ndev, int new_mtu)
{
        struct ocp_enet_private *dev = ndev->priv;
        int ret = 0;

        if (new_mtu < EMAC_MIN_MTU || new_mtu > EMAC_MAX_MTU)
                return -EINVAL;

        DBG("%d: change_mtu(%d)" NL, dev->def->index, new_mtu);

        local_bh_disable();
        if (netif_running(ndev)) {
                /* Check if we really need to reinitialize the RX ring */
                if (emac_rx_skb_size(ndev->mtu) != emac_rx_skb_size(new_mtu))
                        ret = emac_resize_rx_ring(dev, new_mtu);
        }

        if (!ret) {
                ndev->mtu = new_mtu;
                dev->rx_skb_size = emac_rx_skb_size(new_mtu);
                dev->rx_sync_size = emac_rx_sync_size(new_mtu);
        }
        local_bh_enable();

        return ret;
}

static void emac_clean_tx_ring(struct ocp_enet_private *dev)
{
        int i;
        for (i = 0; i < NUM_TX_BUFF; ++i) {
                if (dev->tx_skb[i]) {
                        dev_kfree_skb(dev->tx_skb[i]);
                        dev->tx_skb[i] = NULL;
                        if (dev->tx_desc[i].ctrl & MAL_TX_CTRL_READY)
                                ++dev->estats.tx_dropped;
                }
                dev->tx_desc[i].ctrl = 0;
                dev->tx_desc[i].data_ptr = 0;
        }
}

static void emac_clean_rx_ring(struct ocp_enet_private *dev)
{
        int i;
        for (i = 0; i < NUM_RX_BUFF; ++i)
                if (dev->rx_skb[i]) {
                        dev->rx_desc[i].ctrl = 0;
                        dev_kfree_skb(dev->rx_skb[i]);
                        dev->rx_skb[i] = NULL;
                        dev->rx_desc[i].data_ptr = 0;
                }

        if (dev->rx_sg_skb) {
                dev_kfree_skb(dev->rx_sg_skb);
                dev->rx_sg_skb = NULL;
        }
}

static inline int emac_alloc_rx_skb(struct ocp_enet_private *dev, int slot,
                                    gfp_t flags)
{
        struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags);
        if (unlikely(!skb))
                return -ENOMEM;

        dev->rx_skb[slot] = skb;
        dev->rx_desc[slot].data_len = 0;

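        /* Reserve 2 bytes so the IP header ends up word-aligned; the region
         * is mapped from skb->data - 2 (keeping the mapping aligned), so 2
         * is added back to get the bus address of the actual data.
         */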
        skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
        dev->rx_desc[slot].data_ptr =
            dma_map_single(dev->ldev, skb->data - 2, dev->rx_sync_size,
                           DMA_FROM_DEVICE) + 2;
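        /* Make sure data_ptr/data_len are written before the BD is marked
         * empty, so MAL never sees a half-initialized descriptor.
         */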
        barrier();
        dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
            (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);

        return 0;
}

static void emac_print_link_status(struct ocp_enet_private *dev)
{
        if (netif_carrier_ok(dev->ndev))
                printk(KERN_INFO "%s: link is up, %d %s%s\n",
                       dev->ndev->name, dev->phy.speed,
                       dev->phy.duplex == DUPLEX_FULL ? "FDX" : "HDX",
                       dev->phy.pause ? ", pause enabled" :
                       dev->phy.asym_pause ? ", asymmetric pause enabled" : "");
        else
                printk(KERN_INFO "%s: link is down\n", dev->ndev->name);
}

/* Process ctx, rtnl_lock semaphore */
static int emac_open(struct net_device *ndev)
{
        struct ocp_enet_private *dev = ndev->priv;
        struct ocp_func_emac_data *emacdata = dev->def->additions;
        int err, i;

        DBG("%d: open" NL, dev->def->index);

        /* Set up the error IRQ handler */
        err = request_irq(dev->def->irq, emac_irq, 0, "EMAC", dev);
        if (err) {
                printk(KERN_ERR "%s: failed to request IRQ %d\n",
                       ndev->name, dev->def->irq);
                return err;
        }

        /* Allocate RX ring */
        for (i = 0; i < NUM_RX_BUFF; ++i)
                if (emac_alloc_rx_skb(dev, i, GFP_KERNEL)) {
                        printk(KERN_ERR "%s: failed to allocate RX ring\n",
                               ndev->name);
                        goto oom;
                }

        local_bh_disable();
        dev->tx_cnt = dev->tx_slot = dev->ack_slot = dev->rx_slot =
            dev->commac.rx_stopped = 0;
        dev->rx_sg_skb = NULL;

        if (dev->phy.address >= 0) {
                int link_poll_interval;
                if (dev->phy.def->ops->poll_link(&dev->phy)) {
                        dev->phy.def->ops->read_link(&dev->phy);
                        EMAC_RX_CLK_DEFAULT(dev->def->index);
                        netif_carrier_on(dev->ndev);
                        link_poll_interval = PHY_POLL_LINK_ON;
                } else {
                        EMAC_RX_CLK_TX(dev->def->index);
                        netif_carrier_off(dev->ndev);
                        link_poll_interval = PHY_POLL_LINK_OFF;
                }
                mod_timer(&dev->link_timer, jiffies + link_poll_interval);
                emac_print_link_status(dev);
        } else
                netif_carrier_on(dev->ndev);

        emac_configure(dev);
        mal_poll_add(dev->mal, &dev->commac);
        mal_enable_tx_channel(dev->mal, emacdata->mal_tx_chan);
        mal_set_rcbs(dev->mal, emacdata->mal_rx_chan, emac_rx_size(ndev->mtu));
        mal_enable_rx_channel(dev->mal, emacdata->mal_rx_chan);
        emac_tx_enable(dev);
        emac_rx_enable(dev);
        netif_start_queue(ndev);
        local_bh_enable();

        return 0;
      oom:
        emac_clean_rx_ring(dev);
        free_irq(dev->def->irq, dev);
        return -ENOMEM;
}

/* BHs disabled */
static int emac_link_differs(struct ocp_enet_private *dev)
{
        u32 r = in_be32(&dev->emacp->mr1);

        int duplex = r & EMAC_MR1_FDE ? DUPLEX_FULL : DUPLEX_HALF;
        int speed, pause, asym_pause;

        if (r & EMAC_MR1_MF_1000)
                speed = SPEED_1000;
        else if (r & EMAC_MR1_MF_100)
                speed = SPEED_100;
        else
                speed = SPEED_10;

        switch (r & (EMAC_MR1_EIFC | EMAC_MR1_APP)) {
        case (EMAC_MR1_EIFC | EMAC_MR1_APP):
                pause = 1;
                asym_pause = 0;
                break;
        case EMAC_MR1_APP:
                pause = 0;
                asym_pause = 1;
                break;
        default:
                pause = asym_pause = 0;
        }
        return speed != dev->phy.speed || duplex != dev->phy.duplex ||
            pause != dev->phy.pause || asym_pause != dev->phy.asym_pause;
}

/* BHs disabled */
static void emac_link_timer(unsigned long data)
{
        struct ocp_enet_private *dev = (struct ocp_enet_private *)data;
        int link_poll_interval;

        DBG2("%d: link timer" NL, dev->def->index);

        if (dev->phy.def->ops->poll_link(&dev->phy)) {
                if (!netif_carrier_ok(dev->ndev)) {
                        EMAC_RX_CLK_DEFAULT(dev->def->index);

                        /* Get new link parameters */
                        dev->phy.def->ops->read_link(&dev->phy);

                        if (dev->tah_dev || emac_link_differs(dev))
                                emac_full_tx_reset(dev->ndev);

                        netif_carrier_on(dev->ndev);
                        emac_print_link_status(dev);
                }
                link_poll_interval = PHY_POLL_LINK_ON;
        } else {
                if (netif_carrier_ok(dev->ndev)) {
                        EMAC_RX_CLK_TX(dev->def->index);
#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX)
                        emac_reinitialize(dev);
#endif
                        netif_carrier_off(dev->ndev);
                        emac_print_link_status(dev);
                }

                /* Retry the reset if the previous attempt failed.
                 * This is needed mostly for the CONFIG_IBM_EMAC_PHY_RX_CLK_FIX
                 * case, but I left it here because it shouldn't trigger for
                 * sane PHYs anyway.
                 */
                if (unlikely(dev->reset_failed))
                        emac_reinitialize(dev);

                link_poll_interval = PHY_POLL_LINK_OFF;
        }
        mod_timer(&dev->link_timer, jiffies + link_poll_interval);
}

/* BHs disabled */
static void emac_force_link_update(struct ocp_enet_private *dev)
{
        netif_carrier_off(dev->ndev);
        if (timer_pending(&dev->link_timer))
                mod_timer(&dev->link_timer, jiffies + PHY_POLL_LINK_OFF);
}

/* Process ctx, rtnl_lock semaphore */
static int emac_close(struct net_device *ndev)
{
        struct ocp_enet_private *dev = ndev->priv;
        struct ocp_func_emac_data *emacdata = dev->def->additions;

        DBG("%d: close" NL, dev->def->index);

        local_bh_disable();

        if (dev->phy.address >= 0)
                del_timer_sync(&dev->link_timer);

        netif_stop_queue(ndev);
        emac_rx_disable(dev);
        emac_tx_disable(dev);
        mal_disable_rx_channel(dev->mal, emacdata->mal_rx_chan);
        mal_disable_tx_channel(dev->mal, emacdata->mal_tx_chan);
        mal_poll_del(dev->mal, &dev->commac);
        local_bh_enable();

        emac_clean_tx_ring(dev);
        emac_clean_rx_ring(dev);
        free_irq(dev->def->irq, dev);

        return 0;
}

static inline u16 emac_tx_csum(struct ocp_enet_private *dev,
                               struct sk_buff *skb)
{
#if defined(CONFIG_IBM_EMAC_TAH)
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                ++dev->stats.tx_packets_csum;
                return EMAC_TX_CTRL_TAH_CSUM;
        }
#endif
        return 0;
}

static inline int emac_xmit_finish(struct ocp_enet_private *dev, int len)
{
        struct emac_regs __iomem *p = dev->emacp;
        struct net_device *ndev = dev->ndev;

        /* Send the packet out */
        out_be32(&p->tmr0, EMAC_TMR0_XMIT);

        if (unlikely(++dev->tx_cnt == NUM_TX_BUFF)) {
                netif_stop_queue(ndev);
                DBG2("%d: stopped TX queue" NL, dev->def->index);
        }

        ndev->trans_start = jiffies;
        ++dev->stats.tx_packets;
        dev->stats.tx_bytes += len;

        return 0;
}

/* BHs disabled */
static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
        struct ocp_enet_private *dev = ndev->priv;
        unsigned int len = skb->len;
        int slot;

        u16 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
            MAL_TX_CTRL_LAST | emac_tx_csum(dev, skb);

        slot = dev->tx_slot++;
        if (dev->tx_slot == NUM_TX_BUFF) {
                dev->tx_slot = 0;
                ctrl |= MAL_TX_CTRL_WRAP;
        }

        DBG2("%d: xmit(%u) %d" NL, dev->def->index, len, slot);

        dev->tx_skb[slot] = skb;
        dev->tx_desc[slot].data_ptr = dma_map_single(dev->ldev, skb->data, len,
                                                     DMA_TO_DEVICE);
        dev->tx_desc[slot].data_len = (u16) len;
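        /* Order the descriptor writes: MAL must not see MAL_TX_CTRL_READY
         * before data_ptr and data_len are valid.
         */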
        barrier();
        dev->tx_desc[slot].ctrl = ctrl;

        return emac_xmit_finish(dev, len);
}

#if defined(CONFIG_IBM_EMAC_TAH)
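/* Split a physically-contiguous chunk into as many BDs as needed, each
 * carrying at most MAL_MAX_TX_SIZE bytes; returns the last slot used.
 */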
static inline int emac_xmit_split(struct ocp_enet_private *dev, int slot,
                                  u32 pd, int len, int last, u16 base_ctrl)
{
        while (1) {
                u16 ctrl = base_ctrl;
                int chunk = min(len, MAL_MAX_TX_SIZE);
                len -= chunk;

                slot = (slot + 1) % NUM_TX_BUFF;

                if (last && !len)
                        ctrl |= MAL_TX_CTRL_LAST;
                if (slot == NUM_TX_BUFF - 1)
                        ctrl |= MAL_TX_CTRL_WRAP;

                dev->tx_skb[slot] = NULL;
                dev->tx_desc[slot].data_ptr = pd;
                dev->tx_desc[slot].data_len = (u16) chunk;
                dev->tx_desc[slot].ctrl = ctrl;
                ++dev->tx_cnt;

                if (!len)
                        break;

                pd += chunk;
        }
        return slot;
}

/* BHs disabled (SG version for TAH-equipped EMACs) */
static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
{
        struct ocp_enet_private *dev = ndev->priv;
        int nr_frags = skb_shinfo(skb)->nr_frags;
        int len = skb->len, chunk;
        int slot, i;
        u16 ctrl;
        u32 pd;

        /* This is the common "fast" path */
        if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE))
                return emac_start_xmit(skb, ndev);

        len -= skb->data_len;

        /* Note, this is only an *estimation*; we can still run out of empty
         * slots because of the additional fragmentation into
         * MAL_MAX_TX_SIZE-sized chunks
         */
        if (unlikely(dev->tx_cnt + nr_frags + mal_tx_chunks(len) > NUM_TX_BUFF))
                goto stop_queue;

        ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
            emac_tx_csum(dev, skb);
        slot = dev->tx_slot;

        /* skb data */
        dev->tx_skb[slot] = NULL;
        chunk = min(len, MAL_MAX_TX_SIZE);
        dev->tx_desc[slot].data_ptr = pd =
            dma_map_single(dev->ldev, skb->data, len, DMA_TO_DEVICE);
        dev->tx_desc[slot].data_len = (u16) chunk;
        len -= chunk;
        if (unlikely(len))
                slot = emac_xmit_split(dev, slot, pd + chunk, len, !nr_frags,
                                       ctrl);
        /* skb fragments */
        for (i = 0; i < nr_frags; ++i) {
                struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
                len = frag->size;

                if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
                        goto undo_frame;

                pd = dma_map_page(dev->ldev, frag->page, frag->page_offset, len,
                                  DMA_TO_DEVICE);

                slot = emac_xmit_split(dev, slot, pd, len, i == nr_frags - 1,
                                       ctrl);
        }

        DBG2("%d: xmit_sg(%u) %d - %d" NL, dev->def->index, skb->len,
             dev->tx_slot, slot);

        /* Attach the skb to the last slot so we don't release it too early */
        dev->tx_skb[slot] = skb;

        /* Send the packet out */
        if (dev->tx_slot == NUM_TX_BUFF - 1)
                ctrl |= MAL_TX_CTRL_WRAP;
        barrier();
        dev->tx_desc[dev->tx_slot].ctrl = ctrl;
        dev->tx_slot = (slot + 1) % NUM_TX_BUFF;

        return emac_xmit_finish(dev, skb->len);

      undo_frame:
        /* Well, too bad. Our previous estimation was overly optimistic.
         * Undo everything.
         */
        while (slot != dev->tx_slot) {
                dev->tx_desc[slot].ctrl = 0;
                --dev->tx_cnt;
                if (--slot < 0)
                        slot = NUM_TX_BUFF - 1;
        }
        ++dev->estats.tx_undo;

      stop_queue:
        netif_stop_queue(ndev);
        DBG2("%d: stopped TX queue" NL, dev->def->index);
        return 1;
}
#else
# define emac_start_xmit_sg     emac_start_xmit
#endif  /* !defined(CONFIG_IBM_EMAC_TAH) */

/* BHs disabled */
static void emac_parse_tx_error(struct ocp_enet_private *dev, u16 ctrl)
{
        struct ibm_emac_error_stats *st = &dev->estats;
        DBG("%d: BD TX error %04x" NL, dev->def->index, ctrl);

        ++st->tx_bd_errors;
        if (ctrl & EMAC_TX_ST_BFCS)
                ++st->tx_bd_bad_fcs;
        if (ctrl & EMAC_TX_ST_LCS)
                ++st->tx_bd_carrier_loss;
        if (ctrl & EMAC_TX_ST_ED)
                ++st->tx_bd_excessive_deferral;
        if (ctrl & EMAC_TX_ST_EC)
                ++st->tx_bd_excessive_collisions;
        if (ctrl & EMAC_TX_ST_LC)
                ++st->tx_bd_late_collision;
        if (ctrl & EMAC_TX_ST_MC)
                ++st->tx_bd_multple_collisions;
        if (ctrl & EMAC_TX_ST_SC)
                ++st->tx_bd_single_collision;
        if (ctrl & EMAC_TX_ST_UR)
                ++st->tx_bd_underrun;
        if (ctrl & EMAC_TX_ST_SQE)
                ++st->tx_bd_sqe;
}

static void emac_poll_tx(void *param)
{
        struct ocp_enet_private *dev = param;
        DBG2("%d: poll_tx, %d %d" NL, dev->def->index, dev->tx_cnt,
             dev->ack_slot);

        if (dev->tx_cnt) {
                u16 ctrl;
                int slot = dev->ack_slot, n = 0;
              again:
                ctrl = dev->tx_desc[slot].ctrl;
                if (!(ctrl & MAL_TX_CTRL_READY)) {
                        struct sk_buff *skb = dev->tx_skb[slot];
                        ++n;

                        if (skb) {
                                dev_kfree_skb(skb);
                                dev->tx_skb[slot] = NULL;
                        }
                        slot = (slot + 1) % NUM_TX_BUFF;

                        if (unlikely(EMAC_IS_BAD_TX(ctrl)))
                                emac_parse_tx_error(dev, ctrl);

                        if (--dev->tx_cnt)
                                goto again;
                }
                if (n) {
                        dev->ack_slot = slot;
                        if (netif_queue_stopped(dev->ndev) &&
                            dev->tx_cnt < EMAC_TX_WAKEUP_THRESH)
                                netif_wake_queue(dev->ndev);

                        DBG2("%d: tx %d pkts" NL, dev->def->index, n);
                }
        }
}

static inline void emac_recycle_rx_skb(struct ocp_enet_private *dev, int slot,
                                       int len)
{
        struct sk_buff *skb = dev->rx_skb[slot];
        DBG2("%d: recycle %d %d" NL, dev->def->index, slot, len);

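        /* On 4xx the DMA map is just a cache operation (see the note about
         * dma_unmap at the top of this file), so re-mapping the buffer here
         * is a cheap way to make it DMA-safe for reuse.
         */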
        if (len)
                dma_map_single(dev->ldev, skb->data - 2,
                               EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE);

        dev->rx_desc[slot].data_len = 0;
        barrier();
        dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
            (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
}

static void emac_parse_rx_error(struct ocp_enet_private *dev, u16 ctrl)
{
        struct ibm_emac_error_stats *st = &dev->estats;
        DBG("%d: BD RX error %04x" NL, dev->def->index, ctrl);

        ++st->rx_bd_errors;
        if (ctrl & EMAC_RX_ST_OE)
                ++st->rx_bd_overrun;
        if (ctrl & EMAC_RX_ST_BP)
                ++st->rx_bd_bad_packet;
        if (ctrl & EMAC_RX_ST_RP)
                ++st->rx_bd_runt_packet;
        if (ctrl & EMAC_RX_ST_SE)
                ++st->rx_bd_short_event;
        if (ctrl & EMAC_RX_ST_AE)
                ++st->rx_bd_alignment_error;
        if (ctrl & EMAC_RX_ST_BFCS)
                ++st->rx_bd_bad_fcs;
        if (ctrl & EMAC_RX_ST_PTL)
                ++st->rx_bd_packet_too_long;
        if (ctrl & EMAC_RX_ST_ORE)
                ++st->rx_bd_out_of_range;
        if (ctrl & EMAC_RX_ST_IRE)
                ++st->rx_bd_in_range;
}

static inline void emac_rx_csum(struct ocp_enet_private *dev,
                                struct sk_buff *skb, u16 ctrl)
{
#if defined(CONFIG_IBM_EMAC_TAH)
        if (!ctrl && dev->tah_dev) {
                skb->ip_summed = CHECKSUM_UNNECESSARY;
                ++dev->stats.rx_packets_csum;
        }
#endif
}

static inline int emac_rx_sg_append(struct ocp_enet_private *dev, int slot)
{
        if (likely(dev->rx_sg_skb != NULL)) {
                int len = dev->rx_desc[slot].data_len;
                int tot_len = dev->rx_sg_skb->len + len;

                if (unlikely(tot_len + 2 > dev->rx_skb_size)) {
                        ++dev->estats.rx_dropped_mtu;
                        dev_kfree_skb(dev->rx_sg_skb);
                        dev->rx_sg_skb = NULL;
                } else {
                        cacheable_memcpy(skb_tail_pointer(dev->rx_sg_skb),
                                         dev->rx_skb[slot]->data, len);
                        skb_put(dev->rx_sg_skb, len);
                        emac_recycle_rx_skb(dev, slot, len);
                        return 0;
                }
        }
        emac_recycle_rx_skb(dev, slot, 0);
        return -1;
}

/* BHs disabled */
static int emac_poll_rx(void *param, int budget)
{
        struct ocp_enet_private *dev = param;
        int slot = dev->rx_slot, received = 0;

        DBG2("%d: poll_rx(%d)" NL, dev->def->index, budget);

      again:
        while (budget > 0) {
                int len;
                struct sk_buff *skb;
                u16 ctrl = dev->rx_desc[slot].ctrl;

                if (ctrl & MAL_RX_CTRL_EMPTY)
                        break;

                skb = dev->rx_skb[slot];
                barrier();
                len = dev->rx_desc[slot].data_len;

                if (unlikely(!MAL_IS_SINGLE_RX(ctrl)))
                        goto sg;

                ctrl &= EMAC_BAD_RX_MASK;
                if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
                        emac_parse_rx_error(dev, ctrl);
                        ++dev->estats.rx_dropped_error;
                        emac_recycle_rx_skb(dev, slot, 0);
                        len = 0;
                        goto next;
                }

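                /* "Copybreak": for small packets it is cheaper to copy the
                 * data into a fresh small skb and recycle the original big
                 * RX buffer than to allocate a new full-sized one.
                 */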
                if (len && len < EMAC_RX_COPY_THRESH) {
                        struct sk_buff *copy_skb =
                            alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2, GFP_ATOMIC);
                        if (unlikely(!copy_skb))
                                goto oom;

                        skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2);
                        cacheable_memcpy(copy_skb->data - 2, skb->data - 2,
                                         len + 2);
                        emac_recycle_rx_skb(dev, slot, len);
                        skb = copy_skb;
                } else if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC)))
                        goto oom;

                skb_put(skb, len);
              push_packet:
                skb->protocol = eth_type_trans(skb, dev->ndev);
                emac_rx_csum(dev, skb, ctrl);

                if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
                        ++dev->estats.rx_dropped_stack;
              next:
                ++dev->stats.rx_packets;
              skip:
                dev->stats.rx_bytes += len;
                slot = (slot + 1) % NUM_RX_BUFF;
                --budget;
                ++received;
                continue;
              sg:
                if (ctrl & MAL_RX_CTRL_FIRST) {
                        BUG_ON(dev->rx_sg_skb);
                        if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) {
                                DBG("%d: rx OOM %d" NL, dev->def->index, slot);
                                ++dev->estats.rx_dropped_oom;
                                emac_recycle_rx_skb(dev, slot, 0);
                        } else {
                                dev->rx_sg_skb = skb;
                                skb_put(skb, len);
                        }
                } else if (!emac_rx_sg_append(dev, slot) &&
                           (ctrl & MAL_RX_CTRL_LAST)) {

                        skb = dev->rx_sg_skb;
                        dev->rx_sg_skb = NULL;

                        ctrl &= EMAC_BAD_RX_MASK;
                        if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
                                emac_parse_rx_error(dev, ctrl);
                                ++dev->estats.rx_dropped_error;
                                dev_kfree_skb(skb);
                                len = 0;
                        } else
                                goto push_packet;
                }
                goto skip;
              oom:
                DBG("%d: rx OOM %d" NL, dev->def->index, slot);
                /* Drop the packet and recycle skb */
                ++dev->estats.rx_dropped_oom;
                emac_recycle_rx_skb(dev, slot, 0);
                goto next;
        }

        if (received) {
                DBG2("%d: rx %d BDs" NL, dev->def->index, received);
                dev->rx_slot = slot;
        }

        if (unlikely(budget && dev->commac.rx_stopped)) {
                struct ocp_func_emac_data *emacdata = dev->def->additions;

                barrier();
                if (!(dev->rx_desc[slot].ctrl & MAL_RX_CTRL_EMPTY)) {
                        DBG2("%d: rx restart" NL, dev->def->index);
                        received = 0;
                        goto again;
                }

                if (dev->rx_sg_skb) {
                        DBG2("%d: dropping partial rx packet" NL,
                             dev->def->index);
                        ++dev->estats.rx_dropped_error;
                        dev_kfree_skb(dev->rx_sg_skb);
                        dev->rx_sg_skb = NULL;
                }

                dev->commac.rx_stopped = 0;
                mal_enable_rx_channel(dev->mal, emacdata->mal_rx_chan);
                emac_rx_enable(dev);
                dev->rx_slot = 0;
        }
        return received;
}
1477
1478 /* BHs disabled */
1479 static int emac_peek_rx(void *param)
1480 {
1481         struct ocp_enet_private *dev = param;
1482         return !(dev->rx_desc[dev->rx_slot].ctrl & MAL_RX_CTRL_EMPTY);
1483 }
1484
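/*
 * SG variant of peek_rx: a frame is only deliverable once its LAST
 * descriptor has completed, so walk forward from rx_slot until we hit
 * either an EMPTY BD (nothing ready) or a LAST BD (full frame ready).
 */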
1485 /* BHs disabled */
1486 static int emac_peek_rx_sg(void *param)
1487 {
1488         struct ocp_enet_private *dev = param;
1489         int slot = dev->rx_slot;
1490         while (1) {
1491                 u16 ctrl = dev->rx_desc[slot].ctrl;
1492                 if (ctrl & MAL_RX_CTRL_EMPTY)
1493                         return 0;
1494                 else if (ctrl & MAL_RX_CTRL_LAST)
1495                         return 1;
1496
1497                 slot = (slot + 1) % NUM_RX_BUFF;
1498
1499                 /* I'm just being paranoid here :) */
1500                 if (unlikely(slot == dev->rx_slot))
1501                         return 0;
1502         }
1503 }
1504
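/*
 * MAL signalled a receive descriptor error: stop RX asynchronously (we
 * are in hard IRQ context) and let the poll path above restart the
 * channel once the ring has been drained.
 */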
1505 /* Hard IRQ */
1506 static void emac_rxde(void *param)
1507 {
1508         struct ocp_enet_private *dev = param;
1509         ++dev->estats.rx_stopped;
1510         emac_rx_disable_async(dev);
1511 }
1512
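/*
 * EMAC error/status interrupt: we only account the events here and
 * acknowledge them by writing the ISR value back; any actual recovery
 * happens outside hard IRQ context.
 */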
1513 /* Hard IRQ */
1514 static irqreturn_t emac_irq(int irq, void *dev_instance)
1515 {
1516         struct ocp_enet_private *dev = dev_instance;
1517         struct emac_regs __iomem *p = dev->emacp;
1518         struct ibm_emac_error_stats *st = &dev->estats;
1519
1520         u32 isr = in_be32(&p->isr);
1521         out_be32(&p->isr, isr);
1522
1523         DBG("%d: isr = %08x" NL, dev->def->index, isr);
1524
1525         if (isr & EMAC_ISR_TXPE)
1526                 ++st->tx_parity;
1527         if (isr & EMAC_ISR_RXPE)
1528                 ++st->rx_parity;
1529         if (isr & EMAC_ISR_TXUE)
1530                 ++st->tx_underrun;
1531         if (isr & EMAC_ISR_RXOE)
1532                 ++st->rx_fifo_overrun;
1533         if (isr & EMAC_ISR_OVR)
1534                 ++st->rx_overrun;
1535         if (isr & EMAC_ISR_BP)
1536                 ++st->rx_bad_packet;
1537         if (isr & EMAC_ISR_RP)
1538                 ++st->rx_runt_packet;
1539         if (isr & EMAC_ISR_SE)
1540                 ++st->rx_short_event;
1541         if (isr & EMAC_ISR_ALE)
1542                 ++st->rx_alignment_error;
1543         if (isr & EMAC_ISR_BFCS)
1544                 ++st->rx_bad_fcs;
1545         if (isr & EMAC_ISR_PTLE)
1546                 ++st->rx_packet_too_long;
1547         if (isr & EMAC_ISR_ORE)
1548                 ++st->rx_out_of_range;
1549         if (isr & EMAC_ISR_IRE)
1550                 ++st->rx_in_range;
1551         if (isr & EMAC_ISR_SQE)
1552                 ++st->tx_sqe;
1553         if (isr & EMAC_ISR_TE)
1554                 ++st->tx_errors;
1555
1556         return IRQ_HANDLED;
1557 }
1558
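/*
 * Fold the driver's u64 counters into the legacy net_device_stats.
 * Interrupts are disabled for the duration so the counters cannot be
 * updated underneath us mid-snapshot.
 */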
1559 static struct net_device_stats *emac_stats(struct net_device *ndev)
1560 {
1561         struct ocp_enet_private *dev = ndev->priv;
1562         struct ibm_emac_stats *st = &dev->stats;
1563         struct ibm_emac_error_stats *est = &dev->estats;
1564         struct net_device_stats *nst = &dev->nstats;
1565
1566         DBG2("%d: stats" NL, dev->def->index);
1567
1568         /* Compute "legacy" statistics */
1569         local_irq_disable();
1570         nst->rx_packets = (unsigned long)st->rx_packets;
1571         nst->rx_bytes = (unsigned long)st->rx_bytes;
1572         nst->tx_packets = (unsigned long)st->tx_packets;
1573         nst->tx_bytes = (unsigned long)st->tx_bytes;
1574         nst->rx_dropped = (unsigned long)(est->rx_dropped_oom +
1575                                           est->rx_dropped_error +
1576                                           est->rx_dropped_resize +
1577                                           est->rx_dropped_mtu);
1578         nst->tx_dropped = (unsigned long)est->tx_dropped;
1579
1580         nst->rx_errors = (unsigned long)est->rx_bd_errors;
1581         nst->rx_fifo_errors = (unsigned long)(est->rx_bd_overrun +
1582                                               est->rx_fifo_overrun +
1583                                               est->rx_overrun);
1584         nst->rx_frame_errors = (unsigned long)(est->rx_bd_alignment_error +
1585                                                est->rx_alignment_error);
1586         nst->rx_crc_errors = (unsigned long)(est->rx_bd_bad_fcs +
1587                                              est->rx_bad_fcs);
1588         nst->rx_length_errors = (unsigned long)(est->rx_bd_runt_packet +
1589                                                 est->rx_bd_short_event +
1590                                                 est->rx_bd_packet_too_long +
1591                                                 est->rx_bd_out_of_range +
1592                                                 est->rx_bd_in_range +
1593                                                 est->rx_runt_packet +
1594                                                 est->rx_short_event +
1595                                                 est->rx_packet_too_long +
1596                                                 est->rx_out_of_range +
1597                                                 est->rx_in_range);
1598
1599         nst->tx_errors = (unsigned long)(est->tx_bd_errors + est->tx_errors);
1600         nst->tx_fifo_errors = (unsigned long)(est->tx_bd_underrun +
1601                                               est->tx_underrun);
1602         nst->tx_carrier_errors = (unsigned long)est->tx_bd_carrier_loss;
1603         nst->collisions = (unsigned long)(est->tx_bd_excessive_deferral +
1604                                           est->tx_bd_excessive_collisions +
1605                                           est->tx_bd_late_collision +
1606                                           est->tx_bd_multple_collisions);
1607         local_irq_enable();
1608         return nst;
1609 }
1610
1611 static void emac_remove(struct ocp_device *ocpdev)
1612 {
1613         struct ocp_enet_private *dev = ocp_get_drvdata(ocpdev);
1614
1615         DBG("%d: remove" NL, dev->def->index);
1616
1617         ocp_set_drvdata(ocpdev, NULL);
1618         unregister_netdev(dev->ndev);
1619
1620         tah_fini(dev->tah_dev);
1621         rgmii_fini(dev->rgmii_dev, dev->rgmii_input);
1622         zmii_fini(dev->zmii_dev, dev->zmii_input);
1623
1624         emac_dbg_register(dev->def->index, NULL);
1625
1626         mal_unregister_commac(dev->mal, &dev->commac);
1627         iounmap(dev->emacp);
1628         free_netdev(dev->ndev);
1629 }
1630
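/*
 * Two MAL op tables: the SG flavour differs only in peek_rx, which has
 * to wait for a frame's LAST descriptor instead of the first non-empty
 * one.
 */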
1631 static struct mal_commac_ops emac_commac_ops = {
1632         .poll_tx = &emac_poll_tx,
1633         .poll_rx = &emac_poll_rx,
1634         .peek_rx = &emac_peek_rx,
1635         .rxde = &emac_rxde,
1636 };
1637
1638 static struct mal_commac_ops emac_commac_sg_ops = {
1639         .poll_tx = &emac_poll_tx,
1640         .poll_rx = &emac_poll_rx,
1641         .peek_rx = &emac_peek_rx_sg,
1642         .rxde = &emac_rxde,
1643 };
1644
1645 /* Ethtool support */
1646 static int emac_ethtool_get_settings(struct net_device *ndev,
1647                                      struct ethtool_cmd *cmd)
1648 {
1649         struct ocp_enet_private *dev = ndev->priv;
1650
1651         cmd->supported = dev->phy.features;
1652         cmd->port = PORT_MII;
1653         cmd->phy_address = dev->phy.address;
1654         cmd->transceiver =
1655             dev->phy.address >= 0 ? XCVR_EXTERNAL : XCVR_INTERNAL;
1656
1657         local_bh_disable();
1658         cmd->advertising = dev->phy.advertising;
1659         cmd->autoneg = dev->phy.autoneg;
1660         cmd->speed = dev->phy.speed;
1661         cmd->duplex = dev->phy.duplex;
1662         local_bh_enable();
1663
1664         return 0;
1665 }
1666
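/*
 * Validate the requested link settings against the PHY feature mask,
 * then either force speed/duplex or restart autonegotiation.  Pause
 * bits already being advertised are preserved across the aneg restart.
 */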
1667 static int emac_ethtool_set_settings(struct net_device *ndev,
1668                                      struct ethtool_cmd *cmd)
1669 {
1670         struct ocp_enet_private *dev = ndev->priv;
1671         u32 f = dev->phy.features;
1672
1673         DBG("%d: set_settings(%d, %d, %d, 0x%08x)" NL, dev->def->index,
1674             cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising);
1675
1676         /* Basic sanity checks */
1677         if (dev->phy.address < 0)
1678                 return -EOPNOTSUPP;
1679         if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
1680                 return -EINVAL;
1681         if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
1682                 return -EINVAL;
1683         if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL)
1684                 return -EINVAL;
1685
1686         if (cmd->autoneg == AUTONEG_DISABLE) {
1687                 switch (cmd->speed) {
1688                 case SPEED_10:
1689                         if (cmd->duplex == DUPLEX_HALF
1690                             && !(f & SUPPORTED_10baseT_Half))
1691                                 return -EINVAL;
1692                         if (cmd->duplex == DUPLEX_FULL
1693                             && !(f & SUPPORTED_10baseT_Full))
1694                                 return -EINVAL;
1695                         break;
1696                 case SPEED_100:
1697                         if (cmd->duplex == DUPLEX_HALF
1698                             && !(f & SUPPORTED_100baseT_Half))
1699                                 return -EINVAL;
1700                         if (cmd->duplex == DUPLEX_FULL
1701                             && !(f & SUPPORTED_100baseT_Full))
1702                                 return -EINVAL;
1703                         break;
1704                 case SPEED_1000:
1705                         if (cmd->duplex == DUPLEX_HALF
1706                             && !(f & SUPPORTED_1000baseT_Half))
1707                                 return -EINVAL;
1708                         if (cmd->duplex == DUPLEX_FULL
1709                             && !(f & SUPPORTED_1000baseT_Full))
1710                                 return -EINVAL;
1711                         break;
1712                 default:
1713                         return -EINVAL;
1714                 }
1715
1716                 local_bh_disable();
1717                 dev->phy.def->ops->setup_forced(&dev->phy, cmd->speed,
1718                                                 cmd->duplex);
1719
1720         } else {
1721                 if (!(f & SUPPORTED_Autoneg))
1722                         return -EINVAL;
1723
1724                 local_bh_disable();
1725                 dev->phy.def->ops->setup_aneg(&dev->phy,
1726                                               (cmd->advertising & f) |
1727                                               (dev->phy.advertising &
1728                                                (ADVERTISED_Pause |
1729                                                 ADVERTISED_Asym_Pause)));
1730         }
1731         emac_force_link_update(dev);
1732         local_bh_enable();
1733
1734         return 0;
1735 }
1736
1737 static void emac_ethtool_get_ringparam(struct net_device *ndev,
1738                                        struct ethtool_ringparam *rp)
1739 {
1740         rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF;
1741         rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF;
1742 }
1743
1744 static void emac_ethtool_get_pauseparam(struct net_device *ndev,
1745                                         struct ethtool_pauseparam *pp)
1746 {
1747         struct ocp_enet_private *dev = ndev->priv;
1748
1749         local_bh_disable();
1750         if ((dev->phy.features & SUPPORTED_Autoneg) &&
1751             (dev->phy.advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause)))
1752                 pp->autoneg = 1;
1753
1754         if (dev->phy.duplex == DUPLEX_FULL) {
1755                 if (dev->phy.pause)
1756                         pp->rx_pause = pp->tx_pause = 1;
1757                 else if (dev->phy.asym_pause)
1758                         pp->tx_pause = 1;
1759         }
1760         local_bh_enable();
1761 }
1762
1763 static u32 emac_ethtool_get_rx_csum(struct net_device *ndev)
1764 {
1765         struct ocp_enet_private *dev = ndev->priv;
1766         return dev->tah_dev != NULL;
1767 }
1768
1769 static int emac_get_regs_len(struct ocp_enet_private *dev)
1770 {
1771         return sizeof(struct emac_ethtool_regs_subhdr) + EMAC_ETHTOOL_REGS_SIZE;
1772 }
1773
1774 static int emac_ethtool_get_regs_len(struct net_device *ndev)
1775 {
1776         struct ocp_enet_private *dev = ndev->priv;
1777         return sizeof(struct emac_ethtool_regs_hdr) +
1778             emac_get_regs_len(dev) + mal_get_regs_len(dev->mal) +
1779             zmii_get_regs_len(dev->zmii_dev) +
1780             rgmii_get_regs_len(dev->rgmii_dev) +
1781             tah_get_regs_len(dev->tah_dev);
1782 }
1783
1784 static void *emac_dump_regs(struct ocp_enet_private *dev, void *buf)
1785 {
1786         struct emac_ethtool_regs_subhdr *hdr = buf;
1787
1788         hdr->version = EMAC_ETHTOOL_REGS_VER;
1789         hdr->index = dev->def->index;
1790         memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE);
1791         return ((void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE);
1792 }
1793
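/*
 * Snapshot all register blocks for ethtool: MAL first, then the EMAC
 * core, then whichever of ZMII/RGMII/TAH this instance uses.  Optional
 * blocks are flagged in hdr->components so userspace can parse the dump.
 * IRQs are off so the snapshot is consistent.
 */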
1794 static void emac_ethtool_get_regs(struct net_device *ndev,
1795                                   struct ethtool_regs *regs, void *buf)
1796 {
1797         struct ocp_enet_private *dev = ndev->priv;
1798         struct emac_ethtool_regs_hdr *hdr = buf;
1799
1800         hdr->components = 0;
1801         buf = hdr + 1;
1802
1803         local_irq_disable();
1804         buf = mal_dump_regs(dev->mal, buf);
1805         buf = emac_dump_regs(dev, buf);
1806         if (dev->zmii_dev) {
1807                 hdr->components |= EMAC_ETHTOOL_REGS_ZMII;
1808                 buf = zmii_dump_regs(dev->zmii_dev, buf);
1809         }
1810         if (dev->rgmii_dev) {
1811                 hdr->components |= EMAC_ETHTOOL_REGS_RGMII;
1812                 buf = rgmii_dump_regs(dev->rgmii_dev, buf);
1813         }
1814         if (dev->tah_dev) {
1815                 hdr->components |= EMAC_ETHTOOL_REGS_TAH;
1816                 buf = tah_dump_regs(dev->tah_dev, buf);
1817         }
1818         local_irq_enable();
1819 }
1820
1821 static int emac_ethtool_nway_reset(struct net_device *ndev)
1822 {
1823         struct ocp_enet_private *dev = ndev->priv;
1824         int res = 0;
1825
1826         DBG("%d: nway_reset" NL, dev->def->index);
1827
1828         if (dev->phy.address < 0)
1829                 return -EOPNOTSUPP;
1830
1831         local_bh_disable();
1832         if (!dev->phy.autoneg) {
1833                 res = -EINVAL;
1834                 goto out;
1835         }
1836
1837         dev->phy.def->ops->setup_aneg(&dev->phy, dev->phy.advertising);
1838         emac_force_link_update(dev);
1839
1840       out:
1841         local_bh_enable();
1842         return res;
1843 }
1844
1845 static int emac_get_sset_count(struct net_device *ndev, int sset)
1846 {
1847         switch (sset) {
1848         case ETH_SS_STATS:
1849                 return EMAC_ETHTOOL_STATS_COUNT;
1850         default:
1851                 return -EOPNOTSUPP;
1852         }
1853 }
1854
1855 static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset,
1856                                      u8 *buf)
1857 {
1858         if (stringset == ETH_SS_STATS)
1859                 memcpy(buf, &emac_stats_keys, sizeof(emac_stats_keys));
1860 }
1861
1862 static void emac_ethtool_get_ethtool_stats(struct net_device *ndev,
1863                                            struct ethtool_stats *estats,
1864                                            u64 *tmp_stats)
1865 {
1866         struct ocp_enet_private *dev = ndev->priv;
1867         local_irq_disable();
1868         memcpy(tmp_stats, &dev->stats, sizeof(dev->stats));
1869         tmp_stats += sizeof(dev->stats) / sizeof(u64);
1870         memcpy(tmp_stats, &dev->estats, sizeof(dev->estats));
1871         local_irq_enable();
1872 }
1873
1874 static void emac_ethtool_get_drvinfo(struct net_device *ndev,
1875                                      struct ethtool_drvinfo *info)
1876 {
1877         struct ocp_enet_private *dev = ndev->priv;
1878
1879         strcpy(info->driver, "ibm_emac");
1880         strcpy(info->version, DRV_VERSION);
1881         info->fw_version[0] = '\0';
1882         sprintf(info->bus_info, "PPC 4xx EMAC %d", dev->def->index);
1883         info->regdump_len = emac_ethtool_get_regs_len(ndev);
1884 }
1885
1886 static const struct ethtool_ops emac_ethtool_ops = {
1887         .get_settings = emac_ethtool_get_settings,
1888         .set_settings = emac_ethtool_set_settings,
1889         .get_drvinfo = emac_ethtool_get_drvinfo,
1890
1891         .get_regs_len = emac_ethtool_get_regs_len,
1892         .get_regs = emac_ethtool_get_regs,
1893
1894         .nway_reset = emac_ethtool_nway_reset,
1895
1896         .get_ringparam = emac_ethtool_get_ringparam,
1897         .get_pauseparam = emac_ethtool_get_pauseparam,
1898
1899         .get_rx_csum = emac_ethtool_get_rx_csum,
1900
1901         .get_strings = emac_ethtool_get_strings,
1902         .get_sset_count = emac_get_sset_count,
1903         .get_ethtool_stats = emac_ethtool_get_ethtool_stats,
1904
1905         .get_link = ethtool_op_get_link,
1906 };
1907
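/*
 * Legacy MII ioctls.  The u16 view of ifr_ifru lines up with struct
 * mii_ioctl_data: [0] phy_id, [1] reg_num, [2] val_in, [3] val_out.
 * Note that reads and writes always go to our own PHY address,
 * whatever phy_id the caller passed in.
 */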
1908 static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
1909 {
1910         struct ocp_enet_private *dev = ndev->priv;
1911         uint16_t *data = (uint16_t *)&rq->ifr_ifru;
1912
1913         DBG("%d: ioctl %08x" NL, dev->def->index, cmd);
1914
1915         if (dev->phy.address < 0)
1916                 return -EOPNOTSUPP;
1917
1918         switch (cmd) {
1919         case SIOCGMIIPHY:
1920         case SIOCDEVPRIVATE:
1921                 data[0] = dev->phy.address;
1922                 /* Fall through */
1923         case SIOCGMIIREG:
1924         case SIOCDEVPRIVATE + 1:
1925                 data[3] = emac_mdio_read(ndev, dev->phy.address, data[1]);
1926                 return 0;
1927
1928         case SIOCSMIIREG:
1929         case SIOCDEVPRIVATE + 2:
1930                 if (!capable(CAP_NET_ADMIN))
1931                         return -EPERM;
1932                 emac_mdio_write(ndev, dev->phy.address, data[1], data[2]);
1933                 return 0;
1934         default:
1935                 return -EOPNOTSUPP;
1936         }
1937 }
1938
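/*
 * Probe one EMAC instance: attach to its MAL channels, chain to the
 * MDIO-owning EMAC if another instance drives the shared lines, hook up
 * the optional ZMII/RGMII/TAH helpers, map the registers, find the PHY
 * (or go PHY-less) and finally register the netdev.
 */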
1939 static int __init emac_probe(struct ocp_device *ocpdev)
1940 {
1941         struct ocp_func_emac_data *emacdata = ocpdev->def->additions;
1942         struct net_device *ndev;
1943         struct ocp_device *maldev;
1944         struct ocp_enet_private *dev;
1945         int err, i;
1946         DECLARE_MAC_BUF(mac);
1947
1948         DBG("%d: probe" NL, ocpdev->def->index);
1949
1950         if (!emacdata) {
1951                 printk(KERN_ERR "emac%d: Missing additional data!\n",
1952                        ocpdev->def->index);
1953                 return -ENODEV;
1954         }
1955
1956         /* Allocate our net_device structure */
1957         ndev = alloc_etherdev(sizeof(struct ocp_enet_private));
1958         if (!ndev) {
1959                 printk(KERN_ERR "emac%d: could not allocate ethernet device!\n",
1960                        ocpdev->def->index);
1961                 return -ENOMEM;
1962         }
1963         dev = ndev->priv;
1964         dev->ndev = ndev;
1965         dev->ldev = &ocpdev->dev;
1966         dev->def = ocpdev->def;
1967
1968         /* Find MAL device we are connected to */
1969         maldev =
1970             ocp_find_device(OCP_VENDOR_IBM, OCP_FUNC_MAL, emacdata->mal_idx);
1971         if (!maldev) {
1972                 printk(KERN_ERR "emac%d: unknown mal%d device!\n",
1973                        dev->def->index, emacdata->mal_idx);
1974                 err = -ENODEV;
1975                 goto out;
1976         }
1977         dev->mal = ocp_get_drvdata(maldev);
1978         if (!dev->mal) {
1979                 printk(KERN_ERR "emac%d: mal%d hasn't been initialized yet!\n",
1980                        dev->def->index, emacdata->mal_idx);
1981                 err = -ENODEV;
1982                 goto out;
1983         }
1984
1985         /* Register with MAL */
1986         dev->commac.ops = &emac_commac_ops;
1987         dev->commac.dev = dev;
1988         dev->commac.tx_chan_mask = MAL_CHAN_MASK(emacdata->mal_tx_chan);
1989         dev->commac.rx_chan_mask = MAL_CHAN_MASK(emacdata->mal_rx_chan);
1990         err = mal_register_commac(dev->mal, &dev->commac);
1991         if (err) {
1992                 printk(KERN_ERR "emac%d: failed to register with mal%d!\n",
1993                        dev->def->index, emacdata->mal_idx);
1994                 goto out;
1995         }
1996         dev->rx_skb_size = emac_rx_skb_size(ndev->mtu);
1997         dev->rx_sync_size = emac_rx_sync_size(ndev->mtu);
1998
1999         /* Get pointers to BD rings */
2000         dev->tx_desc =
2001             dev->mal->bd_virt + mal_tx_bd_offset(dev->mal,
2002                                                  emacdata->mal_tx_chan);
2003         dev->rx_desc =
2004             dev->mal->bd_virt + mal_rx_bd_offset(dev->mal,
2005                                                  emacdata->mal_rx_chan);
2006
2007         DBG("%d: tx_desc %p" NL, ocpdev->def->index, dev->tx_desc);
2008         DBG("%d: rx_desc %p" NL, ocpdev->def->index, dev->rx_desc);
2009
2010         /* Clean rings */
2011         memset(dev->tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor));
2012         memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor));
2013
2014         /* If we depend on another EMAC for MDIO, check whether it was probed already */
2015         if (emacdata->mdio_idx >= 0 && emacdata->mdio_idx != ocpdev->def->index) {
2016                 struct ocp_device *mdiodev =
2017                     ocp_find_device(OCP_VENDOR_IBM, OCP_FUNC_EMAC,
2018                                     emacdata->mdio_idx);
2019                 if (!mdiodev) {
2020                         printk(KERN_ERR "emac%d: unknown emac%d device!\n",
2021                                dev->def->index, emacdata->mdio_idx);
2022                         err = -ENODEV;
2023                         goto out2;
2024                 }
2025                 dev->mdio_dev = ocp_get_drvdata(mdiodev);
2026                 if (!dev->mdio_dev) {
2027                         printk(KERN_ERR
2028                                "emac%d: emac%d hasn't been initialized yet!\n",
2029                                dev->def->index, emacdata->mdio_idx);
2030                         err = -ENODEV;
2031                         goto out2;
2032                 }
2033         }
2034
2035         /* Attach to ZMII, if needed */
2036         if ((err = zmii_attach(dev)) != 0)
2037                 goto out2;
2038
2039         /* Attach to RGMII, if needed */
2040         if ((err = rgmii_attach(dev)) != 0)
2041                 goto out3;
2042
2043         /* Attach to TAH, if needed */
2044         if ((err = tah_attach(dev)) != 0)
2045                 goto out4;
2046
2047         /* Map EMAC regs */
2048         dev->emacp = ioremap(dev->def->paddr, sizeof(struct emac_regs));
2049         if (!dev->emacp) {
2050                 printk(KERN_ERR "emac%d: could not ioremap device registers!\n",
2051                        dev->def->index);
2052                 err = -ENOMEM;
2053                 goto out5;
2054         }
2055
2056         /* Fill in MAC address */
2057         for (i = 0; i < 6; ++i)
2058                 ndev->dev_addr[i] = emacdata->mac_addr[i];
2059
2060         /* Set some link defaults before we can find out real parameters */
2061         dev->phy.speed = SPEED_100;
2062         dev->phy.duplex = DUPLEX_FULL;
2063         dev->phy.autoneg = AUTONEG_DISABLE;
2064         dev->phy.pause = dev->phy.asym_pause = 0;
2065         dev->stop_timeout = STOP_TIMEOUT_100;
2066         init_timer(&dev->link_timer);
2067         dev->link_timer.function = emac_link_timer;
2068         dev->link_timer.data = (unsigned long)dev;
2069
2070         /* Find PHY if any */
2071         dev->phy.dev = ndev;
2072         dev->phy.mode = emacdata->phy_mode;
2073         if (emacdata->phy_map != 0xffffffff) {
2074                 u32 phy_map = emacdata->phy_map | busy_phy_map;
2075                 u32 adv;
2076
2077                 DBG("%d: PHY maps %08x %08x" NL, dev->def->index,
2078                     emacdata->phy_map, busy_phy_map);
2079
2080                 EMAC_RX_CLK_TX(dev->def->index);
2081
2082                 dev->phy.mdio_read = emac_mdio_read;
2083                 dev->phy.mdio_write = emac_mdio_write;
2084
2085                 /* Configure EMAC with defaults so we can at least use MDIO.
2086                  * This is needed mostly for 440GX.
2087                  */
2088                 if (emac_phy_gpcs(dev->phy.mode)) {
2089                         /* XXX
2090                          * Make GPCS PHY address equal to EMAC index.
2091                          * We probably should take into account busy_phy_map
2092                          * and/or phy_map here.
2093                          */
2094                         dev->phy.address = dev->def->index;
2095                 }
2096
2097                 emac_configure(dev);
2098
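                /*
                 * Scan all 32 MDIO addresses, skipping any already claimed
                 * in phy_map/busy_phy_map.  Every address we try is marked
                 * busy so other EMACs sharing the MDIO lines won't reuse
                 * it; a BMCR read of 0xffff (or a read error) means nobody
                 * is home and we skip the full probe.
                 */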
2099                 for (i = 0; i < 0x20; phy_map >>= 1, ++i)
2100                         if (!(phy_map & 1)) {
2101                                 int r;
2102                                 busy_phy_map |= 1 << i;
2103
2104                                 /* Quick check if there is a PHY at the address */
2105                                 r = emac_mdio_read(dev->ndev, i, MII_BMCR);
2106                                 if (r == 0xffff || r < 0)
2107                                         continue;
2108                                 if (!mii_phy_probe(&dev->phy, i))
2109                                         break;
2110                         }
2111                 if (i == 0x20) {
2112                         printk(KERN_WARNING "emac%d: can't find PHY!\n",
2113                                dev->def->index);
                        /* don't return success after tearing everything down */
                        err = -ENXIO;
2114                         goto out6;
2115                 }
2116
2117                 /* Init PHY */
2118                 if (dev->phy.def->ops->init)
2119                         dev->phy.def->ops->init(&dev->phy);
2120
2121                 /* Disable any PHY features not supported by the platform */
2122                 dev->phy.def->features &= ~emacdata->phy_feat_exc;
2123
2124                 /* Setup initial link parameters */
2125                 if (dev->phy.features & SUPPORTED_Autoneg) {
2126                         adv = dev->phy.features;
2127 #if !defined(CONFIG_40x)
2128                         adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
2129 #endif
2130                         /* Restart autonegotiation */
2131                         dev->phy.def->ops->setup_aneg(&dev->phy, adv);
2132                 } else {
2133                         u32 f = dev->phy.def->features;
2134                         int speed = SPEED_10, fd = DUPLEX_HALF;
2135
2136                         /* Select highest supported speed/duplex */
2137                         if (f & SUPPORTED_1000baseT_Full) {
2138                                 speed = SPEED_1000;
2139                                 fd = DUPLEX_FULL;
2140                         } else if (f & SUPPORTED_1000baseT_Half)
2141                                 speed = SPEED_1000;
2142                         else if (f & SUPPORTED_100baseT_Full) {
2143                                 speed = SPEED_100;
2144                                 fd = DUPLEX_FULL;
2145                         } else if (f & SUPPORTED_100baseT_Half)
2146                                 speed = SPEED_100;
2147                         else if (f & SUPPORTED_10baseT_Full)
2148                                 fd = DUPLEX_FULL;
2149
2150                         /* Force link parameters */
2151                         dev->phy.def->ops->setup_forced(&dev->phy, speed, fd);
2152                 }
2153         } else {
2154                 emac_reset(dev);
2155
2156                 /* PHY-less configuration.
2157                  * XXX I probably should move these settings to emacdata
2158                  */
2159                 dev->phy.address = -1;
2160                 dev->phy.features = SUPPORTED_100baseT_Full | SUPPORTED_MII;
2161                 dev->phy.pause = 1;
2162         }
2163
2164         /* Fill in the driver function table */
2165         ndev->open = &emac_open;
2166         if (dev->tah_dev) {
2167                 ndev->hard_start_xmit = &emac_start_xmit_sg;
2168                 ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
2169         } else
2170                 ndev->hard_start_xmit = &emac_start_xmit;
2171         ndev->tx_timeout = &emac_full_tx_reset;
2172         ndev->watchdog_timeo = 5 * HZ;
2173         ndev->stop = &emac_close;
2174         ndev->get_stats = &emac_stats;
2175         ndev->set_multicast_list = &emac_set_multicast_list;
2176         ndev->do_ioctl = &emac_ioctl;
2177         if (emac_phy_supports_gige(emacdata->phy_mode)) {
2178                 ndev->change_mtu = &emac_change_mtu;
2179                 dev->commac.ops = &emac_commac_sg_ops;
2180         }
2181         SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);
2182
2183         netif_carrier_off(ndev);
2184         netif_stop_queue(ndev);
2185
2186         err = register_netdev(ndev);
2187         if (err) {
2188                 printk(KERN_ERR "emac%d: failed to register net device (%d)!\n",
2189                        dev->def->index, err);
2190                 goto out6;
2191         }
2192
2193         ocp_set_drvdata(ocpdev, dev);
2194
2195         printk(KERN_INFO "%s: emac%d, MAC %s\n",
2196                ndev->name, dev->def->index, print_mac(mac, ndev->dev_addr));
2197
2198         if (dev->phy.address >= 0)
2199                 printk(KERN_INFO "%s: found %s PHY (0x%02x)\n", ndev->name,
2200                        dev->phy.def->name, dev->phy.address);
2201
2202         emac_dbg_register(dev->def->index, dev);
2203
2204         return 0;
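        /*
         * Error unwinding: the labels run in reverse order of the setup
         * above, each undoing exactly one successful step.
         */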
2205       out6:
2206         iounmap(dev->emacp);
2207       out5:
2208         tah_fini(dev->tah_dev);
2209       out4:
2210         rgmii_fini(dev->rgmii_dev, dev->rgmii_input);
2211       out3:
2212         zmii_fini(dev->zmii_dev, dev->zmii_input);
2213       out2:
2214         mal_unregister_commac(dev->mal, &dev->commac);
2215       out:
2216         free_netdev(ndev);
2217         return err;
2218 }
2219
2220 static struct ocp_device_id emac_ids[] = {
2221         { .vendor = OCP_VENDOR_IBM, .function = OCP_FUNC_EMAC },
2222         { .vendor = OCP_VENDOR_INVALID}
2223 };
2224
2225 static struct ocp_driver emac_driver = {
2226         .name = "emac",
2227         .id_table = emac_ids,
2228         .probe = emac_probe,
2229         .remove = emac_remove,
2230 };
2231
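/*
 * Module init: bring the MAL layer up first (EMACs are its clients),
 * then register the OCP driver.  The EMAC_CLK_INTERNAL/EXTERNAL pair
 * brackets probing for the PHY RX clock workaround used on some 4xx
 * parts.
 */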
2232 static int __init emac_init(void)
2233 {
2234         printk(KERN_INFO DRV_DESC ", version " DRV_VERSION "\n");
2235
2236         DBG(": init" NL);
2237
2238         if (mal_init())
2239                 return -ENODEV;
2240
2241         EMAC_CLK_INTERNAL;
2242         if (ocp_register_driver(&emac_driver)) {
2243                 EMAC_CLK_EXTERNAL;
2244                 ocp_unregister_driver(&emac_driver);
2245                 mal_exit();
2246                 return -ENODEV;
2247         }
2248         EMAC_CLK_EXTERNAL;
2249
2250         emac_init_debug();
2251         return 0;
2252 }
2253
2254 static void __exit emac_exit(void)
2255 {
2256         DBG(": exit" NL);
2257         ocp_unregister_driver(&emac_driver);
2258         mal_exit();
2259         emac_fini_debug();
2260 }
2261
2262 module_init(emac_init);
2263 module_exit(emac_exit);