sky2: phy irq on shutdown
[linux-2.6] / drivers / net / ibm_emac / ibm_emac_core.c
1 /*
2  * drivers/net/ibm_emac/ibm_emac_core.c
3  *
4  * Driver for PowerPC 4xx on-chip ethernet controller.
5  *
6  * Copyright (c) 2004, 2005 Zultys Technologies.
7  * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
8  *
9  * Based on original work by
10  *      Matt Porter <mporter@kernel.crashing.org>
11  *      (c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
12  *      Armin Kuster <akuster@mvista.com>
13  *      Johnnie Peters <jpeters@mvista.com>
14  *
15  * This program is free software; you can redistribute  it and/or modify it
16  * under  the terms of  the GNU General  Public License as published by the
17  * Free Software Foundation;  either version 2 of the  License, or (at your
18  * option) any later version.
19  *
20  */
21
22 #include <linux/module.h>
23 #include <linux/kernel.h>
24 #include <linux/sched.h>
25 #include <linux/string.h>
26 #include <linux/errno.h>
27 #include <linux/interrupt.h>
28 #include <linux/delay.h>
29 #include <linux/init.h>
30 #include <linux/types.h>
31 #include <linux/pci.h>
32 #include <linux/netdevice.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/crc32.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #include <linux/bitops.h>
39
40 #include <asm/processor.h>
41 #include <asm/io.h>
42 #include <asm/dma.h>
43 #include <asm/uaccess.h>
44 #include <asm/ocp.h>
45
46 #include "ibm_emac_core.h"
47 #include "ibm_emac_debug.h"
48
49 /*
50  * Lack of dma_unmap_???? calls is intentional.
51  *
52  * API-correct usage requires additional support state information to be 
53  * maintained for every RX and TX buffer descriptor (BD). Unfortunately, due to
54  * EMAC design (e.g. TX buffer passed from network stack can be split into
55  * several BDs, dma_map_single/dma_map_page can be used to map particular BD),
56  * maintaining such information will add additional overhead.
57  * Current DMA API implementation for 4xx processors only ensures cache coherency
58  * and dma_unmap_???? routines are empty and are likely to stay this way.
59  * I decided to omit dma_unmap_??? calls because I don't want to add additional
60  * complexity just for the sake of following some abstract API, when it doesn't
 * add any real benefit to the driver. I understand that this decision may be
 * controversial, but I really tried to make code API-correct and efficient 
63  * at the same time and didn't come up with code I liked :(.                --ebs
64  */
65
66 #define DRV_NAME        "emac"
67 #define DRV_VERSION     "3.54"
68 #define DRV_DESC        "PPC 4xx OCP EMAC driver"
69
70 MODULE_DESCRIPTION(DRV_DESC);
71 MODULE_AUTHOR
72     ("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>");
73 MODULE_LICENSE("GPL");
74
75 /* minimum number of free TX descriptors required to wake up TX process */
76 #define EMAC_TX_WAKEUP_THRESH           (NUM_TX_BUFF / 4)
77
78 /* If packet size is less than this number, we allocate small skb and copy packet 
79  * contents into it instead of just sending original big skb up
80  */
81 #define EMAC_RX_COPY_THRESH             CONFIG_IBM_EMAC_RX_COPY_THRESHOLD
82
83 /* Since multiple EMACs share MDIO lines in various ways, we need
84  * to avoid re-using the same PHY ID in cases where the arch didn't
85  * setup precise phy_map entries
86  */
87 static u32 busy_phy_map;
88
#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX) && \
    (defined(CONFIG_405EP) || defined(CONFIG_440EP) || defined(CONFIG_440GR))
/* 405EP has "EMAC to PHY Control Register" (CPC0_EPCTL) which can help us
 * with PHY RX clock problem.
 * 440EP/440GR has more sane SDR0_MFR register implementation than 440GX, which
 * also allows controlling each EMAC clock
 */
static inline void EMAC_RX_CLK_TX(int idx)
{
        /* Interrupts off: the DCR read-modify-write must not be interleaved
         * with another context touching the same register. */
        unsigned long flags;
        local_irq_save(flags);

#if defined(CONFIG_405EP)
        /* DCR 0xf3 — per the comment above, CPC0_EPCTL; one bit per EMAC */
        mtdcr(0xf3, mfdcr(0xf3) | (1 << idx));
#else /* CONFIG_440EP || CONFIG_440GR */
        SDR_WRITE(DCRN_SDR_MFR, SDR_READ(DCRN_SDR_MFR) | (0x08000000 >> idx));
#endif

        local_irq_restore(flags);
}

/* Undo EMAC_RX_CLK_TX(): restore the default RX clock source for EMAC 'idx' */
static inline void EMAC_RX_CLK_DEFAULT(int idx)
{
        unsigned long flags;
        local_irq_save(flags);

#if defined(CONFIG_405EP)
        mtdcr(0xf3, mfdcr(0xf3) & ~(1 << idx));
#else /* CONFIG_440EP */
        SDR_WRITE(DCRN_SDR_MFR, SDR_READ(DCRN_SDR_MFR) & ~(0x08000000 >> idx));
#endif

        local_irq_restore(flags);
}
#else
/* Workaround not configured/applicable: compile the helpers to no-ops */
#define EMAC_RX_CLK_TX(idx)             ((void)0)
#define EMAC_RX_CLK_DEFAULT(idx)        ((void)0)
#endif
127
128 #if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX) && defined(CONFIG_440GX)
129 /* We can switch Ethernet clock to the internal source through SDR0_MFR[ECS],
130  * unfortunately this is less flexible than 440EP case, because it's a global 
131  * setting for all EMACs, therefore we do this clock trick only during probe.
132  */
133 #define EMAC_CLK_INTERNAL               SDR_WRITE(DCRN_SDR_MFR, \
134                                             SDR_READ(DCRN_SDR_MFR) | 0x08000000)
135 #define EMAC_CLK_EXTERNAL               SDR_WRITE(DCRN_SDR_MFR, \
136                                             SDR_READ(DCRN_SDR_MFR) & ~0x08000000)
137 #else
138 #define EMAC_CLK_INTERNAL               ((void)0)
139 #define EMAC_CLK_EXTERNAL               ((void)0)
140 #endif
141
142 /* I don't want to litter system log with timeout errors 
143  * when we have brain-damaged PHY.
144  */
static inline void emac_report_timeout_error(struct ocp_enet_private *dev,
                                             const char *error)
{
#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX)
        /* With the PHY RX clock workaround enabled, timeouts are expected
         * on some boards — keep them in the debug trace only. */
        DBG("%d: %s" NL, dev->def->index, error);
#else
        /* Rate-limited so a brain-damaged PHY can't flood the system log */
        if (net_ratelimit())
                printk(KERN_ERR "emac%d: %s\n", dev->def->index, error);
#endif
}
155
156 /* PHY polling intervals */
157 #define PHY_POLL_LINK_ON        HZ
158 #define PHY_POLL_LINK_OFF       (HZ / 5)
159
160 /* Graceful stop timeouts in us. 
161  * We should allow up to 1 frame time (full-duplex, ignoring collisions) 
162  */
163 #define STOP_TIMEOUT_10         1230    
164 #define STOP_TIMEOUT_100        124
165 #define STOP_TIMEOUT_1000       13
166 #define STOP_TIMEOUT_1000_JUMBO 73
167
168 /* Please, keep in sync with struct ibm_emac_stats/ibm_emac_error_stats */
169 static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = {
170         "rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum",
171         "tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom",
172         "rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu",
173         "rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet",
174         "rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error",
175         "rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range",
176         "rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun",
177         "rx_bad_packet", "rx_runt_packet", "rx_short_event",
178         "rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long",
179         "rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors",
180         "tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral",
181         "tx_bd_excessive_collisions", "tx_bd_late_collision",
182         "tx_bd_multple_collisions", "tx_bd_single_collision",
183         "tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe",
184         "tx_errors"
185 };
186
187 static irqreturn_t emac_irq(int irq, void *dev_instance);
188 static void emac_clean_tx_ring(struct ocp_enet_private *dev);
189
190 static inline int emac_phy_supports_gige(int phy_mode)
191 {
192         return  phy_mode == PHY_MODE_GMII ||
193                 phy_mode == PHY_MODE_RGMII ||
194                 phy_mode == PHY_MODE_TBI ||
195                 phy_mode == PHY_MODE_RTBI;
196 }
197
198 static inline int emac_phy_gpcs(int phy_mode)
199 {
200         return  phy_mode == PHY_MODE_TBI ||
201                 phy_mode == PHY_MODE_RTBI;
202 }
203
/* Set MR0[TXE] to start the TX channel (no-op if it is already enabled).
 * Interrupts are disabled so the MR0 read-modify-write is atomic with
 * respect to the IRQ handler.
 */
static inline void emac_tx_enable(struct ocp_enet_private *dev)
{
        struct emac_regs __iomem *p = dev->emacp;
        unsigned long flags;
        u32 r;

        local_irq_save(flags);

        DBG("%d: tx_enable" NL, dev->def->index);

        r = in_be32(&p->mr0);
        if (!(r & EMAC_MR0_TXE))
                out_be32(&p->mr0, r | EMAC_MR0_TXE);
        local_irq_restore(flags);
}
219
/* Clear MR0[TXE] and busy-wait (up to dev->stop_timeout microseconds)
 * for the MAC to signal a graceful TX stop by raising MR0[TXI].
 */
static void emac_tx_disable(struct ocp_enet_private *dev)
{
        struct emac_regs __iomem *p = dev->emacp;
        unsigned long flags;
        u32 r;

        local_irq_save(flags);

        DBG("%d: tx_disable" NL, dev->def->index);

        r = in_be32(&p->mr0);
        if (r & EMAC_MR0_TXE) {
                int n = dev->stop_timeout;
                out_be32(&p->mr0, r & ~EMAC_MR0_TXE);
                /* TXI = TX channel idle */
                while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n) {
                        udelay(1);
                        --n;
                }
                if (unlikely(!n))
                        emac_report_timeout_error(dev, "TX disable timeout");
        }
        local_irq_restore(flags);
}
243
244 static void emac_rx_enable(struct ocp_enet_private *dev)
245 {
246         struct emac_regs __iomem *p = dev->emacp;
247         unsigned long flags;
248         u32 r;
249
250         local_irq_save(flags);
251         if (unlikely(dev->commac.rx_stopped))
252                 goto out;
253
254         DBG("%d: rx_enable" NL, dev->def->index);
255
256         r = in_be32(&p->mr0);
257         if (!(r & EMAC_MR0_RXE)) {
258                 if (unlikely(!(r & EMAC_MR0_RXI))) {
259                         /* Wait if previous async disable is still in progress */
260                         int n = dev->stop_timeout;
261                         while (!(r = in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
262                                 udelay(1);
263                                 --n;
264                         }       
265                         if (unlikely(!n))
266                                 emac_report_timeout_error(dev,
267                                                           "RX disable timeout");
268                 }
269                 out_be32(&p->mr0, r | EMAC_MR0_RXE);
270         }
271       out:
272         local_irq_restore(flags);
273 }
274
/* Clear MR0[RXE] and busy-wait (up to dev->stop_timeout microseconds)
 * for the MAC to signal a graceful RX stop by raising MR0[RXI].
 */
static void emac_rx_disable(struct ocp_enet_private *dev)
{
        struct emac_regs __iomem *p = dev->emacp;
        unsigned long flags;
        u32 r;

        local_irq_save(flags);

        DBG("%d: rx_disable" NL, dev->def->index);

        r = in_be32(&p->mr0);
        if (r & EMAC_MR0_RXE) {
                int n = dev->stop_timeout;
                out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
                /* RXI = RX channel idle */
                while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
                        udelay(1);
                        --n;
                }
                if (unlikely(!n))
                        emac_report_timeout_error(dev, "RX disable timeout");
        }
        local_irq_restore(flags);
}
298
/* Clear MR0[RXE] WITHOUT waiting for the channel to go idle.  The
 * pending-idle wait is picked up later by emac_rx_enable(), which
 * polls MR0[RXI] before re-enabling.
 */
static inline void emac_rx_disable_async(struct ocp_enet_private *dev)
{
        struct emac_regs __iomem *p = dev->emacp;
        unsigned long flags;
        u32 r;

        local_irq_save(flags);

        DBG("%d: rx_disable_async" NL, dev->def->index);

        r = in_be32(&p->mr0);
        if (r & EMAC_MR0_RXE)
                out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
        local_irq_restore(flags);
}
314
/* Soft-reset the EMAC core via MR0[SRST].
 * Returns 0 on success, -ETIMEDOUT if SRST never self-clears; the
 * failure is latched in dev->reset_failed so later resets skip the
 * channel-stop step (which would also time out).
 */
static int emac_reset(struct ocp_enet_private *dev)
{
        struct emac_regs __iomem *p = dev->emacp;
        unsigned long flags;
        int n = 20;

        DBG("%d: reset" NL, dev->def->index);

        local_irq_save(flags);

        if (!dev->reset_failed) {
                /* 40x erratum suggests stopping RX channel before reset,
                 * we stop TX as well
                 */
                emac_rx_disable(dev);
                emac_tx_disable(dev);
        }

        /* SRST self-clears when the reset completes */
        out_be32(&p->mr0, EMAC_MR0_SRST);
        while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n)
                --n;
        local_irq_restore(flags);

        if (n) {
                dev->reset_failed = 0;
                return 0;
        } else {
                emac_report_timeout_error(dev, "reset timeout");
                dev->reset_failed = 1;
                return -ETIMEDOUT;
        }
}
347
/* Program the four 16-bit group-address hash table registers (GAHT1-4)
 * from the device's multicast list.  Each address selects one of 64
 * filter bits via the top 6 bits of its Ethernet CRC.
 */
static void emac_hash_mc(struct ocp_enet_private *dev)
{
        struct emac_regs __iomem *p = dev->emacp;
        u16 gaht[4] = { 0 };
        struct dev_mc_list *dmi;

        DBG("%d: hash_mc %d" NL, dev->def->index, dev->ndev->mc_count);

        for (dmi = dev->ndev->mc_list; dmi; dmi = dmi->next) {
                int bit;
                DBG2("%d: mc %02x:%02x:%02x:%02x:%02x:%02x" NL,
                     dev->def->index,
                     dmi->dmi_addr[0], dmi->dmi_addr[1], dmi->dmi_addr[2],
                     dmi->dmi_addr[3], dmi->dmi_addr[4], dmi->dmi_addr[5]);

                /* CRC high 6 bits -> bit index 0..63 -> word + bit-in-word */
                bit = 63 - (ether_crc(ETH_ALEN, dmi->dmi_addr) >> 26);
                gaht[bit >> 4] |= 0x8000 >> (bit & 0x0f);
        }
        out_be32(&p->gaht1, gaht[0]);
        out_be32(&p->gaht2, gaht[1]);
        out_be32(&p->gaht3, gaht[2]);
        out_be32(&p->gaht4, gaht[3]);
}
371
372 static inline u32 emac_iff2rmr(struct net_device *ndev)
373 {
374         u32 r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE |
375             EMAC_RMR_BASE;
376
377         if (ndev->flags & IFF_PROMISC)
378                 r |= EMAC_RMR_PME;
379         else if (ndev->flags & IFF_ALLMULTI || ndev->mc_count > 32)
380                 r |= EMAC_RMR_PMME;
381         else if (ndev->mc_count > 0)
382                 r |= EMAC_RMR_MAE;
383
384         return r;
385 }
386
/* OPB bus frequency in MHz, rounded to the nearest whole MHz */
static inline int emac_opb_mhz(void)
{
        return (ocp_sys_info.opb_bus_freq + 500000) / 1000000;
}
391
/* Full (re)configuration of the EMAC from the current PHY state:
 * reset the MAC, then program the mode register, MAC address, VLAN TPID,
 * RX mode, FIFO thresholds, PAUSE parameters and the IRQ mask.
 * Returns 0 or -ETIMEDOUT if the reset failed.  BHs disabled.
 */
static int emac_configure(struct ocp_enet_private *dev)
{
        struct emac_regs __iomem *p = dev->emacp;
        struct net_device *ndev = dev->ndev;
        int gige;
        u32 r;

        DBG("%d: configure" NL, dev->def->index);

        if (emac_reset(dev) < 0)
                return -ETIMEDOUT;

        tah_reset(dev->tah_dev);

        /* Mode register: base config + VLAN + speed/duplex-specific bits.
         * stop_timeout is also derived from the speed here (worst-case
         * one-frame time used by the graceful-stop waits). */
        r = EMAC_MR1_BASE(emac_opb_mhz()) | EMAC_MR1_VLE | EMAC_MR1_IST;
        if (dev->phy.duplex == DUPLEX_FULL)
                r |= EMAC_MR1_FDE | EMAC_MR1_MWSW_001;
        dev->stop_timeout = STOP_TIMEOUT_10;
        switch (dev->phy.speed) {
        case SPEED_1000:
                if (emac_phy_gpcs(dev->phy.mode)) {
                        r |= EMAC_MR1_MF_1000GPCS |
                            EMAC_MR1_MF_IPPA(dev->phy.address);

                        /* Put some arbitrary OUI, Manuf & Rev IDs so we can
                         * identify this GPCS PHY later.
                         */
                        out_be32(&p->ipcr, 0xdeadbeef);
                } else
                        r |= EMAC_MR1_MF_1000;
                r |= EMAC_MR1_RFS_16K;
                gige = 1;

                if (dev->ndev->mtu > ETH_DATA_LEN) {
                        r |= EMAC_MR1_JPSM;
                        dev->stop_timeout = STOP_TIMEOUT_1000_JUMBO;
                } else
                        dev->stop_timeout = STOP_TIMEOUT_1000;
                break;
        case SPEED_100:
                r |= EMAC_MR1_MF_100;
                dev->stop_timeout = STOP_TIMEOUT_100;
                /* Fall through */
        default:
                r |= EMAC_MR1_RFS_4K;
                gige = 0;
                break;
        }

        /* Propagate the negotiated speed to the RGMII or ZMII bridge */
        if (dev->rgmii_dev)
                rgmii_set_speed(dev->rgmii_dev, dev->rgmii_input,
                                dev->phy.speed);
        else
                zmii_set_speed(dev->zmii_dev, dev->zmii_input, dev->phy.speed);

#if !defined(CONFIG_40x)
        /* on 40x erratum forces us to NOT use integrated flow control, 
         * let's hope it works on 44x ;)
         */
        if (dev->phy.duplex == DUPLEX_FULL) {
                if (dev->phy.pause)
                        r |= EMAC_MR1_EIFC | EMAC_MR1_APP;
                else if (dev->phy.asym_pause)
                        r |= EMAC_MR1_APP;
        }
#endif
        out_be32(&p->mr1, r);

        /* Set individual MAC address (high 2 bytes in IAHR, low 4 in IALR) */
        out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
        out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
                 (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
                 ndev->dev_addr[5]);

        /* VLAN Tag Protocol ID */
        out_be32(&p->vtpid, 0x8100);

        /* Receive mode register */
        r = emac_iff2rmr(ndev);
        if (r & EMAC_RMR_MAE)
                emac_hash_mc(dev);
        out_be32(&p->rmr, r);

        /* FIFOs thresholds */
        r = EMAC_TMR1((EMAC_MAL_BURST_SIZE / EMAC_FIFO_ENTRY_SIZE) + 1,
                      EMAC_TX_FIFO_SIZE / 2 / EMAC_FIFO_ENTRY_SIZE);
        out_be32(&p->tmr1, r);
        out_be32(&p->trtr, EMAC_TRTR(EMAC_TX_FIFO_SIZE / 2));

        /* PAUSE frame is sent when RX FIFO reaches its high-water mark,
           there should be still enough space in FIFO to allow our link
           partner time to process this frame and also time to send PAUSE 
           frame itself.

           Here is the worst case scenario for the RX FIFO "headroom"
           (from "The Switch Book") (100Mbps, without preamble, inter-frame gap):

           1) One maximum-length frame on TX                    1522 bytes
           2) One PAUSE frame time                                64 bytes
           3) PAUSE frame decode time allowance                   64 bytes
           4) One maximum-length frame on RX                    1522 bytes
           5) Round-trip propagation delay of the link (100Mb)    15 bytes
           ----------       
           3187 bytes

           I chose to set high-water mark to RX_FIFO_SIZE / 4 (1024 bytes)
           low-water mark  to RX_FIFO_SIZE / 8 (512 bytes)
         */
        r = EMAC_RWMR(EMAC_RX_FIFO_SIZE(gige) / 8 / EMAC_FIFO_ENTRY_SIZE,
                      EMAC_RX_FIFO_SIZE(gige) / 4 / EMAC_FIFO_ENTRY_SIZE);
        out_be32(&p->rwmr, r);

        /* Set PAUSE timer to the maximum */
        out_be32(&p->ptr, 0xffff);

        /* IRQ sources (TXUE/RXOE deliberately masked, see commented bits) */
        out_be32(&p->iser, EMAC_ISR_TXPE | EMAC_ISR_RXPE | /* EMAC_ISR_TXUE |
                 EMAC_ISR_RXOE | */ EMAC_ISR_OVR | EMAC_ISR_BP | EMAC_ISR_SE |
                 EMAC_ISR_ALE | EMAC_ISR_BFCS | EMAC_ISR_PTLE | EMAC_ISR_ORE |
                 EMAC_ISR_IRE | EMAC_ISR_TE);

        /* We need to take GPCS PHY out of isolate mode after EMAC reset */
        if (emac_phy_gpcs(dev->phy.mode)) 
                mii_reset_phy(&dev->phy);

        return 0;
}
521
522 /* BHs disabled */
523 static void emac_reinitialize(struct ocp_enet_private *dev)
524 {
525         DBG("%d: reinitialize" NL, dev->def->index);
526
527         if (!emac_configure(dev)) {
528                 emac_tx_enable(dev);
529                 emac_rx_enable(dev);
530         }
531 }
532
/* Full recovery path for a wedged TX channel: stop the MAC and MAL TX,
 * drop everything queued in the TX ring, reprogram the EMAC and restart
 * both TX and RX, then wake the netdev queue.  BHs disabled.
 */
static void emac_full_tx_reset(struct net_device *ndev)
{
        struct ocp_enet_private *dev = ndev->priv;
        struct ocp_func_emac_data *emacdata = dev->def->additions;

        DBG("%d: full_tx_reset" NL, dev->def->index);

        emac_tx_disable(dev);
        mal_disable_tx_channel(dev->mal, emacdata->mal_tx_chan);
        emac_clean_tx_ring(dev);
        dev->tx_cnt = dev->tx_slot = dev->ack_slot = 0;

        emac_configure(dev);

        mal_enable_tx_channel(dev->mal, emacdata->mal_tx_chan);
        emac_tx_enable(dev);
        emac_rx_enable(dev);

        netif_wake_queue(ndev);
}
554
/* Low-level MDIO read through this EMAC's STA controller.
 * Returns the 16-bit PHY register value on success, -EREMOTEIO if the
 * PHY flagged an error, or -ETIMEDOUT if the management interface never
 * became idle / never completed.
 */
static int __emac_mdio_read(struct ocp_enet_private *dev, u8 id, u8 reg)
{
        struct emac_regs __iomem *p = dev->emacp;
        u32 r;
        int n;

        DBG2("%d: mdio_read(%02x,%02x)" NL, dev->def->index, id, reg);

        /* Enable proper MDIO port */
        zmii_enable_mdio(dev->zmii_dev, dev->zmii_input);

        /* Wait for management interface to become idle */
        n = 10;
        while (!emac_phy_done(in_be32(&p->stacr))) {
                udelay(1);
                if (!--n)
                        goto to;
        }

        /* Issue read command */
        out_be32(&p->stacr,
                 EMAC_STACR_BASE(emac_opb_mhz()) | EMAC_STACR_STAC_READ |
                 (reg & EMAC_STACR_PRA_MASK)
                 | ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT)
                 | EMAC_STACR_START);

        /* Wait for read to complete */
        n = 100;
        while (!emac_phy_done(r = in_be32(&p->stacr))) {
                udelay(1);
                if (!--n)
                        goto to;
        }

        if (unlikely(r & EMAC_STACR_PHYE)) {
                DBG("%d: mdio_read(%02x, %02x) failed" NL, dev->def->index,
                    id, reg);
                return -EREMOTEIO;
        }

        /* Extract the 16-bit data field from STACR */
        r = ((r >> EMAC_STACR_PHYD_SHIFT) & EMAC_STACR_PHYD_MASK);
        DBG2("%d: mdio_read -> %04x" NL, dev->def->index, r);
        return r;
      to:
        DBG("%d: MII management interface timeout (read)" NL, dev->def->index);
        return -ETIMEDOUT;
}
602
/* Low-level MDIO write through this EMAC's STA controller.
 * Timeouts are only logged (debug trace); the caller gets no error
 * indication.
 */
static void __emac_mdio_write(struct ocp_enet_private *dev, u8 id, u8 reg,
                              u16 val)
{
        struct emac_regs __iomem *p = dev->emacp;
        int n;

        DBG2("%d: mdio_write(%02x,%02x,%04x)" NL, dev->def->index, id, reg,
             val);

        /* Enable proper MDIO port */
        zmii_enable_mdio(dev->zmii_dev, dev->zmii_input);

        /* Wait for management interface to be idle */
        n = 10;
        while (!emac_phy_done(in_be32(&p->stacr))) {
                udelay(1);
                if (!--n)
                        goto to;
        }

        /* Issue write command */
        out_be32(&p->stacr,
                 EMAC_STACR_BASE(emac_opb_mhz()) | EMAC_STACR_STAC_WRITE |
                 (reg & EMAC_STACR_PRA_MASK) |
                 ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT) |
                 (val << EMAC_STACR_PHYD_SHIFT) | EMAC_STACR_START);

        /* Wait for write to complete */
        n = 100;
        while (!emac_phy_done(in_be32(&p->stacr))) {
                udelay(1);
                if (!--n)
                        goto to;
        }
        return;
      to:
        DBG("%d: MII management interface timeout (write)" NL, dev->def->index);
}
641
642 static int emac_mdio_read(struct net_device *ndev, int id, int reg)
643 {
644         struct ocp_enet_private *dev = ndev->priv;
645         int res;
646
647         local_bh_disable();
648         res = __emac_mdio_read(dev->mdio_dev ? dev->mdio_dev : dev, (u8) id,
649                                (u8) reg);
650         local_bh_enable();
651         return res;
652 }
653
654 static void emac_mdio_write(struct net_device *ndev, int id, int reg, int val)
655 {
656         struct ocp_enet_private *dev = ndev->priv;
657
658         local_bh_disable();
659         __emac_mdio_write(dev->mdio_dev ? dev->mdio_dev : dev, (u8) id,
660                           (u8) reg, (u16) val);
661         local_bh_enable();
662 }
663
/* Update the RX mode register (promisc / multicast filtering) without a
 * full EMAC reset.  BHs disabled.
 */
static void emac_set_multicast_list(struct net_device *ndev)
{
        struct ocp_enet_private *dev = ndev->priv;
        struct emac_regs __iomem *p = dev->emacp;
        u32 rmr = emac_iff2rmr(ndev);

        DBG("%d: multicast %08x" NL, dev->def->index, rmr);
        BUG_ON(!netif_running(dev->ndev));

        /* I decided to relax register access rules here to avoid
         * full EMAC reset.
         *
         * There is a real problem with EMAC4 core if we use MWSW_001 bit 
         * in MR1 register and do a full EMAC reset.
         * One TX BD status update is delayed and, after EMAC reset, it 
         * never happens, resulting in TX hung (it'll be recovered by TX 
         * timeout handler eventually, but this is just gross).
         * So we either have to do full TX reset or try to cheat here :)
         *
         * The only required change is to RX mode register, so I *think* all
         * we need is just to stop RX channel. This seems to work on all
         * tested SoCs.                                                --ebs
         */
        emac_rx_disable(dev);
        if (rmr & EMAC_RMR_MAE)
                emac_hash_mc(dev);
        out_be32(&p->rmr, rmr);
        emac_rx_enable(dev);
}
694
/* Rebuild the RX ring for a new MTU: drain pending packets, mark every
 * BD empty, and — only when the new MTU needs bigger buffers — replace
 * each skb.  Returns 0 on success or -ENOMEM if an skb allocation
 * failed.  RX is always restarted before returning.  BHs disabled.
 */
static int emac_resize_rx_ring(struct ocp_enet_private *dev, int new_mtu)
{
        struct ocp_func_emac_data *emacdata = dev->def->additions;
        int rx_sync_size = emac_rx_sync_size(new_mtu);
        int rx_skb_size = emac_rx_skb_size(new_mtu);
        int i, ret = 0;

        emac_rx_disable(dev);
        mal_disable_rx_channel(dev->mal, emacdata->mal_rx_chan);

        /* A partially-received scatter/gather packet can't survive the
         * resize — drop it */
        if (dev->rx_sg_skb) {
                ++dev->estats.rx_dropped_resize;
                dev_kfree_skb(dev->rx_sg_skb);
                dev->rx_sg_skb = NULL;
        }

        /* Make a first pass over RX ring and mark BDs ready, dropping 
         * non-processed packets on the way. We need this as a separate pass
         * to simplify error recovery in the case of allocation failure later.
         */
        for (i = 0; i < NUM_RX_BUFF; ++i) {
                if (dev->rx_desc[i].ctrl & MAL_RX_CTRL_FIRST)
                        ++dev->estats.rx_dropped_resize;

                dev->rx_desc[i].data_len = 0;
                dev->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY |
                    (i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
        }

        /* Reallocate RX ring only if bigger skb buffers are required */
        if (rx_skb_size <= dev->rx_skb_size)
                goto skip;

        /* Second pass, allocate new skbs */
        for (i = 0; i < NUM_RX_BUFF; ++i) {
                struct sk_buff *skb = alloc_skb(rx_skb_size, GFP_ATOMIC);
                if (!skb) {
                        ret = -ENOMEM;
                        goto oom;
                }

                BUG_ON(!dev->rx_skb[i]);
                dev_kfree_skb(dev->rx_skb[i]);

                /* +2/-2: keep the IP header word-aligned while mapping
                 * from an aligned address */
                skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
                dev->rx_desc[i].data_ptr =
                    dma_map_single(dev->ldev, skb->data - 2, rx_sync_size,
                                   DMA_FROM_DEVICE) + 2;
                dev->rx_skb[i] = skb;
        }
      skip:
        /* Check if we need to change "Jumbo" bit in MR1 */
        if ((new_mtu > ETH_DATA_LEN) ^ (dev->ndev->mtu > ETH_DATA_LEN)) {
                /* This is to prevent starting RX channel in emac_rx_enable() */
                dev->commac.rx_stopped = 1;

                dev->ndev->mtu = new_mtu;
                emac_full_tx_reset(dev->ndev);
        }

        mal_set_rcbs(dev->mal, emacdata->mal_rx_chan, emac_rx_size(new_mtu));
      oom:
        /* Restart RX */
        dev->commac.rx_stopped = dev->rx_slot = 0;
        mal_enable_rx_channel(dev->mal, emacdata->mal_rx_chan);
        emac_rx_enable(dev);

        return ret;
}
765
766 /* Process ctx, rtnl_lock semaphore */
767 static int emac_change_mtu(struct net_device *ndev, int new_mtu)
768 {
769         struct ocp_enet_private *dev = ndev->priv;
770         int ret = 0;
771
772         if (new_mtu < EMAC_MIN_MTU || new_mtu > EMAC_MAX_MTU)
773                 return -EINVAL;
774
775         DBG("%d: change_mtu(%d)" NL, dev->def->index, new_mtu);
776
777         local_bh_disable();
778         if (netif_running(ndev)) {
779                 /* Check if we really need to reinitalize RX ring */
780                 if (emac_rx_skb_size(ndev->mtu) != emac_rx_skb_size(new_mtu))
781                         ret = emac_resize_rx_ring(dev, new_mtu);
782         }
783
784         if (!ret) {
785                 ndev->mtu = new_mtu;
786                 dev->rx_skb_size = emac_rx_skb_size(new_mtu);
787                 dev->rx_sync_size = emac_rx_sync_size(new_mtu);
788         }       
789         local_bh_enable();
790
791         return ret;
792 }
793
794 static void emac_clean_tx_ring(struct ocp_enet_private *dev)
795 {
796         int i;
797         for (i = 0; i < NUM_TX_BUFF; ++i) {
798                 if (dev->tx_skb[i]) {
799                         dev_kfree_skb(dev->tx_skb[i]);
800                         dev->tx_skb[i] = NULL;
801                         if (dev->tx_desc[i].ctrl & MAL_TX_CTRL_READY)
802                                 ++dev->estats.tx_dropped;
803                 }
804                 dev->tx_desc[i].ctrl = 0;
805                 dev->tx_desc[i].data_ptr = 0;
806         }
807 }
808
809 static void emac_clean_rx_ring(struct ocp_enet_private *dev)
810 {
811         int i;
812         for (i = 0; i < NUM_RX_BUFF; ++i)
813                 if (dev->rx_skb[i]) {
814                         dev->rx_desc[i].ctrl = 0;
815                         dev_kfree_skb(dev->rx_skb[i]);
816                         dev->rx_skb[i] = NULL;
817                         dev->rx_desc[i].data_ptr = 0;
818                 }
819
820         if (dev->rx_sg_skb) {
821                 dev_kfree_skb(dev->rx_sg_skb);
822                 dev->rx_sg_skb = NULL;
823         }
824 }
825
/* Allocate and DMA-map a fresh RX skb for ring slot 'slot'.
 * Returns 0 or -ENOMEM.  The +2/-2 offsets keep the IP header
 * word-aligned while the DMA mapping starts from an aligned address.
 */
static inline int emac_alloc_rx_skb(struct ocp_enet_private *dev, int slot,
                                    gfp_t flags)
{
        struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags);
        if (unlikely(!skb))
                return -ENOMEM;

        dev->rx_skb[slot] = skb;
        dev->rx_desc[slot].data_len = 0;

        skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
        dev->rx_desc[slot].data_ptr = 
            dma_map_single(dev->ldev, skb->data - 2, dev->rx_sync_size, 
                           DMA_FROM_DEVICE) + 2;
        /* Compiler barrier: descriptor setup must not be reordered past
         * the ctrl write below, which hands the BD to the hardware */
        barrier();
        dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
            (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);

        return 0;
}
846
847 static void emac_print_link_status(struct ocp_enet_private *dev)
848 {
849         if (netif_carrier_ok(dev->ndev))
850                 printk(KERN_INFO "%s: link is up, %d %s%s\n",
851                        dev->ndev->name, dev->phy.speed,
852                        dev->phy.duplex == DUPLEX_FULL ? "FDX" : "HDX",
853                        dev->phy.pause ? ", pause enabled" :
854                        dev->phy.asym_pause ? ", assymetric pause enabled" : "");
855         else
856                 printk(KERN_INFO "%s: link is down\n", dev->ndev->name);
857 }
858
/* Process ctx, rtnl_lock semaphore */
/*
 * net_device open() hook: request the error IRQ, populate the RX ring,
 * start PHY link polling (or force carrier on for PHY-less setups),
 * configure the MAC, enable the MAL channels and start the TX queue.
 * Returns 0 on success or a negative errno.
 */
static int emac_open(struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	struct ocp_func_emac_data *emacdata = dev->def->additions;
	int err, i;

	DBG("%d: open" NL, dev->def->index);

	/* Setup error IRQ handler */
	err = request_irq(dev->def->irq, emac_irq, 0, "EMAC", dev);
	if (err) {
		printk(KERN_ERR "%s: failed to request IRQ %d\n",
		       ndev->name, dev->def->irq);
		return err;
	}

	/* Allocate RX ring */
	for (i = 0; i < NUM_RX_BUFF; ++i)
		if (emac_alloc_rx_skb(dev, i, GFP_KERNEL)) {
			printk(KERN_ERR "%s: failed to allocate RX ring\n",
			       ndev->name);
			goto oom;
		}

	/* BHs off: ring state below is shared with the MAL poll handlers */
	local_bh_disable();
	dev->tx_cnt = dev->tx_slot = dev->ack_slot = dev->rx_slot =
	    dev->commac.rx_stopped = 0;
	dev->rx_sg_skb = NULL;

	if (dev->phy.address >= 0) {
		/* Real PHY present: sample the link once and start the
		 * polling timer with an interval matching the link state.
		 */
		int link_poll_interval;
		if (dev->phy.def->ops->poll_link(&dev->phy)) {
			dev->phy.def->ops->read_link(&dev->phy);
			EMAC_RX_CLK_DEFAULT(dev->def->index);
			netif_carrier_on(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_ON;
		} else {
			EMAC_RX_CLK_TX(dev->def->index);
			netif_carrier_off(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_OFF;
		}
		mod_timer(&dev->link_timer, jiffies + link_poll_interval);
		emac_print_link_status(dev);
	} else
		/* No PHY (e.g. direct MAC-to-MAC link): always up */
		netif_carrier_on(dev->ndev);

	emac_configure(dev);
	mal_poll_add(dev->mal, &dev->commac);
	mal_enable_tx_channel(dev->mal, emacdata->mal_tx_chan);
	mal_set_rcbs(dev->mal, emacdata->mal_rx_chan, emac_rx_size(ndev->mtu));
	mal_enable_rx_channel(dev->mal, emacdata->mal_rx_chan);
	emac_tx_enable(dev);
	emac_rx_enable(dev);
	netif_start_queue(ndev);
	local_bh_enable();

	return 0;
      oom:
	emac_clean_rx_ring(dev);
	free_irq(dev->def->irq, dev);
	return -ENOMEM;
}
922
/* BHs disabled */
/*
 * Compare the speed/duplex/flow-control parameters currently programmed
 * into the EMAC MR1 register with those last read from the PHY.
 * Returns non-zero if they differ, i.e. the MAC must be reconfigured.
 */
static int emac_link_differs(struct ocp_enet_private *dev)
{
	u32 r = in_be32(&dev->emacp->mr1);

	int duplex = r & EMAC_MR1_FDE ? DUPLEX_FULL : DUPLEX_HALF;
	int speed, pause, asym_pause;

	if (r & (EMAC_MR1_MF_1000 | EMAC_MR1_MF_1000GPCS))
		speed = SPEED_1000;
	else if (r & EMAC_MR1_MF_100)
		speed = SPEED_100;
	else
		speed = SPEED_10;

	/* Decode the flow-control mode from the EIFC/APP bit pair */
	switch (r & (EMAC_MR1_EIFC | EMAC_MR1_APP)) {
	case (EMAC_MR1_EIFC | EMAC_MR1_APP):
		pause = 1;
		asym_pause = 0;
		break;
	case EMAC_MR1_APP:
		pause = 0;
		asym_pause = 1;
		break;
	default:
		pause = asym_pause = 0;
	}
	return speed != dev->phy.speed || duplex != dev->phy.duplex ||
	    pause != dev->phy.pause || asym_pause != dev->phy.asym_pause;
}
953
/* BHs disabled */
/*
 * Periodic PHY link poll.  On a link-up transition it re-reads the link
 * parameters and resets the TX path if they changed (or a TAH is in
 * use); on link-down it switches the RX clock and, where configured,
 * reinitializes the chip.  Reschedules itself with an interval that
 * depends on the current link state.
 */
static void emac_link_timer(unsigned long data)
{
	struct ocp_enet_private *dev = (struct ocp_enet_private *)data;
	int link_poll_interval;

	DBG2("%d: link timer" NL, dev->def->index);

	if (dev->phy.def->ops->poll_link(&dev->phy)) {
		if (!netif_carrier_ok(dev->ndev)) {
			/* Link just came up */
			EMAC_RX_CLK_DEFAULT(dev->def->index);

			/* Get new link parameters */
			dev->phy.def->ops->read_link(&dev->phy);

			if (dev->tah_dev || emac_link_differs(dev))
				emac_full_tx_reset(dev->ndev);

			netif_carrier_on(dev->ndev);
			emac_print_link_status(dev);
		}
		link_poll_interval = PHY_POLL_LINK_ON;
	} else {
		if (netif_carrier_ok(dev->ndev)) {
			/* Link just went down */
			EMAC_RX_CLK_TX(dev->def->index);
#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX)
			emac_reinitialize(dev);
#endif
			netif_carrier_off(dev->ndev);
			emac_print_link_status(dev);
		}

		/* Retry reset if the previous attempt failed.
		 * This is needed mostly for CONFIG_IBM_EMAC_PHY_RX_CLK_FIX
		 * case, but I left it here because it shouldn't trigger for
		 * sane PHYs anyway.
		 */
		if (unlikely(dev->reset_failed))
			emac_reinitialize(dev);

		link_poll_interval = PHY_POLL_LINK_OFF;
	}
	mod_timer(&dev->link_timer, jiffies + link_poll_interval);
}
998
/* BHs disabled */
/*
 * Force the link state to be re-evaluated soon: drop the carrier and,
 * if the PHY polling timer is active, make it fire after the (shorter)
 * link-down polling interval.
 */
static void emac_force_link_update(struct ocp_enet_private *dev)
{
	netif_carrier_off(dev->ndev);
	if (timer_pending(&dev->link_timer))
		mod_timer(&dev->link_timer, jiffies + PHY_POLL_LINK_OFF);
}
1006
/* Process ctx, rtnl_lock semaphore */
/*
 * net_device stop() hook: stop link polling, quiesce the MAC and MAL
 * channels (with BHs disabled so the poll handlers cannot run
 * concurrently), then free the rings and the error IRQ.
 */
static int emac_close(struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	struct ocp_func_emac_data *emacdata = dev->def->additions;

	DBG("%d: close" NL, dev->def->index);

	local_bh_disable();

	if (dev->phy.address >= 0)
		del_timer_sync(&dev->link_timer);

	netif_stop_queue(ndev);
	emac_rx_disable(dev);
	emac_tx_disable(dev);
	mal_disable_rx_channel(dev->mal, emacdata->mal_rx_chan);
	mal_disable_tx_channel(dev->mal, emacdata->mal_tx_chan);
	mal_poll_del(dev->mal, &dev->commac);
	local_bh_enable();

	/* Safe to clean the rings now: nothing can touch them anymore */
	emac_clean_tx_ring(dev);
	emac_clean_rx_ring(dev);
	free_irq(dev->def->irq, dev);

	return 0;
}
1034
/*
 * Return the TX descriptor control bits requesting hardware checksum
 * insertion for this skb.  Checksum offload is only available on EMACs
 * equipped with a TAH; without CONFIG_IBM_EMAC_TAH this is always 0.
 */
static inline u16 emac_tx_csum(struct ocp_enet_private *dev,
			       struct sk_buff *skb)
{
#if defined(CONFIG_IBM_EMAC_TAH)
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		++dev->stats.tx_packets_csum;
		return EMAC_TX_CTRL_TAH_CSUM;
	}
#endif
	return 0;
}
1046
/*
 * Common tail of the xmit paths: kick the EMAC transmitter, stop the
 * queue if the TX ring just became full, and update TX statistics.
 * Always returns 0 (packet accepted).
 */
static inline int emac_xmit_finish(struct ocp_enet_private *dev, int len)
{
	struct emac_regs __iomem *p = dev->emacp;
	struct net_device *ndev = dev->ndev;

	/* Send the packet out */
	out_be32(&p->tmr0, EMAC_TMR0_XMIT);

	if (unlikely(++dev->tx_cnt == NUM_TX_BUFF)) {
		netif_stop_queue(ndev);
		DBG2("%d: stopped TX queue" NL, dev->def->index);
	}

	ndev->trans_start = jiffies;
	++dev->stats.tx_packets;
	dev->stats.tx_bytes += len;

	return 0;
}
1066
/* BHs disabled */
/*
 * hard_start_xmit for non-SG EMACs: the whole skb fits in one
 * descriptor.  The descriptor's ctrl word (with READY) is written last,
 * after data_ptr/data_len, separated by barrier() so the MAL never sees
 * a ready descriptor with stale pointers.
 */
static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	unsigned int len = skb->len;
	int slot;

	u16 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
	    MAL_TX_CTRL_LAST | emac_tx_csum(dev, skb);

	slot = dev->tx_slot++;
	if (dev->tx_slot == NUM_TX_BUFF) {
		dev->tx_slot = 0;
		ctrl |= MAL_TX_CTRL_WRAP;
	}

	DBG2("%d: xmit(%u) %d" NL, dev->def->index, len, slot);

	dev->tx_skb[slot] = skb;
	dev->tx_desc[slot].data_ptr = dma_map_single(dev->ldev, skb->data, len,
						     DMA_TO_DEVICE);
	dev->tx_desc[slot].data_len = (u16) len;
	barrier();		/* pointers before READY */
	dev->tx_desc[slot].ctrl = ctrl;

	return emac_xmit_finish(dev, len);
}
1094
#if defined(CONFIG_IBM_EMAC_TAH)
/*
 * Spread a DMA-mapped region of 'len' bytes at bus address 'pd' over
 * consecutive TX slots, MAL_MAX_TX_SIZE bytes per descriptor, starting
 * at the slot AFTER 'slot'.  'last' indicates this region ends the
 * frame (sets MAL_TX_CTRL_LAST on the final chunk).  Returns the last
 * slot used.  Note: the caller owns the first slot of the frame and is
 * responsible for making it READY last.
 */
static inline int emac_xmit_split(struct ocp_enet_private *dev, int slot,
				  u32 pd, int len, int last, u16 base_ctrl)
{
	while (1) {
		u16 ctrl = base_ctrl;
		int chunk = min(len, MAL_MAX_TX_SIZE);
		len -= chunk;

		slot = (slot + 1) % NUM_TX_BUFF;

		if (last && !len)
			ctrl |= MAL_TX_CTRL_LAST;
		if (slot == NUM_TX_BUFF - 1)
			ctrl |= MAL_TX_CTRL_WRAP;

		/* Continuation slots carry no skb; it is attached to the
		 * frame's final slot by the caller.
		 */
		dev->tx_skb[slot] = NULL;
		dev->tx_desc[slot].data_ptr = pd;
		dev->tx_desc[slot].data_len = (u16) chunk;
		dev->tx_desc[slot].ctrl = ctrl;
		++dev->tx_cnt;

		if (!len)
			break;

		pd += chunk;
	}
	return slot;
}
1124
/* BHs disabled (SG version for TAH equipped EMACs) */
/*
 * Scatter/gather hard_start_xmit: maps the linear part and each page
 * fragment, splitting every region into MAL_MAX_TX_SIZE chunks.  The
 * first slot's ctrl word (with READY) is written last so the MAL does
 * not start on a half-built frame.  If the ring fills mid-frame, all
 * descriptors written so far are rolled back and the queue is stopped
 * (returns 1 so the stack requeues the skb).
 */
static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int len = skb->len, chunk;
	int slot, i;
	u16 ctrl;
	u32 pd;

	/* This is common "fast" path */
	if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE))
		return emac_start_xmit(skb, ndev);

	len -= skb->data_len;	/* len = linear part only from here on */

	/* Note, this is only an *estimation*, we can still run out of empty
	 * slots because of the additional fragmentation into
	 * MAL_MAX_TX_SIZE-sized chunks
	 */
	if (unlikely(dev->tx_cnt + nr_frags + mal_tx_chunks(len) > NUM_TX_BUFF))
		goto stop_queue;

	ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
	    emac_tx_csum(dev, skb);
	slot = dev->tx_slot;

	/* skb data */
	dev->tx_skb[slot] = NULL;
	chunk = min(len, MAL_MAX_TX_SIZE);
	dev->tx_desc[slot].data_ptr = pd =
	    dma_map_single(dev->ldev, skb->data, len, DMA_TO_DEVICE);
	dev->tx_desc[slot].data_len = (u16) chunk;
	len -= chunk;
	if (unlikely(len))
		slot = emac_xmit_split(dev, slot, pd + chunk, len, !nr_frags,
				       ctrl);
	/* skb fragments */
	for (i = 0; i < nr_frags; ++i) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		len = frag->size;

		/* Re-check space per fragment; the estimate above may
		 * have been optimistic.
		 */
		if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
			goto undo_frame;

		pd = dma_map_page(dev->ldev, frag->page, frag->page_offset, len,
				  DMA_TO_DEVICE);

		slot = emac_xmit_split(dev, slot, pd, len, i == nr_frags - 1,
				       ctrl);
	}

	DBG2("%d: xmit_sg(%u) %d - %d" NL, dev->def->index, skb->len,
	     dev->tx_slot, slot);

	/* Attach skb to the last slot so we don't release it too early */
	dev->tx_skb[slot] = skb;

	/* Send the packet out */
	if (dev->tx_slot == NUM_TX_BUFF - 1)
		ctrl |= MAL_TX_CTRL_WRAP;
	barrier();		/* all continuation slots before first READY */
	dev->tx_desc[dev->tx_slot].ctrl = ctrl;
	dev->tx_slot = (slot + 1) % NUM_TX_BUFF;

	return emac_xmit_finish(dev, skb->len);

      undo_frame:
	/* Well, too bad. Our previous estimation was overly optimistic. 
	 * Undo everything.
	 */
	while (slot != dev->tx_slot) {
		dev->tx_desc[slot].ctrl = 0;
		--dev->tx_cnt;
		if (--slot < 0)
			slot = NUM_TX_BUFF - 1;
	}
	++dev->estats.tx_undo;

      stop_queue:
	netif_stop_queue(ndev);
	DBG2("%d: stopped TX queue" NL, dev->def->index);
	return 1;
}
#else
# define emac_start_xmit_sg	emac_start_xmit
#endif	/* !defined(CONFIG_IBM_EMAC_TAH) */
1212
/* BHs disabled */
/*
 * Translate the error bits of a completed TX descriptor's ctrl word
 * into the driver's extended error statistics.
 */
static void emac_parse_tx_error(struct ocp_enet_private *dev, u16 ctrl)
{
	struct ibm_emac_error_stats *st = &dev->estats;
	DBG("%d: BD TX error %04x" NL, dev->def->index, ctrl);

	++st->tx_bd_errors;
	if (ctrl & EMAC_TX_ST_BFCS)
		++st->tx_bd_bad_fcs;
	if (ctrl & EMAC_TX_ST_LCS)
		++st->tx_bd_carrier_loss;
	if (ctrl & EMAC_TX_ST_ED)
		++st->tx_bd_excessive_deferral;
	if (ctrl & EMAC_TX_ST_EC)
		++st->tx_bd_excessive_collisions;
	if (ctrl & EMAC_TX_ST_LC)
		++st->tx_bd_late_collision;
	if (ctrl & EMAC_TX_ST_MC)
		++st->tx_bd_multple_collisions;
	if (ctrl & EMAC_TX_ST_SC)
		++st->tx_bd_single_collision;
	if (ctrl & EMAC_TX_ST_UR)
		++st->tx_bd_underrun;
	if (ctrl & EMAC_TX_ST_SQE)
		++st->tx_bd_sqe;
}
1239
/*
 * MAL poll callback for TX completion: walk the ring from ack_slot,
 * reclaiming every descriptor the hardware has released (READY bit
 * clear), freeing skbs, accounting BD errors, and waking the queue
 * once enough slots have drained.
 */
static void emac_poll_tx(void *param)
{
	struct ocp_enet_private *dev = param;
	DBG2("%d: poll_tx, %d %d" NL, dev->def->index, dev->tx_cnt,
	     dev->ack_slot);

	if (dev->tx_cnt) {
		u16 ctrl;
		int slot = dev->ack_slot, n = 0;
	      again:
		ctrl = dev->tx_desc[slot].ctrl;
		if (!(ctrl & MAL_TX_CTRL_READY)) {
			struct sk_buff *skb = dev->tx_skb[slot];
			++n;

			/* Continuation slots of an SG frame have no skb */
			if (skb) {
				dev_kfree_skb(skb);
				dev->tx_skb[slot] = NULL;
			}
			slot = (slot + 1) % NUM_TX_BUFF;

			if (unlikely(EMAC_IS_BAD_TX(ctrl)))
				emac_parse_tx_error(dev, ctrl);

			if (--dev->tx_cnt)
				goto again;
		}
		if (n) {
			dev->ack_slot = slot;
			if (netif_queue_stopped(dev->ndev) &&
			    dev->tx_cnt < EMAC_TX_WAKEUP_THRESH)
				netif_wake_queue(dev->ndev);

			DBG2("%d: tx %d pkts" NL, dev->def->index, n);
		}
	}
}
1277
/*
 * Hand an existing RX skb back to the hardware for reuse in 'slot'.
 * 'len' is the number of bytes the CPU may have touched (0 if none);
 * the buffer is re-mapped for that span before the descriptor is
 * marked EMPTY again.
 */
static inline void emac_recycle_rx_skb(struct ocp_enet_private *dev, int slot,
				       int len)
{
	struct sk_buff *skb = dev->rx_skb[slot];
	DBG2("%d: recycle %d %d" NL, dev->def->index, slot, len);

	if (len) 
		dma_map_single(dev->ldev, skb->data - 2, 
			       EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE);

	dev->rx_desc[slot].data_len = 0;
	barrier();		/* clear length before giving slot back */
	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
}
1293
/*
 * Translate the error bits of a received descriptor's ctrl word into
 * the driver's extended error statistics.
 */
static void emac_parse_rx_error(struct ocp_enet_private *dev, u16 ctrl)
{
	struct ibm_emac_error_stats *st = &dev->estats;
	DBG("%d: BD RX error %04x" NL, dev->def->index, ctrl);

	++st->rx_bd_errors;
	if (ctrl & EMAC_RX_ST_OE)
		++st->rx_bd_overrun;
	if (ctrl & EMAC_RX_ST_BP)
		++st->rx_bd_bad_packet;
	if (ctrl & EMAC_RX_ST_RP)
		++st->rx_bd_runt_packet;
	if (ctrl & EMAC_RX_ST_SE)
		++st->rx_bd_short_event;
	if (ctrl & EMAC_RX_ST_AE)
		++st->rx_bd_alignment_error;
	if (ctrl & EMAC_RX_ST_BFCS)
		++st->rx_bd_bad_fcs;
	if (ctrl & EMAC_RX_ST_PTL)
		++st->rx_bd_packet_too_long;
	if (ctrl & EMAC_RX_ST_ORE)
		++st->rx_bd_out_of_range;
	if (ctrl & EMAC_RX_ST_IRE)
		++st->rx_bd_in_range;
}
1319
/*
 * Mark an RX skb as checksum-verified when the TAH reported no
 * checksum error (ctrl == 0 after masking).  No-op without
 * CONFIG_IBM_EMAC_TAH or without a TAH device attached.
 */
static inline void emac_rx_csum(struct ocp_enet_private *dev,
				struct sk_buff *skb, u16 ctrl)
{
#if defined(CONFIG_IBM_EMAC_TAH)
	if (!ctrl && dev->tah_dev) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		++dev->stats.rx_packets_csum;
	}
#endif
}
1330
/*
 * Append the data of RX slot 'slot' to the scatter/gather packet being
 * reassembled in dev->rx_sg_skb.  Returns 0 on success; -1 if there is
 * no packet in progress or it would exceed rx_skb_size (the partial
 * packet is then dropped and counted).  The slot is recycled in either
 * case.
 */
static inline int emac_rx_sg_append(struct ocp_enet_private *dev, int slot)
{
	if (likely(dev->rx_sg_skb != NULL)) {
		int len = dev->rx_desc[slot].data_len;
		int tot_len = dev->rx_sg_skb->len + len;

		if (unlikely(tot_len + 2 > dev->rx_skb_size)) {
			++dev->estats.rx_dropped_mtu;
			dev_kfree_skb(dev->rx_sg_skb);
			dev->rx_sg_skb = NULL;
		} else {
			cacheable_memcpy(dev->rx_sg_skb->tail,
					 dev->rx_skb[slot]->data, len);
			skb_put(dev->rx_sg_skb, len);
			emac_recycle_rx_skb(dev, slot, len);
			return 0;
		}
	}
	emac_recycle_rx_skb(dev, slot, 0);
	return -1;
}
1352
/* BHs disabled */
/*
 * MAL poll callback for RX: consume up to 'budget' filled descriptors
 * starting at dev->rx_slot.  Small packets are copied into a fresh skb
 * and the ring buffer recycled; larger ones are handed to the stack and
 * replaced via emac_alloc_rx_skb().  Multi-descriptor (SG) packets are
 * reassembled through dev->rx_sg_skb.  If the channel was stopped by an
 * RX descriptor error (see emac_rxde), it is restarted here once the
 * ring has been drained.  Returns the number of descriptors processed.
 */
static int emac_poll_rx(void *param, int budget)
{
	struct ocp_enet_private *dev = param;
	int slot = dev->rx_slot, received = 0;

	DBG2("%d: poll_rx(%d)" NL, dev->def->index, budget);

      again:
	while (budget > 0) {
		int len;
		struct sk_buff *skb;
		u16 ctrl = dev->rx_desc[slot].ctrl;

		if (ctrl & MAL_RX_CTRL_EMPTY)
			break;	/* hardware still owns this slot */

		skb = dev->rx_skb[slot];
		barrier();	/* read ctrl before data_len */
		len = dev->rx_desc[slot].data_len;

		if (unlikely(!MAL_IS_SINGLE_RX(ctrl)))
			goto sg;	/* part of a multi-descriptor packet */

		ctrl &= EMAC_BAD_RX_MASK;
		if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
			/* Hard receive error: drop and recycle the buffer */
			emac_parse_rx_error(dev, ctrl);
			++dev->estats.rx_dropped_error;
			emac_recycle_rx_skb(dev, slot, 0);
			len = 0;
			goto next;
		}

		if (len && len < EMAC_RX_COPY_THRESH) {
			/* Small packet: copy out and keep the ring buffer */
			struct sk_buff *copy_skb =
			    alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2, GFP_ATOMIC);
			if (unlikely(!copy_skb))
				goto oom;

			skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2);
			cacheable_memcpy(copy_skb->data - 2, skb->data - 2,
					 len + 2);
			emac_recycle_rx_skb(dev, slot, len);
			skb = copy_skb;
		} else if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC)))
			goto oom;

		skb_put(skb, len);
	      push_packet:
		skb->dev = dev->ndev;
		skb->protocol = eth_type_trans(skb, dev->ndev);
		emac_rx_csum(dev, skb, ctrl);

		if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
			++dev->estats.rx_dropped_stack;
	      next:
		++dev->stats.rx_packets;
	      skip:
		dev->stats.rx_bytes += len;
		slot = (slot + 1) % NUM_RX_BUFF;
		--budget;
		++received;
		continue;
	      sg:
		/* Scatter/gather reassembly path */
		if (ctrl & MAL_RX_CTRL_FIRST) {
			BUG_ON(dev->rx_sg_skb);
			if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) {
				DBG("%d: rx OOM %d" NL, dev->def->index, slot);
				++dev->estats.rx_dropped_oom;
				emac_recycle_rx_skb(dev, slot, 0);
			} else {
				dev->rx_sg_skb = skb;
				skb_put(skb, len);
			}
		} else if (!emac_rx_sg_append(dev, slot) &&
			   (ctrl & MAL_RX_CTRL_LAST)) {

			/* Last fragment: packet complete, hand it up */
			skb = dev->rx_sg_skb;
			dev->rx_sg_skb = NULL;

			ctrl &= EMAC_BAD_RX_MASK;
			if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
				emac_parse_rx_error(dev, ctrl);
				++dev->estats.rx_dropped_error;
				dev_kfree_skb(skb);
				len = 0;
			} else
				goto push_packet;
		}
		goto skip;
	      oom:
		DBG("%d: rx OOM %d" NL, dev->def->index, slot);
		/* Drop the packet and recycle skb */
		++dev->estats.rx_dropped_oom;
		emac_recycle_rx_skb(dev, slot, 0);
		goto next;
	}

	if (received) {
		DBG2("%d: rx %d BDs" NL, dev->def->index, received);
		dev->rx_slot = slot;
	}

	if (unlikely(budget && dev->commac.rx_stopped)) {
		/* Channel was stopped by a descriptor error; restart it
		 * once the ring is fully drained.
		 */
		struct ocp_func_emac_data *emacdata = dev->def->additions;

		barrier();
		if (!(dev->rx_desc[slot].ctrl & MAL_RX_CTRL_EMPTY)) {
			DBG2("%d: rx restart" NL, dev->def->index);
			received = 0;
			goto again;
		}

		if (dev->rx_sg_skb) {
			DBG2("%d: dropping partial rx packet" NL,
			     dev->def->index);
			++dev->estats.rx_dropped_error;
			dev_kfree_skb(dev->rx_sg_skb);
			dev->rx_sg_skb = NULL;
		}

		dev->commac.rx_stopped = 0;
		mal_enable_rx_channel(dev->mal, emacdata->mal_rx_chan);
		emac_rx_enable(dev);
		dev->rx_slot = 0;
	}
	return received;
}
1481
/* BHs disabled */
/* Non-SG peek: returns non-zero if the next RX descriptor holds data. */
static int emac_peek_rx(void *param)
{
	struct ocp_enet_private *dev = param;
	return !(dev->rx_desc[dev->rx_slot].ctrl & MAL_RX_CTRL_EMPTY);
}
1488
1489 /* BHs disabled */
1490 static int emac_peek_rx_sg(void *param)
1491 {
1492         struct ocp_enet_private *dev = param;
1493         int slot = dev->rx_slot;
1494         while (1) {
1495                 u16 ctrl = dev->rx_desc[slot].ctrl;
1496                 if (ctrl & MAL_RX_CTRL_EMPTY)
1497                         return 0;
1498                 else if (ctrl & MAL_RX_CTRL_LAST)
1499                         return 1;
1500
1501                 slot = (slot + 1) % NUM_RX_BUFF;
1502
1503                 /* I'm just being paranoid here :) */
1504                 if (unlikely(slot == dev->rx_slot))
1505                         return 0;
1506         }
1507 }
1508
/* Hard IRQ */
/*
 * RX descriptor error callback from the MAL: account the stall and
 * asynchronously disable the receiver; emac_poll_rx() restarts it once
 * the ring has been drained.
 */
static void emac_rxde(void *param)
{
	struct ocp_enet_private *dev = param;
	++dev->estats.rx_stopped;
	emac_rx_disable_async(dev);
}
1516
/* Hard IRQ */
/*
 * EMAC error interrupt handler: read and acknowledge the interrupt
 * status register, then bump the matching extended-statistics counter
 * for each error bit set.  No recovery is done here; this IRQ is purely
 * for accounting.
 */
static irqreturn_t emac_irq(int irq, void *dev_instance)
{
	struct ocp_enet_private *dev = dev_instance;
	struct emac_regs __iomem *p = dev->emacp;
	struct ibm_emac_error_stats *st = &dev->estats;

	u32 isr = in_be32(&p->isr);
	out_be32(&p->isr, isr);	/* write-1-to-clear acknowledges the bits */

	DBG("%d: isr = %08x" NL, dev->def->index, isr);

	if (isr & EMAC_ISR_TXPE)
		++st->tx_parity;
	if (isr & EMAC_ISR_RXPE)
		++st->rx_parity;
	if (isr & EMAC_ISR_TXUE)
		++st->tx_underrun;
	if (isr & EMAC_ISR_RXOE)
		++st->rx_fifo_overrun;
	if (isr & EMAC_ISR_OVR)
		++st->rx_overrun;
	if (isr & EMAC_ISR_BP)
		++st->rx_bad_packet;
	if (isr & EMAC_ISR_RP)
		++st->rx_runt_packet;
	if (isr & EMAC_ISR_SE)
		++st->rx_short_event;
	if (isr & EMAC_ISR_ALE)
		++st->rx_alignment_error;
	if (isr & EMAC_ISR_BFCS)
		++st->rx_bad_fcs;
	if (isr & EMAC_ISR_PTLE)
		++st->rx_packet_too_long;
	if (isr & EMAC_ISR_ORE)
		++st->rx_out_of_range;
	if (isr & EMAC_ISR_IRE)
		++st->rx_in_range;
	if (isr & EMAC_ISR_SQE)
		++st->tx_sqe;
	if (isr & EMAC_ISR_TE)
		++st->tx_errors;

	return IRQ_HANDLED;
}
1562
/*
 * net_device get_stats hook: fold the driver's 64-bit internal counters
 * into the "legacy" struct net_device_stats.  IRQs are disabled while
 * snapshotting so the counters are read consistently.
 */
static struct net_device_stats *emac_stats(struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	struct ibm_emac_stats *st = &dev->stats;
	struct ibm_emac_error_stats *est = &dev->estats;
	struct net_device_stats *nst = &dev->nstats;

	DBG2("%d: stats" NL, dev->def->index);

	/* Compute "legacy" statistics */
	local_irq_disable();
	nst->rx_packets = (unsigned long)st->rx_packets;
	nst->rx_bytes = (unsigned long)st->rx_bytes;
	nst->tx_packets = (unsigned long)st->tx_packets;
	nst->tx_bytes = (unsigned long)st->tx_bytes;
	nst->rx_dropped = (unsigned long)(est->rx_dropped_oom +
					  est->rx_dropped_error +
					  est->rx_dropped_resize +
					  est->rx_dropped_mtu);
	nst->tx_dropped = (unsigned long)est->tx_dropped;

	nst->rx_errors = (unsigned long)est->rx_bd_errors;
	nst->rx_fifo_errors = (unsigned long)(est->rx_bd_overrun +
					      est->rx_fifo_overrun +
					      est->rx_overrun);
	nst->rx_frame_errors = (unsigned long)(est->rx_bd_alignment_error +
					       est->rx_alignment_error);
	nst->rx_crc_errors = (unsigned long)(est->rx_bd_bad_fcs +
					     est->rx_bad_fcs);
	nst->rx_length_errors = (unsigned long)(est->rx_bd_runt_packet +
						est->rx_bd_short_event +
						est->rx_bd_packet_too_long +
						est->rx_bd_out_of_range +
						est->rx_bd_in_range +
						est->rx_runt_packet +
						est->rx_short_event +
						est->rx_packet_too_long +
						est->rx_out_of_range +
						est->rx_in_range);

	nst->tx_errors = (unsigned long)(est->tx_bd_errors + est->tx_errors);
	nst->tx_fifo_errors = (unsigned long)(est->tx_bd_underrun +
					      est->tx_underrun);
	nst->tx_carrier_errors = (unsigned long)est->tx_bd_carrier_loss;
	nst->collisions = (unsigned long)(est->tx_bd_excessive_deferral +
					  est->tx_bd_excessive_collisions +
					  est->tx_bd_late_collision +
					  est->tx_bd_multple_collisions);
	local_irq_enable();
	return nst;
}
1614
/*
 * OCP device remove hook: unregister the netdev, tear down the attached
 * TAH/RGMII/ZMII helpers, detach from the MAL and release the register
 * mapping and device memory.
 */
static void emac_remove(struct ocp_device *ocpdev)
{
	struct ocp_enet_private *dev = ocp_get_drvdata(ocpdev);

	DBG("%d: remove" NL, dev->def->index);

	ocp_set_drvdata(ocpdev, NULL);
	unregister_netdev(dev->ndev);

	tah_fini(dev->tah_dev);
	rgmii_fini(dev->rgmii_dev, dev->rgmii_input);
	zmii_fini(dev->zmii_dev, dev->zmii_input);

	emac_dbg_register(dev->def->index, NULL);

	mal_unregister_commac(dev->mal, &dev->commac);
	iounmap(dev->emacp);
	kfree(dev->ndev);
}
1634
/* MAL callbacks for non-SG EMACs */
static struct mal_commac_ops emac_commac_ops = {
	.poll_tx = &emac_poll_tx,
	.poll_rx = &emac_poll_rx,
	.peek_rx = &emac_peek_rx,
	.rxde = &emac_rxde,
};

/* MAL callbacks for SG-capable (TAH) EMACs: only peek_rx differs */
static struct mal_commac_ops emac_commac_sg_ops = {
	.poll_tx = &emac_poll_tx,
	.poll_rx = &emac_poll_rx,
	.peek_rx = &emac_peek_rx_sg,
	.rxde = &emac_rxde,
};
1648
/* Ethtool support */
/*
 * ethtool get_settings: report the PHY's capabilities and current
 * negotiated parameters.  The mutable PHY fields are copied with BHs
 * disabled so they are consistent with the link-timer updates.
 */
static int emac_ethtool_get_settings(struct net_device *ndev,
				     struct ethtool_cmd *cmd)
{
	struct ocp_enet_private *dev = ndev->priv;

	cmd->supported = dev->phy.features;
	cmd->port = PORT_MII;
	cmd->phy_address = dev->phy.address;
	/* Negative PHY address means no external PHY is attached */
	cmd->transceiver =
	    dev->phy.address >= 0 ? XCVR_EXTERNAL : XCVR_INTERNAL;

	local_bh_disable();
	cmd->advertising = dev->phy.advertising;
	cmd->autoneg = dev->phy.autoneg;
	cmd->speed = dev->phy.speed;
	cmd->duplex = dev->phy.duplex;
	local_bh_enable();

	return 0;
}
1670
1671 static int emac_ethtool_set_settings(struct net_device *ndev,
1672                                      struct ethtool_cmd *cmd)
1673 {
1674         struct ocp_enet_private *dev = ndev->priv;
1675         u32 f = dev->phy.features;
1676
1677         DBG("%d: set_settings(%d, %d, %d, 0x%08x)" NL, dev->def->index,
1678             cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising);
1679
1680         /* Basic sanity checks */
1681         if (dev->phy.address < 0)
1682                 return -EOPNOTSUPP;
1683         if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
1684                 return -EINVAL;
1685         if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
1686                 return -EINVAL;
1687         if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL)
1688                 return -EINVAL;
1689
1690         if (cmd->autoneg == AUTONEG_DISABLE) {
1691                 switch (cmd->speed) {
1692                 case SPEED_10:
1693                         if (cmd->duplex == DUPLEX_HALF
1694                             && !(f & SUPPORTED_10baseT_Half))
1695                                 return -EINVAL;
1696                         if (cmd->duplex == DUPLEX_FULL
1697                             && !(f & SUPPORTED_10baseT_Full))
1698                                 return -EINVAL;
1699                         break;
1700                 case SPEED_100:
1701                         if (cmd->duplex == DUPLEX_HALF
1702                             && !(f & SUPPORTED_100baseT_Half))
1703                                 return -EINVAL;
1704                         if (cmd->duplex == DUPLEX_FULL
1705                             && !(f & SUPPORTED_100baseT_Full))
1706                                 return -EINVAL;
1707                         break;
1708                 case SPEED_1000:
1709                         if (cmd->duplex == DUPLEX_HALF
1710                             && !(f & SUPPORTED_1000baseT_Half))
1711                                 return -EINVAL;
1712                         if (cmd->duplex == DUPLEX_FULL
1713                             && !(f & SUPPORTED_1000baseT_Full))
1714                                 return -EINVAL;
1715                         break;
1716                 default:
1717                         return -EINVAL;
1718                 }
1719
1720                 local_bh_disable();
1721                 dev->phy.def->ops->setup_forced(&dev->phy, cmd->speed,
1722                                                 cmd->duplex);
1723
1724         } else {
1725                 if (!(f & SUPPORTED_Autoneg))
1726                         return -EINVAL;
1727
1728                 local_bh_disable();
1729                 dev->phy.def->ops->setup_aneg(&dev->phy,
1730                                               (cmd->advertising & f) |
1731                                               (dev->phy.advertising &
1732                                                (ADVERTISED_Pause |
1733                                                 ADVERTISED_Asym_Pause)));
1734         }
1735         emac_force_link_update(dev);
1736         local_bh_enable();
1737
1738         return 0;
1739 }
1740
1741 static void emac_ethtool_get_ringparam(struct net_device *ndev,
1742                                        struct ethtool_ringparam *rp)
1743 {
1744         rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF;
1745         rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF;
1746 }
1747
1748 static void emac_ethtool_get_pauseparam(struct net_device *ndev,
1749                                         struct ethtool_pauseparam *pp)
1750 {
1751         struct ocp_enet_private *dev = ndev->priv;
1752
1753         local_bh_disable();
1754         if ((dev->phy.features & SUPPORTED_Autoneg) &&
1755             (dev->phy.advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause)))
1756                 pp->autoneg = 1;
1757
1758         if (dev->phy.duplex == DUPLEX_FULL) {
1759                 if (dev->phy.pause)
1760                         pp->rx_pause = pp->tx_pause = 1;
1761                 else if (dev->phy.asym_pause)
1762                         pp->tx_pause = 1;
1763         }
1764         local_bh_enable();
1765 }
1766
1767 static u32 emac_ethtool_get_rx_csum(struct net_device *ndev)
1768 {
1769         struct ocp_enet_private *dev = ndev->priv;
1770         return dev->tah_dev != 0;
1771 }
1772
1773 static int emac_get_regs_len(struct ocp_enet_private *dev)
1774 {
1775         return sizeof(struct emac_ethtool_regs_subhdr) + EMAC_ETHTOOL_REGS_SIZE;
1776 }
1777
1778 static int emac_ethtool_get_regs_len(struct net_device *ndev)
1779 {
1780         struct ocp_enet_private *dev = ndev->priv;
1781         return sizeof(struct emac_ethtool_regs_hdr) +
1782             emac_get_regs_len(dev) + mal_get_regs_len(dev->mal) +
1783             zmii_get_regs_len(dev->zmii_dev) +
1784             rgmii_get_regs_len(dev->rgmii_dev) +
1785             tah_get_regs_len(dev->tah_dev);
1786 }
1787
1788 static void *emac_dump_regs(struct ocp_enet_private *dev, void *buf)
1789 {
1790         struct emac_ethtool_regs_subhdr *hdr = buf;
1791
1792         hdr->version = EMAC_ETHTOOL_REGS_VER;
1793         hdr->index = dev->def->index;
1794         memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE);
1795         return ((void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE);
1796 }
1797
/* emac_ethtool_get_regs - fill @buf with the full register dump.
 *
 * Layout: emac_ethtool_regs_hdr, then the MAL and EMAC sections, then
 * one section per optional component actually present; the components
 * bitmask in the header tells userspace which optional sections follow.
 * The dump order must match the size computed by
 * emac_ethtool_get_regs_len().
 */
static void emac_ethtool_get_regs(struct net_device *ndev,
				  struct ethtool_regs *regs, void *buf)
{
	struct ocp_enet_private *dev = ndev->priv;
	struct emac_ethtool_regs_hdr *hdr = buf;

	hdr->components = 0;
	buf = hdr + 1;

	/* Block interrupts so the copied registers form one consistent
	 * snapshot. */
	local_irq_disable();
	buf = mal_dump_regs(dev->mal, buf);
	buf = emac_dump_regs(dev, buf);
	if (dev->zmii_dev) {
		hdr->components |= EMAC_ETHTOOL_REGS_ZMII;
		buf = zmii_dump_regs(dev->zmii_dev, buf);
	}
	if (dev->rgmii_dev) {
		hdr->components |= EMAC_ETHTOOL_REGS_RGMII;
		buf = rgmii_dump_regs(dev->rgmii_dev, buf);
	}
	if (dev->tah_dev) {
		hdr->components |= EMAC_ETHTOOL_REGS_TAH;
		buf = tah_dump_regs(dev->tah_dev, buf);
	}
	local_irq_enable();
}
1824
1825 static int emac_ethtool_nway_reset(struct net_device *ndev)
1826 {
1827         struct ocp_enet_private *dev = ndev->priv;
1828         int res = 0;
1829
1830         DBG("%d: nway_reset" NL, dev->def->index);
1831
1832         if (dev->phy.address < 0)
1833                 return -EOPNOTSUPP;
1834
1835         local_bh_disable();
1836         if (!dev->phy.autoneg) {
1837                 res = -EINVAL;
1838                 goto out;
1839         }
1840
1841         dev->phy.def->ops->setup_aneg(&dev->phy, dev->phy.advertising);
1842         emac_force_link_update(dev);
1843
1844       out:
1845         local_bh_enable();
1846         return res;
1847 }
1848
/* Number of u64 counters returned by emac_ethtool_get_ethtool_stats()
 * (and of strings emitted by emac_ethtool_get_strings()). */
static int emac_ethtool_get_stats_count(struct net_device *ndev)
{
	return EMAC_ETHTOOL_STATS_COUNT;
}
1853
1854 static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset,
1855                                      u8 * buf)
1856 {
1857         if (stringset == ETH_SS_STATS)
1858                 memcpy(buf, &emac_stats_keys, sizeof(emac_stats_keys));
1859 }
1860
/* emac_ethtool_get_ethtool_stats - copy driver counters to userspace
 * buffer @tmp_stats as a flat array of u64.
 *
 * Counters are updated from interrupt context, so interrupts are
 * blocked to get one consistent snapshot of both structures.
 */
static void emac_ethtool_get_ethtool_stats(struct net_device *ndev,
					   struct ethtool_stats *estats,
					   u64 * tmp_stats)
{
	struct ocp_enet_private *dev = ndev->priv;
	local_irq_disable();
	memcpy(tmp_stats, &dev->stats, sizeof(dev->stats));
	/* Advance past the first block; assumes both stats structs are
	 * laid out as arrays of u64-sized fields -- TODO confirm against
	 * the struct definitions in ibm_emac_core.h. */
	tmp_stats += sizeof(dev->stats) / sizeof(u64);
	memcpy(tmp_stats, &dev->estats, sizeof(dev->estats));
	local_irq_enable();
}
1872
1873 static void emac_ethtool_get_drvinfo(struct net_device *ndev,
1874                                      struct ethtool_drvinfo *info)
1875 {
1876         struct ocp_enet_private *dev = ndev->priv;
1877
1878         strcpy(info->driver, "ibm_emac");
1879         strcpy(info->version, DRV_VERSION);
1880         info->fw_version[0] = '\0';
1881         sprintf(info->bus_info, "PPC 4xx EMAC %d", dev->def->index);
1882         info->n_stats = emac_ethtool_get_stats_count(ndev);
1883         info->regdump_len = emac_ethtool_get_regs_len(ndev);
1884 }
1885
/* ethtool entry points; generic ethtool_op_* helpers cover the simple
 * boolean getters. */
static const struct ethtool_ops emac_ethtool_ops = {
	/* Link configuration and identification */
	.get_settings = emac_ethtool_get_settings,
	.set_settings = emac_ethtool_set_settings,
	.get_drvinfo = emac_ethtool_get_drvinfo,

	/* Register dumps */
	.get_regs_len = emac_ethtool_get_regs_len,
	.get_regs = emac_ethtool_get_regs,

	.nway_reset = emac_ethtool_nway_reset,

	/* Ring and flow-control parameters (read-only) */
	.get_ringparam = emac_ethtool_get_ringparam,
	.get_pauseparam = emac_ethtool_get_pauseparam,

	.get_rx_csum = emac_ethtool_get_rx_csum,

	/* Statistics */
	.get_strings = emac_ethtool_get_strings,
	.get_stats_count = emac_ethtool_get_stats_count,
	.get_ethtool_stats = emac_ethtool_get_ethtool_stats,

	/* Generic helpers read the corresponding net_device state */
	.get_link = ethtool_op_get_link,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.get_sg = ethtool_op_get_sg,
};
1909
/* emac_ioctl - MII management ioctls.
 *
 * Handles the standard SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG ioctls and
 * their historic SIOCDEVPRIVATE equivalents.  ifr_ifru is treated as an
 * array of u16 words: data[0] = PHY id, data[1] = register number,
 * data[2] = value to write, data[3] = value read back.  Note that reads
 * and writes always target the attached PHY (dev->phy.address), not the
 * id in data[0].  Returns -EOPNOTSUPP for PHY-less configurations and
 * unknown commands, -EPERM for unprivileged writes.
 */
static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
	struct ocp_enet_private *dev = ndev->priv;
	uint16_t *data = (uint16_t *) & rq->ifr_ifru;

	DBG("%d: ioctl %08x" NL, dev->def->index, cmd);

	if (dev->phy.address < 0)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCDEVPRIVATE:
		data[0] = dev->phy.address;
		/* Fall through */
	case SIOCGMIIREG:
	case SIOCDEVPRIVATE + 1:
		data[3] = emac_mdio_read(ndev, dev->phy.address, data[1]);
		return 0;

	case SIOCSMIIREG:
	case SIOCDEVPRIVATE + 2:
		/* Writing PHY registers requires admin capability */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		emac_mdio_write(ndev, dev->phy.address, data[1], data[2]);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
1940
1941 static int __init emac_probe(struct ocp_device *ocpdev)
1942 {
1943         struct ocp_func_emac_data *emacdata = ocpdev->def->additions;
1944         struct net_device *ndev;
1945         struct ocp_device *maldev;
1946         struct ocp_enet_private *dev;
1947         int err, i;
1948
1949         DBG("%d: probe" NL, ocpdev->def->index);
1950
1951         if (!emacdata) {
1952                 printk(KERN_ERR "emac%d: Missing additional data!\n",
1953                        ocpdev->def->index);
1954                 return -ENODEV;
1955         }
1956
1957         /* Allocate our net_device structure */
1958         ndev = alloc_etherdev(sizeof(struct ocp_enet_private));
1959         if (!ndev) {
1960                 printk(KERN_ERR "emac%d: could not allocate ethernet device!\n",
1961                        ocpdev->def->index);
1962                 return -ENOMEM;
1963         }
1964         dev = ndev->priv;
1965         dev->ndev = ndev;
1966         dev->ldev = &ocpdev->dev;
1967         dev->def = ocpdev->def;
1968         SET_MODULE_OWNER(ndev);
1969
1970         /* Find MAL device we are connected to */
1971         maldev =
1972             ocp_find_device(OCP_VENDOR_IBM, OCP_FUNC_MAL, emacdata->mal_idx);
1973         if (!maldev) {
1974                 printk(KERN_ERR "emac%d: unknown mal%d device!\n",
1975                        dev->def->index, emacdata->mal_idx);
1976                 err = -ENODEV;
1977                 goto out;
1978         }
1979         dev->mal = ocp_get_drvdata(maldev);
1980         if (!dev->mal) {
1981                 printk(KERN_ERR "emac%d: mal%d hasn't been initialized yet!\n",
1982                        dev->def->index, emacdata->mal_idx);
1983                 err = -ENODEV;
1984                 goto out;
1985         }
1986
1987         /* Register with MAL */
1988         dev->commac.ops = &emac_commac_ops;
1989         dev->commac.dev = dev;
1990         dev->commac.tx_chan_mask = MAL_CHAN_MASK(emacdata->mal_tx_chan);
1991         dev->commac.rx_chan_mask = MAL_CHAN_MASK(emacdata->mal_rx_chan);
1992         err = mal_register_commac(dev->mal, &dev->commac);
1993         if (err) {
1994                 printk(KERN_ERR "emac%d: failed to register with mal%d!\n",
1995                        dev->def->index, emacdata->mal_idx);
1996                 goto out;
1997         }
1998         dev->rx_skb_size = emac_rx_skb_size(ndev->mtu);
1999         dev->rx_sync_size = emac_rx_sync_size(ndev->mtu);
2000
2001         /* Get pointers to BD rings */
2002         dev->tx_desc =
2003             dev->mal->bd_virt + mal_tx_bd_offset(dev->mal,
2004                                                  emacdata->mal_tx_chan);
2005         dev->rx_desc =
2006             dev->mal->bd_virt + mal_rx_bd_offset(dev->mal,
2007                                                  emacdata->mal_rx_chan);
2008
2009         DBG("%d: tx_desc %p" NL, ocpdev->def->index, dev->tx_desc);
2010         DBG("%d: rx_desc %p" NL, ocpdev->def->index, dev->rx_desc);
2011
2012         /* Clean rings */
2013         memset(dev->tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor));
2014         memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor));
2015
2016         /* If we depend on another EMAC for MDIO, check whether it was probed already */
2017         if (emacdata->mdio_idx >= 0 && emacdata->mdio_idx != ocpdev->def->index) {
2018                 struct ocp_device *mdiodev =
2019                     ocp_find_device(OCP_VENDOR_IBM, OCP_FUNC_EMAC,
2020                                     emacdata->mdio_idx);
2021                 if (!mdiodev) {
2022                         printk(KERN_ERR "emac%d: unknown emac%d device!\n",
2023                                dev->def->index, emacdata->mdio_idx);
2024                         err = -ENODEV;
2025                         goto out2;
2026                 }
2027                 dev->mdio_dev = ocp_get_drvdata(mdiodev);
2028                 if (!dev->mdio_dev) {
2029                         printk(KERN_ERR
2030                                "emac%d: emac%d hasn't been initialized yet!\n",
2031                                dev->def->index, emacdata->mdio_idx);
2032                         err = -ENODEV;
2033                         goto out2;
2034                 }
2035         }
2036
2037         /* Attach to ZMII, if needed */
2038         if ((err = zmii_attach(dev)) != 0)
2039                 goto out2;
2040
2041         /* Attach to RGMII, if needed */
2042         if ((err = rgmii_attach(dev)) != 0)
2043                 goto out3;
2044
2045         /* Attach to TAH, if needed */
2046         if ((err = tah_attach(dev)) != 0)
2047                 goto out4;
2048
2049         /* Map EMAC regs */
2050         dev->emacp = ioremap(dev->def->paddr, sizeof(struct emac_regs));
2051         if (!dev->emacp) {
2052                 printk(KERN_ERR "emac%d: could not ioremap device registers!\n",
2053                        dev->def->index);
2054                 err = -ENOMEM;
2055                 goto out5;
2056         }
2057
2058         /* Fill in MAC address */
2059         for (i = 0; i < 6; ++i)
2060                 ndev->dev_addr[i] = emacdata->mac_addr[i];
2061
2062         /* Set some link defaults before we can find out real parameters */
2063         dev->phy.speed = SPEED_100;
2064         dev->phy.duplex = DUPLEX_FULL;
2065         dev->phy.autoneg = AUTONEG_DISABLE;
2066         dev->phy.pause = dev->phy.asym_pause = 0;
2067         dev->stop_timeout = STOP_TIMEOUT_100;
2068         init_timer(&dev->link_timer);
2069         dev->link_timer.function = emac_link_timer;
2070         dev->link_timer.data = (unsigned long)dev;
2071
2072         /* Find PHY if any */
2073         dev->phy.dev = ndev;
2074         dev->phy.mode = emacdata->phy_mode;
2075         if (emacdata->phy_map != 0xffffffff) {
2076                 u32 phy_map = emacdata->phy_map | busy_phy_map;
2077                 u32 adv;
2078
2079                 DBG("%d: PHY maps %08x %08x" NL, dev->def->index,
2080                     emacdata->phy_map, busy_phy_map);
2081
2082                 EMAC_RX_CLK_TX(dev->def->index);
2083
2084                 dev->phy.mdio_read = emac_mdio_read;
2085                 dev->phy.mdio_write = emac_mdio_write;
2086
2087                 /* Configure EMAC with defaults so we can at least use MDIO
2088                  * This is needed mostly for 440GX
2089                  */
2090                 if (emac_phy_gpcs(dev->phy.mode)) {
2091                         /* XXX
2092                          * Make GPCS PHY address equal to EMAC index.
2093                          * We probably should take into account busy_phy_map
2094                          * and/or phy_map here.
2095                          */
2096                         dev->phy.address = dev->def->index;
2097                 }
2098                 
2099                 emac_configure(dev);
2100
2101                 for (i = 0; i < 0x20; phy_map >>= 1, ++i)
2102                         if (!(phy_map & 1)) {
2103                                 int r;
2104                                 busy_phy_map |= 1 << i;
2105
2106                                 /* Quick check if there is a PHY at the address */
2107                                 r = emac_mdio_read(dev->ndev, i, MII_BMCR);
2108                                 if (r == 0xffff || r < 0)
2109                                         continue;
2110                                 if (!mii_phy_probe(&dev->phy, i))
2111                                         break;
2112                         }
2113                 if (i == 0x20) {
2114                         printk(KERN_WARNING "emac%d: can't find PHY!\n",
2115                                dev->def->index);
2116                         goto out6;
2117                 }
2118
2119                 /* Init PHY */
2120                 if (dev->phy.def->ops->init)
2121                         dev->phy.def->ops->init(&dev->phy);
2122                 
2123                 /* Disable any PHY features not supported by the platform */
2124                 dev->phy.def->features &= ~emacdata->phy_feat_exc;
2125
2126                 /* Setup initial link parameters */
2127                 if (dev->phy.features & SUPPORTED_Autoneg) {
2128                         adv = dev->phy.features;
2129 #if !defined(CONFIG_40x)
2130                         adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
2131 #endif
2132                         /* Restart autonegotiation */
2133                         dev->phy.def->ops->setup_aneg(&dev->phy, adv);
2134                 } else {
2135                         u32 f = dev->phy.def->features;
2136                         int speed = SPEED_10, fd = DUPLEX_HALF;
2137
2138                         /* Select highest supported speed/duplex */
2139                         if (f & SUPPORTED_1000baseT_Full) {
2140                                 speed = SPEED_1000;
2141                                 fd = DUPLEX_FULL;
2142                         } else if (f & SUPPORTED_1000baseT_Half)
2143                                 speed = SPEED_1000;
2144                         else if (f & SUPPORTED_100baseT_Full) {
2145                                 speed = SPEED_100;
2146                                 fd = DUPLEX_FULL;
2147                         } else if (f & SUPPORTED_100baseT_Half)
2148                                 speed = SPEED_100;
2149                         else if (f & SUPPORTED_10baseT_Full)
2150                                 fd = DUPLEX_FULL;
2151
2152                         /* Force link parameters */
2153                         dev->phy.def->ops->setup_forced(&dev->phy, speed, fd);
2154                 }
2155         } else {
2156                 emac_reset(dev);
2157
2158                 /* PHY-less configuration.
2159                  * XXX I probably should move these settings to emacdata
2160                  */
2161                 dev->phy.address = -1;
2162                 dev->phy.features = SUPPORTED_100baseT_Full | SUPPORTED_MII;
2163                 dev->phy.pause = 1;
2164         }
2165
2166         /* Fill in the driver function table */
2167         ndev->open = &emac_open;
2168         if (dev->tah_dev) {
2169                 ndev->hard_start_xmit = &emac_start_xmit_sg;
2170                 ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
2171         } else
2172                 ndev->hard_start_xmit = &emac_start_xmit;
2173         ndev->tx_timeout = &emac_full_tx_reset;
2174         ndev->watchdog_timeo = 5 * HZ;
2175         ndev->stop = &emac_close;
2176         ndev->get_stats = &emac_stats;
2177         ndev->set_multicast_list = &emac_set_multicast_list;
2178         ndev->do_ioctl = &emac_ioctl;
2179         if (emac_phy_supports_gige(emacdata->phy_mode)) {
2180                 ndev->change_mtu = &emac_change_mtu;
2181                 dev->commac.ops = &emac_commac_sg_ops;
2182         }
2183         SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);
2184
2185         netif_carrier_off(ndev);
2186         netif_stop_queue(ndev);
2187
2188         err = register_netdev(ndev);
2189         if (err) {
2190                 printk(KERN_ERR "emac%d: failed to register net device (%d)!\n",
2191                        dev->def->index, err);
2192                 goto out6;
2193         }
2194
2195         ocp_set_drvdata(ocpdev, dev);
2196
2197         printk("%s: emac%d, MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
2198                ndev->name, dev->def->index,
2199                ndev->dev_addr[0], ndev->dev_addr[1], ndev->dev_addr[2],
2200                ndev->dev_addr[3], ndev->dev_addr[4], ndev->dev_addr[5]);
2201
2202         if (dev->phy.address >= 0)
2203                 printk("%s: found %s PHY (0x%02x)\n", ndev->name,
2204                        dev->phy.def->name, dev->phy.address);
2205
2206         emac_dbg_register(dev->def->index, dev);
2207
2208         return 0;
2209       out6:
2210         iounmap(dev->emacp);
2211       out5:
2212         tah_fini(dev->tah_dev);
2213       out4:
2214         rgmii_fini(dev->rgmii_dev, dev->rgmii_input);
2215       out3:
2216         zmii_fini(dev->zmii_dev, dev->zmii_input);
2217       out2:
2218         mal_unregister_commac(dev->mal, &dev->commac);
2219       out:
2220         kfree(ndev);
2221         return err;
2222 }
2223
/* OCP devices handled by this driver: any IBM EMAC function.
 * The table is terminated by the OCP_VENDOR_INVALID sentinel. */
static struct ocp_device_id emac_ids[] = {
	{ .vendor = OCP_VENDOR_IBM, .function = OCP_FUNC_EMAC },
	{ .vendor = OCP_VENDOR_INVALID}
};
2228
/* OCP bus driver glue: binds emac_probe()/emac_remove() to the device
 * ids above. */
static struct ocp_driver emac_driver = {
	.name = "emac",
	.id_table = emac_ids,
	.probe = emac_probe,
	.remove = emac_remove,
};
2235
/* emac_init - module entry point.
 *
 * Brings up the MAL layer first (EMACs register with it during probe),
 * then registers the OCP driver with the EMAC clock switched to the
 * internal source for the duration of probing.  Returns 0 on success,
 * -ENODEV on any failure (MAL or driver registration).
 */
static int __init emac_init(void)
{
	printk(KERN_INFO DRV_DESC ", version " DRV_VERSION "\n");

	DBG(": init" NL);

	/* MAL must be up before any EMAC can register with it */
	if (mal_init())
		return -ENODEV;

	/* Probe with the EMAC clocked internally; switch back to the
	 * external clock once probing is finished (both paths below). */
	EMAC_CLK_INTERNAL;
	if (ocp_register_driver(&emac_driver)) {
		EMAC_CLK_EXTERNAL;
		/* NOTE(review): unregistering a driver whose registration
		 * failed looks odd -- confirm ocp_register_driver()
		 * semantics (partial registration?) before changing. */
		ocp_unregister_driver(&emac_driver);
		mal_exit();
		return -ENODEV;
	}
	EMAC_CLK_EXTERNAL;

	emac_init_debug();
	return 0;
}
2257
/* emac_exit - module exit point: undo emac_init() in reverse order
 * (driver first, then MAL, then the debug facility). */
static void __exit emac_exit(void)
{
	DBG(": exit" NL);
	ocp_unregister_driver(&emac_driver);
	mal_exit();
	emac_fini_debug();
}
2265
/* Module entry/exit registration */
module_init(emac_init);
module_exit(emac_exit);