/*
 * drivers/net/ibm_emac/ibm_emac_core.c
 *
 * Driver for PowerPC 4xx on-chip ethernet controller.
 *
 * Copyright (c) 2004, 2005 Zultys Technologies.
 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
 *
 * Based on original work by
 *      Matt Porter <mporter@kernel.crashing.org>
 *      (c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
 *      Armin Kuster <akuster@mvista.com>
 *      Johnnie Peters <jpeters@mvista.com>
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 *
 */
 
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/bitops.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/uaccess.h>
#include <asm/ocp.h>

#include "ibm_emac_core.h"
#include "ibm_emac_debug.h"
 
/*
 * Lack of dma_unmap_???? calls is intentional.
 *
 * API-correct usage requires additional support state information to be
 * maintained for every RX and TX buffer descriptor (BD). Unfortunately, due to
 * EMAC design (e.g. TX buffer passed from network stack can be split into
 * several BDs, dma_map_single/dma_map_page can be used to map particular BD),
 * maintaining such information will add additional overhead.
 * Current DMA API implementation for 4xx processors only ensures cache coherency
 * and dma_unmap_???? routines are empty and are likely to stay this way.
 * I decided to omit dma_unmap_??? calls because I don't want to add additional
 * complexity just for the sake of following some abstract API, when it doesn't
 * add any real benefit to the driver. I understand that this decision may be
 * controversial, but I really tried to make code API-correct and efficient
 * at the same time and didn't come up with code I liked :(.            --ebs
 */
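/* For the curious: a minimal sketch (added commentary, not part of the
 * original driver) of the per-BD bookkeeping that API-correct unmapping
 * would require; the struct and field names below are illustrative only.
 */
#if 0
struct emac_bd_dma_state {
        dma_addr_t addr;        /* cookie returned by dma_map_single/page() */
        u16 len;                /* mapped length, needed by dma_unmap_*() */
        u16 mapped_as_page;     /* chooses dma_unmap_page() vs _single() */
};

/* ... and on BD completion one would do:
 *      if (state->mapped_as_page)
 *              dma_unmap_page(dev->ldev, state->addr, state->len, dir);
 *      else
 *              dma_unmap_single(dev->ldev, state->addr, state->len, dir);
 */
#endif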
 
#define DRV_NAME        "emac"
#define DRV_VERSION     "3.54"
#define DRV_DESC        "PPC 4xx OCP EMAC driver"

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR
    ("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>");
MODULE_LICENSE("GPL");
 
/* minimum number of free TX descriptors required to wake up TX process */
#define EMAC_TX_WAKEUP_THRESH           (NUM_TX_BUFF / 4)
 
/* If packet size is less than this number, we allocate small skb and copy packet
 * contents into it instead of just sending original big skb up
 */
#define EMAC_RX_COPY_THRESH             CONFIG_IBM_EMAC_RX_COPY_THRESHOLD
 
/* Since multiple EMACs share MDIO lines in various ways, we need
 * to avoid re-using the same PHY ID in cases where the arch didn't
 * setup precise phy_map entries
 */
static u32 busy_phy_map;
 
#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX) && \
    (defined(CONFIG_405EP) || defined(CONFIG_440EP) || defined(CONFIG_440GR))
/* 405EP has "EMAC to PHY Control Register" (CPC0_EPCTL) which can help us
 * with PHY RX clock problem.
 * 440EP/440GR has more sane SDR0_MFR register implementation than 440GX, which
 * also allows controlling each EMAC clock
 */
static inline void EMAC_RX_CLK_TX(int idx)
{
        unsigned long flags;
        local_irq_save(flags);

#if defined(CONFIG_405EP)
        mtdcr(0xf3, mfdcr(0xf3) | (1 << idx));
#else /* CONFIG_440EP || CONFIG_440GR */
        SDR_WRITE(DCRN_SDR_MFR, SDR_READ(DCRN_SDR_MFR) | (0x08000000 >> idx));
#endif

        local_irq_restore(flags);
}

static inline void EMAC_RX_CLK_DEFAULT(int idx)
{
        unsigned long flags;
        local_irq_save(flags);

#if defined(CONFIG_405EP)
        mtdcr(0xf3, mfdcr(0xf3) & ~(1 << idx));
#else /* CONFIG_440EP */
        SDR_WRITE(DCRN_SDR_MFR, SDR_READ(DCRN_SDR_MFR) & ~(0x08000000 >> idx));
#endif

        local_irq_restore(flags);
}
#else
#define EMAC_RX_CLK_TX(idx)             ((void)0)
#define EMAC_RX_CLK_DEFAULT(idx)        ((void)0)
#endif
 
#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX) && defined(CONFIG_440GX)
/* We can switch Ethernet clock to the internal source through SDR0_MFR[ECS],
 * unfortunately this is less flexible than the 440EP case, because it's a global
 * setting for all EMACs, therefore we do this clock trick only during probe.
 */
#define EMAC_CLK_INTERNAL               SDR_WRITE(DCRN_SDR_MFR, \
                                            SDR_READ(DCRN_SDR_MFR) | 0x08000000)
#define EMAC_CLK_EXTERNAL               SDR_WRITE(DCRN_SDR_MFR, \
                                            SDR_READ(DCRN_SDR_MFR) & ~0x08000000)
#else
#define EMAC_CLK_INTERNAL               ((void)0)
#define EMAC_CLK_EXTERNAL               ((void)0)
#endif
 
/* I don't want to litter system log with timeout errors
 * when we have brain-damaged PHY.
 */
static inline void emac_report_timeout_error(struct ocp_enet_private *dev,
                                             const char *error)
{
#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX)
        DBG("%d: %s" NL, dev->def->index, error);
#else
        if (net_ratelimit())
                printk(KERN_ERR "emac%d: %s\n", dev->def->index, error);
#endif
}
 
/* PHY polling intervals */
#define PHY_POLL_LINK_ON        HZ
#define PHY_POLL_LINK_OFF       (HZ / 5)
 
/* Graceful stop timeouts in us.
 * We should allow up to 1 frame time (full-duplex, ignoring collisions)
 */
#define STOP_TIMEOUT_10         1230
#define STOP_TIMEOUT_100        124
#define STOP_TIMEOUT_1000       13
#define STOP_TIMEOUT_1000_JUMBO 73
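/* Sanity check on the values above (added commentary): one maximum frame
 * of 1518 bytes plus 8 bytes of preamble and 12 bytes of inter-frame gap
 * is 1538 bytes = 12304 bits, i.e. ~1230us at 10Mbps, ~123us at 100Mbps
 * and ~12.3us at 1000Mbps; the jumbo value corresponds to a ~9000-byte
 * frame at gigabit speed.
 */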
 
/* Please, keep in sync with struct ibm_emac_stats/ibm_emac_error_stats */
static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = {
        "rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum",
        "tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom",
        "rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu",
        "rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet",
        "rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error",
        "rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range",
        "rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun",
        "rx_bad_packet", "rx_runt_packet", "rx_short_event",
        "rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long",
        "rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors",
        "tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral",
        "tx_bd_excessive_collisions", "tx_bd_late_collision",
        "tx_bd_multple_collisions", "tx_bd_single_collision",
        "tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe",
        "tx_errors"
};
 
static irqreturn_t emac_irq(int irq, void *dev_instance);
static void emac_clean_tx_ring(struct ocp_enet_private *dev);
 
static inline int emac_phy_supports_gige(int phy_mode)
{
        return  phy_mode == PHY_MODE_GMII ||
                phy_mode == PHY_MODE_RGMII ||
                phy_mode == PHY_MODE_TBI ||
                phy_mode == PHY_MODE_RTBI;
}

static inline int emac_phy_gpcs(int phy_mode)
{
        return  phy_mode == PHY_MODE_TBI ||
                phy_mode == PHY_MODE_RTBI;
}
 
static inline void emac_tx_enable(struct ocp_enet_private *dev)
{
        struct emac_regs __iomem *p = dev->emacp;
        unsigned long flags;
        u32 r;

        local_irq_save(flags);

        DBG("%d: tx_enable" NL, dev->def->index);

        r = in_be32(&p->mr0);
        if (!(r & EMAC_MR0_TXE))
                out_be32(&p->mr0, r | EMAC_MR0_TXE);
        local_irq_restore(flags);
}
 
static void emac_tx_disable(struct ocp_enet_private *dev)
{
        struct emac_regs __iomem *p = dev->emacp;
        unsigned long flags;
        u32 r;

        local_irq_save(flags);

        DBG("%d: tx_disable" NL, dev->def->index);

        r = in_be32(&p->mr0);
        if (r & EMAC_MR0_TXE) {
                int n = dev->stop_timeout;
                out_be32(&p->mr0, r & ~EMAC_MR0_TXE);
                while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n) {
                        udelay(1);
                        --n;
                }
                if (unlikely(!n))
                        emac_report_timeout_error(dev, "TX disable timeout");
        }
        local_irq_restore(flags);
}
 
static void emac_rx_enable(struct ocp_enet_private *dev)
{
        struct emac_regs __iomem *p = dev->emacp;
        unsigned long flags;
        u32 r;

        local_irq_save(flags);
        if (unlikely(dev->commac.rx_stopped))
                goto out;

        DBG("%d: rx_enable" NL, dev->def->index);

        r = in_be32(&p->mr0);
        if (!(r & EMAC_MR0_RXE)) {
                if (unlikely(!(r & EMAC_MR0_RXI))) {
                        /* Wait if previous async disable is still in progress */
                        int n = dev->stop_timeout;
                        while (!(r = in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
                                udelay(1);
                                --n;
                        }
                        if (unlikely(!n))
                                emac_report_timeout_error(dev,
                                                          "RX disable timeout");
                }
                out_be32(&p->mr0, r | EMAC_MR0_RXE);
        }
 out:
        local_irq_restore(flags);
}
 
static void emac_rx_disable(struct ocp_enet_private *dev)
{
        struct emac_regs __iomem *p = dev->emacp;
        unsigned long flags;
        u32 r;

        local_irq_save(flags);

        DBG("%d: rx_disable" NL, dev->def->index);

        r = in_be32(&p->mr0);
        if (r & EMAC_MR0_RXE) {
                int n = dev->stop_timeout;
                out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
                while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
                        udelay(1);
                        --n;
                }
                if (unlikely(!n))
                        emac_report_timeout_error(dev, "RX disable timeout");
        }
        local_irq_restore(flags);
}
 
static inline void emac_rx_disable_async(struct ocp_enet_private *dev)
{
        struct emac_regs __iomem *p = dev->emacp;
        unsigned long flags;
        u32 r;

        local_irq_save(flags);

        DBG("%d: rx_disable_async" NL, dev->def->index);

        r = in_be32(&p->mr0);
        if (r & EMAC_MR0_RXE)
                out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
        local_irq_restore(flags);
}
 
static int emac_reset(struct ocp_enet_private *dev)
{
        struct emac_regs __iomem *p = dev->emacp;
        unsigned long flags;
        int n = 20;

        DBG("%d: reset" NL, dev->def->index);

        local_irq_save(flags);

        if (!dev->reset_failed) {
                /* 40x erratum suggests stopping RX channel before reset,
                 * we stop TX as well
                 */
                emac_rx_disable(dev);
                emac_tx_disable(dev);
        }

        out_be32(&p->mr0, EMAC_MR0_SRST);
        while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n)
                --n;
        local_irq_restore(flags);

        if (n) {
                dev->reset_failed = 0;
                return 0;
        } else {
                emac_report_timeout_error(dev, "reset timeout");
                dev->reset_failed = 1;
                return -ETIMEDOUT;
        }
}
 
static void emac_hash_mc(struct ocp_enet_private *dev)
{
        struct emac_regs __iomem *p = dev->emacp;
        u16 gaht[4] = { 0 };
        struct dev_mc_list *dmi;

        DBG("%d: hash_mc %d" NL, dev->def->index, dev->ndev->mc_count);

        for (dmi = dev->ndev->mc_list; dmi; dmi = dmi->next) {
                int bit;
                DBG2("%d: mc %02x:%02x:%02x:%02x:%02x:%02x" NL,
                     dev->def->index,
                     dmi->dmi_addr[0], dmi->dmi_addr[1], dmi->dmi_addr[2],
                     dmi->dmi_addr[3], dmi->dmi_addr[4], dmi->dmi_addr[5]);

                bit = 63 - (ether_crc(ETH_ALEN, dmi->dmi_addr) >> 26);
                gaht[bit >> 4] |= 0x8000 >> (bit & 0x0f);
        }
        out_be32(&p->gaht1, gaht[0]);
        out_be32(&p->gaht2, gaht[1]);
        out_be32(&p->gaht3, gaht[2]);
        out_be32(&p->gaht4, gaht[3]);
}
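/* How the hash above works (added commentary): ether_crc() returns the
 * CRC-32 of the 6-byte address, and its 6 most significant bits, mirrored
 * into the 0..63 range, select a single bit in the four 16-bit group
 * address hash tables GAHT1..GAHT4, MSB first: bit 0 is the 0x8000 bit of
 * GAHT1, bit 63 the 0x0001 bit of GAHT4.
 */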
 
static inline u32 emac_iff2rmr(struct net_device *ndev)
{
        u32 r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE |
            EMAC_RMR_BASE;

        if (ndev->flags & IFF_PROMISC)
                r |= EMAC_RMR_PME;
        else if (ndev->flags & IFF_ALLMULTI || ndev->mc_count > 32)
                r |= EMAC_RMR_PMME;
        else if (ndev->mc_count > 0)
                r |= EMAC_RMR_MAE;

        return r;
}
 
static inline int emac_opb_mhz(void)
{
        return (ocp_sys_info.opb_bus_freq + 500000) / 1000000;
}
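/* Rounds to the nearest MHz; e.g. opb_bus_freq = 66666666 -> 67 (added example) */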
 
/* BHs disabled */
static int emac_configure(struct ocp_enet_private *dev)
{
        struct emac_regs __iomem *p = dev->emacp;
        struct net_device *ndev = dev->ndev;
        int gige;
        u32 r;

        DBG("%d: configure" NL, dev->def->index);

        if (emac_reset(dev) < 0)
                return -ETIMEDOUT;

        tah_reset(dev->tah_dev);

        /* Mode register */
        r = EMAC_MR1_BASE(emac_opb_mhz()) | EMAC_MR1_VLE | EMAC_MR1_IST;
        if (dev->phy.duplex == DUPLEX_FULL)
                r |= EMAC_MR1_FDE | EMAC_MR1_MWSW_001;
        dev->stop_timeout = STOP_TIMEOUT_10;
        switch (dev->phy.speed) {
        case SPEED_1000:
                if (emac_phy_gpcs(dev->phy.mode)) {
                        r |= EMAC_MR1_MF_1000GPCS |
                            EMAC_MR1_MF_IPPA(dev->phy.address);

                        /* Put some arbitrary OUI, Manuf & Rev IDs so we can
                         * identify this GPCS PHY later.
                         */
                        out_be32(&p->ipcr, 0xdeadbeef);
                } else
                        r |= EMAC_MR1_MF_1000;
                r |= EMAC_MR1_RFS_16K;
                gige = 1;

                if (dev->ndev->mtu > ETH_DATA_LEN) {
                        r |= EMAC_MR1_JPSM;
                        dev->stop_timeout = STOP_TIMEOUT_1000_JUMBO;
                } else
                        dev->stop_timeout = STOP_TIMEOUT_1000;
                break;
        case SPEED_100:
                r |= EMAC_MR1_MF_100;
                dev->stop_timeout = STOP_TIMEOUT_100;
                /* Fall through */
        default:
                r |= EMAC_MR1_RFS_4K;
                gige = 0;
                break;
        }

        if (dev->rgmii_dev)
                rgmii_set_speed(dev->rgmii_dev, dev->rgmii_input,
                                dev->phy.speed);
        else
                zmii_set_speed(dev->zmii_dev, dev->zmii_input, dev->phy.speed);

#if !defined(CONFIG_40x)
        /* an erratum on 40x forces us to NOT use integrated flow control,
         * let's hope it works on 44x ;)
         */
        if (dev->phy.duplex == DUPLEX_FULL) {
                if (dev->phy.pause)
                        r |= EMAC_MR1_EIFC | EMAC_MR1_APP;
                else if (dev->phy.asym_pause)
                        r |= EMAC_MR1_APP;
        }
#endif
        out_be32(&p->mr1, r);

        /* Set individual MAC address */
        out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
        out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
                 (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
                 ndev->dev_addr[5]);

        /* VLAN Tag Protocol ID */
        out_be32(&p->vtpid, 0x8100);

        /* Receive mode register */
        r = emac_iff2rmr(ndev);
        if (r & EMAC_RMR_MAE)
                emac_hash_mc(dev);
        out_be32(&p->rmr, r);

        /* FIFOs thresholds */
        r = EMAC_TMR1((EMAC_MAL_BURST_SIZE / EMAC_FIFO_ENTRY_SIZE) + 1,
                      EMAC_TX_FIFO_SIZE / 2 / EMAC_FIFO_ENTRY_SIZE);
        out_be32(&p->tmr1, r);
        out_be32(&p->trtr, EMAC_TRTR(EMAC_TX_FIFO_SIZE / 2));

        /* PAUSE frame is sent when RX FIFO reaches its high-water mark,
           there should still be enough space in the FIFO to allow our link
           partner time to process this frame and also time to send a PAUSE
           frame itself.

           Here is the worst case scenario for the RX FIFO "headroom"
           (from "The Switch Book") (100Mbps, without preamble, inter-frame gap):

           1) One maximum-length frame on TX                    1522 bytes
           2) One PAUSE frame time                                64 bytes
           3) PAUSE frame decode time allowance                   64 bytes
           4) One maximum-length frame on RX                    1522 bytes
           5) Round-trip propagation delay of the link (100Mb)    15 bytes
           ----------
           3187 bytes

           I chose to set high-water mark to RX_FIFO_SIZE / 4 (1024 bytes)
           low-water mark  to RX_FIFO_SIZE / 8 (512 bytes)
         */
        r = EMAC_RWMR(EMAC_RX_FIFO_SIZE(gige) / 8 / EMAC_FIFO_ENTRY_SIZE,
                      EMAC_RX_FIFO_SIZE(gige) / 4 / EMAC_FIFO_ENTRY_SIZE);
        out_be32(&p->rwmr, r);

        /* Set PAUSE timer to the maximum */
        out_be32(&p->ptr, 0xffff);

        /* Enable interrupts */
        out_be32(&p->iser, EMAC_ISR_TXPE | EMAC_ISR_RXPE | /* EMAC_ISR_TXUE |
                 EMAC_ISR_RXOE | */ EMAC_ISR_OVR | EMAC_ISR_BP | EMAC_ISR_SE |
                 EMAC_ISR_ALE | EMAC_ISR_BFCS | EMAC_ISR_PTLE | EMAC_ISR_ORE |
                 EMAC_ISR_IRE | EMAC_ISR_TE);

        /* We need to take GPCS PHY out of isolate mode after EMAC reset */
        if (emac_phy_gpcs(dev->phy.mode))
                mii_reset_phy(&dev->phy);

        return 0;
}
 
/* BHs disabled */
static void emac_reinitialize(struct ocp_enet_private *dev)
{
        DBG("%d: reinitialize" NL, dev->def->index);

        if (!emac_configure(dev)) {
                emac_tx_enable(dev);
                emac_rx_enable(dev);
        }
}
 
/* BHs disabled */
static void emac_full_tx_reset(struct net_device *ndev)
{
        struct ocp_enet_private *dev = ndev->priv;
        struct ocp_func_emac_data *emacdata = dev->def->additions;

        DBG("%d: full_tx_reset" NL, dev->def->index);

        emac_tx_disable(dev);
        mal_disable_tx_channel(dev->mal, emacdata->mal_tx_chan);
        emac_clean_tx_ring(dev);
        dev->tx_cnt = dev->tx_slot = dev->ack_slot = 0;

        emac_configure(dev);

        mal_enable_tx_channel(dev->mal, emacdata->mal_tx_chan);
        emac_tx_enable(dev);
        emac_rx_enable(dev);

        netif_wake_queue(ndev);
}
 
static int __emac_mdio_read(struct ocp_enet_private *dev, u8 id, u8 reg)
{
        struct emac_regs __iomem *p = dev->emacp;
        u32 r;
        int n;

        DBG2("%d: mdio_read(%02x,%02x)" NL, dev->def->index, id, reg);

        /* Enable proper MDIO port */
        zmii_enable_mdio(dev->zmii_dev, dev->zmii_input);

        /* Wait for management interface to become idle */
        n = 10;
        while (!emac_phy_done(in_be32(&p->stacr))) {
                udelay(1);
                if (!--n)
                        goto timeout;
        }

        /* Issue read command */
        out_be32(&p->stacr,
                 EMAC_STACR_BASE(emac_opb_mhz()) | EMAC_STACR_STAC_READ |
                 (reg & EMAC_STACR_PRA_MASK)
                 | ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT)
                 | EMAC_STACR_START);

        /* Wait for read to complete */
        n = 100;
        while (!emac_phy_done(r = in_be32(&p->stacr))) {
                udelay(1);
                if (!--n)
                        goto timeout;
        }

        if (unlikely(r & EMAC_STACR_PHYE)) {
                DBG("%d: mdio_read(%02x, %02x) failed" NL, dev->def->index,
                    id, reg);
                return -EREMOTEIO;
        }

        r = ((r >> EMAC_STACR_PHYD_SHIFT) & EMAC_STACR_PHYD_MASK);
        DBG2("%d: mdio_read -> %04x" NL, dev->def->index, r);
        return r;
 timeout:
        DBG("%d: MII management interface timeout (read)" NL, dev->def->index);
        return -ETIMEDOUT;
}
 
static void __emac_mdio_write(struct ocp_enet_private *dev, u8 id, u8 reg,
                              u16 val)
{
        struct emac_regs __iomem *p = dev->emacp;
        int n;

        DBG2("%d: mdio_write(%02x,%02x,%04x)" NL, dev->def->index, id, reg,
             val);

        /* Enable proper MDIO port */
        zmii_enable_mdio(dev->zmii_dev, dev->zmii_input);

        /* Wait for management interface to be idle */
        n = 10;
        while (!emac_phy_done(in_be32(&p->stacr))) {
                udelay(1);
                if (!--n)
                        goto timeout;
        }

        /* Issue write command */
        out_be32(&p->stacr,
                 EMAC_STACR_BASE(emac_opb_mhz()) | EMAC_STACR_STAC_WRITE |
                 (reg & EMAC_STACR_PRA_MASK) |
                 ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT) |
                 (val << EMAC_STACR_PHYD_SHIFT) | EMAC_STACR_START);

        /* Wait for write to complete */
        n = 100;
        while (!emac_phy_done(in_be32(&p->stacr))) {
                udelay(1);
                if (!--n)
                        goto timeout;
        }
        return;
 timeout:
        DBG("%d: MII management interface timeout (write)" NL, dev->def->index);
}
 
static int emac_mdio_read(struct net_device *ndev, int id, int reg)
{
        struct ocp_enet_private *dev = ndev->priv;
        int res;

        local_bh_disable();
        res = __emac_mdio_read(dev->mdio_dev ? dev->mdio_dev : dev, (u8) id,
                               (u8) reg);
        local_bh_enable();
        return res;
}

static void emac_mdio_write(struct net_device *ndev, int id, int reg, int val)
{
        struct ocp_enet_private *dev = ndev->priv;

        local_bh_disable();
        __emac_mdio_write(dev->mdio_dev ? dev->mdio_dev : dev, (u8) id,
                          (u8) reg, (u16) val);
        local_bh_enable();
}
 
/* BHs disabled */
static void emac_set_multicast_list(struct net_device *ndev)
{
        struct ocp_enet_private *dev = ndev->priv;
        struct emac_regs __iomem *p = dev->emacp;
        u32 rmr = emac_iff2rmr(ndev);

        DBG("%d: multicast %08x" NL, dev->def->index, rmr);
        BUG_ON(!netif_running(dev->ndev));

        /* I decided to relax register access rules here to avoid
         * full EMAC reset.
         *
         * There is a real problem with EMAC4 core if we use MWSW_001 bit
         * in MR1 register and do a full EMAC reset.
         * One TX BD status update is delayed and, after EMAC reset, it
         * never happens, resulting in a TX hang (it'll be recovered by TX
         * timeout handler eventually, but this is just gross).
         * So we either have to do full TX reset or try to cheat here :)
         *
         * The only required change is to RX mode register, so I *think* all
         * we need is just to stop RX channel. This seems to work on all
         * tested SoCs.                                            --ebs
         */
        emac_rx_disable(dev);
        if (rmr & EMAC_RMR_MAE)
                emac_hash_mc(dev);
        out_be32(&p->rmr, rmr);
        emac_rx_enable(dev);
}
 
/* BHs disabled */
static int emac_resize_rx_ring(struct ocp_enet_private *dev, int new_mtu)
{
        struct ocp_func_emac_data *emacdata = dev->def->additions;
        int rx_sync_size = emac_rx_sync_size(new_mtu);
        int rx_skb_size = emac_rx_skb_size(new_mtu);
        int i, ret = 0;

        emac_rx_disable(dev);
        mal_disable_rx_channel(dev->mal, emacdata->mal_rx_chan);

        if (dev->rx_sg_skb) {
                ++dev->estats.rx_dropped_resize;
                dev_kfree_skb(dev->rx_sg_skb);
                dev->rx_sg_skb = NULL;
        }

        /* Make a first pass over RX ring and mark BDs ready, dropping
         * non-processed packets on the way. We need this as a separate pass
         * to simplify error recovery in the case of allocation failure later.
         */
        for (i = 0; i < NUM_RX_BUFF; ++i) {
                if (dev->rx_desc[i].ctrl & MAL_RX_CTRL_FIRST)
                        ++dev->estats.rx_dropped_resize;

                dev->rx_desc[i].data_len = 0;
                dev->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY |
                    (i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
        }

        /* Reallocate RX ring only if bigger skb buffers are required */
        if (rx_skb_size <= dev->rx_skb_size)
                goto skip;

        /* Second pass, allocate new skbs */
        for (i = 0; i < NUM_RX_BUFF; ++i) {
                struct sk_buff *skb = alloc_skb(rx_skb_size, GFP_ATOMIC);
                if (!skb) {
                        ret = -ENOMEM;
                        goto oom;
                }

                BUG_ON(!dev->rx_skb[i]);
                dev_kfree_skb(dev->rx_skb[i]);

                skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
                dev->rx_desc[i].data_ptr =
                    dma_map_single(dev->ldev, skb->data - 2, rx_sync_size,
                                   DMA_FROM_DEVICE) + 2;
                dev->rx_skb[i] = skb;
        }
 skip:
        /* Check if we need to change "Jumbo" bit in MR1 */
        if ((new_mtu > ETH_DATA_LEN) ^ (dev->ndev->mtu > ETH_DATA_LEN)) {
                /* This is to prevent starting RX channel in emac_rx_enable() */
                dev->commac.rx_stopped = 1;

                dev->ndev->mtu = new_mtu;
                emac_full_tx_reset(dev->ndev);
        }

        mal_set_rcbs(dev->mal, emacdata->mal_rx_chan, emac_rx_size(new_mtu));
 oom:
        /* Restart RX */
        dev->commac.rx_stopped = dev->rx_slot = 0;
        mal_enable_rx_channel(dev->mal, emacdata->mal_rx_chan);
        emac_rx_enable(dev);

        return ret;
}
 
/* Process ctx, rtnl_lock semaphore */
static int emac_change_mtu(struct net_device *ndev, int new_mtu)
{
        struct ocp_enet_private *dev = ndev->priv;
        int ret = 0;

        if (new_mtu < EMAC_MIN_MTU || new_mtu > EMAC_MAX_MTU)
                return -EINVAL;

        DBG("%d: change_mtu(%d)" NL, dev->def->index, new_mtu);

        local_bh_disable();
        if (netif_running(ndev)) {
                /* Check if we really need to reinitialize RX ring */
                if (emac_rx_skb_size(ndev->mtu) != emac_rx_skb_size(new_mtu))
                        ret = emac_resize_rx_ring(dev, new_mtu);
        }

        if (!ret) {
                ndev->mtu = new_mtu;
                dev->rx_skb_size = emac_rx_skb_size(new_mtu);
                dev->rx_sync_size = emac_rx_sync_size(new_mtu);
        }
        local_bh_enable();

        return ret;
}
 
static void emac_clean_tx_ring(struct ocp_enet_private *dev)
{
        int i;
        for (i = 0; i < NUM_TX_BUFF; ++i) {
                if (dev->tx_skb[i]) {
                        dev_kfree_skb(dev->tx_skb[i]);
                        dev->tx_skb[i] = NULL;
                        if (dev->tx_desc[i].ctrl & MAL_TX_CTRL_READY)
                                ++dev->estats.tx_dropped;
                }
                dev->tx_desc[i].ctrl = 0;
                dev->tx_desc[i].data_ptr = 0;
        }
}
 
static void emac_clean_rx_ring(struct ocp_enet_private *dev)
{
        int i;
        for (i = 0; i < NUM_RX_BUFF; ++i)
                if (dev->rx_skb[i]) {
                        dev->rx_desc[i].ctrl = 0;
                        dev_kfree_skb(dev->rx_skb[i]);
                        dev->rx_skb[i] = NULL;
                        dev->rx_desc[i].data_ptr = 0;
                }

        if (dev->rx_sg_skb) {
                dev_kfree_skb(dev->rx_sg_skb);
                dev->rx_sg_skb = NULL;
        }
}
 
static inline int emac_alloc_rx_skb(struct ocp_enet_private *dev, int slot,
                                    gfp_t flags)
{
        struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags);
        if (unlikely(!skb))
                return -ENOMEM;

        dev->rx_skb[slot] = skb;
        dev->rx_desc[slot].data_len = 0;

        skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
        dev->rx_desc[slot].data_ptr =
            dma_map_single(dev->ldev, skb->data - 2, dev->rx_sync_size,
                           DMA_FROM_DEVICE) + 2;

        dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
            (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);

        return 0;
}
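/* A note on the +2/-2 dance above (added commentary): the skb data pointer
 * is advanced 2 bytes so that the 14-byte Ethernet header leaves the IP
 * header word-aligned, while the DMA mapping still starts at the original,
 * properly aligned address; the descriptor then gets the mapped
 * address + 2.
 */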
 
static void emac_print_link_status(struct ocp_enet_private *dev)
{
        if (netif_carrier_ok(dev->ndev))
                printk(KERN_INFO "%s: link is up, %d %s%s\n",
                       dev->ndev->name, dev->phy.speed,
                       dev->phy.duplex == DUPLEX_FULL ? "FDX" : "HDX",
                       dev->phy.pause ? ", pause enabled" :
                       dev->phy.asym_pause ? ", asymmetric pause enabled" : "");
        else
                printk(KERN_INFO "%s: link is down\n", dev->ndev->name);
}
 
/* Process ctx, rtnl_lock semaphore */
static int emac_open(struct net_device *ndev)
{
        struct ocp_enet_private *dev = ndev->priv;
        struct ocp_func_emac_data *emacdata = dev->def->additions;
        int err, i;

        DBG("%d: open" NL, dev->def->index);

        /* Setup error IRQ handler */
        err = request_irq(dev->def->irq, emac_irq, 0, "EMAC", dev);
        if (err) {
                printk(KERN_ERR "%s: failed to request IRQ %d\n",
                       ndev->name, dev->def->irq);
                return err;
        }

        /* Allocate RX ring */
        for (i = 0; i < NUM_RX_BUFF; ++i)
                if (emac_alloc_rx_skb(dev, i, GFP_KERNEL)) {
                        printk(KERN_ERR "%s: failed to allocate RX ring\n",
                               ndev->name);
                        goto oom;
                }

        local_bh_disable();
        dev->tx_cnt = dev->tx_slot = dev->ack_slot = dev->rx_slot =
            dev->commac.rx_stopped = 0;
        dev->rx_sg_skb = NULL;

        if (dev->phy.address >= 0) {
                int link_poll_interval;
                if (dev->phy.def->ops->poll_link(&dev->phy)) {
                        dev->phy.def->ops->read_link(&dev->phy);
                        EMAC_RX_CLK_DEFAULT(dev->def->index);
                        netif_carrier_on(dev->ndev);
                        link_poll_interval = PHY_POLL_LINK_ON;
                } else {
                        EMAC_RX_CLK_TX(dev->def->index);
                        netif_carrier_off(dev->ndev);
                        link_poll_interval = PHY_POLL_LINK_OFF;
                }
                mod_timer(&dev->link_timer, jiffies + link_poll_interval);
                emac_print_link_status(dev);
        } else
                netif_carrier_on(dev->ndev);

        emac_configure(dev);
        mal_poll_add(dev->mal, &dev->commac);
        mal_enable_tx_channel(dev->mal, emacdata->mal_tx_chan);
        mal_set_rcbs(dev->mal, emacdata->mal_rx_chan, emac_rx_size(ndev->mtu));
        mal_enable_rx_channel(dev->mal, emacdata->mal_rx_chan);
        emac_tx_enable(dev);
        emac_rx_enable(dev);
        netif_start_queue(ndev);
        local_bh_enable();

        return 0;
 oom:
        emac_clean_rx_ring(dev);
        free_irq(dev->def->irq, dev);
        return -ENOMEM;
}
 
/* BHs disabled */
static int emac_link_differs(struct ocp_enet_private *dev)
{
        u32 r = in_be32(&dev->emacp->mr1);

        int duplex = r & EMAC_MR1_FDE ? DUPLEX_FULL : DUPLEX_HALF;
        int speed, pause, asym_pause;

        if (r & (EMAC_MR1_MF_1000 | EMAC_MR1_MF_1000GPCS))
                speed = SPEED_1000;
        else if (r & EMAC_MR1_MF_100)
                speed = SPEED_100;
        else
                speed = SPEED_10;

        switch (r & (EMAC_MR1_EIFC | EMAC_MR1_APP)) {
        case (EMAC_MR1_EIFC | EMAC_MR1_APP):
                pause = 1;
                asym_pause = 0;
                break;
        case EMAC_MR1_APP:
                pause = 0;
                asym_pause = 1;
                break;
        default:
                pause = asym_pause = 0;
        }
        return speed != dev->phy.speed || duplex != dev->phy.duplex ||
            pause != dev->phy.pause || asym_pause != dev->phy.asym_pause;
}
 
/* BHs disabled */
static void emac_link_timer(unsigned long data)
{
        struct ocp_enet_private *dev = (struct ocp_enet_private *)data;
        int link_poll_interval;

        DBG2("%d: link timer" NL, dev->def->index);

        if (dev->phy.def->ops->poll_link(&dev->phy)) {
                if (!netif_carrier_ok(dev->ndev)) {
                        EMAC_RX_CLK_DEFAULT(dev->def->index);

                        /* Get new link parameters */
                        dev->phy.def->ops->read_link(&dev->phy);

                        if (dev->tah_dev || emac_link_differs(dev))
                                emac_full_tx_reset(dev->ndev);

                        netif_carrier_on(dev->ndev);
                        emac_print_link_status(dev);
                }
                link_poll_interval = PHY_POLL_LINK_ON;
        } else {
                if (netif_carrier_ok(dev->ndev)) {
                        EMAC_RX_CLK_TX(dev->def->index);
#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX)
                        emac_reinitialize(dev);
#endif
                        netif_carrier_off(dev->ndev);
                        emac_print_link_status(dev);
                }

                /* Retry reset if the previous attempt failed.
                 * This is needed mostly for CONFIG_IBM_EMAC_PHY_RX_CLK_FIX
                 * case, but I left it here because it shouldn't trigger for
                 * sane fully-functional adapters.
                 */
                if (unlikely(dev->reset_failed))
                        emac_reinitialize(dev);

                link_poll_interval = PHY_POLL_LINK_OFF;
        }
        mod_timer(&dev->link_timer, jiffies + link_poll_interval);
}
 
/* BHs disabled */
static void emac_force_link_update(struct ocp_enet_private *dev)
{
        netif_carrier_off(dev->ndev);
        if (timer_pending(&dev->link_timer))
                mod_timer(&dev->link_timer, jiffies + PHY_POLL_LINK_OFF);
}
 
/* Process ctx, rtnl_lock semaphore */
static int emac_close(struct net_device *ndev)
{
        struct ocp_enet_private *dev = ndev->priv;
        struct ocp_func_emac_data *emacdata = dev->def->additions;

        DBG("%d: close" NL, dev->def->index);

        local_bh_disable();

        if (dev->phy.address >= 0)
                del_timer_sync(&dev->link_timer);

        netif_stop_queue(ndev);
        emac_rx_disable(dev);
        emac_tx_disable(dev);
        mal_disable_rx_channel(dev->mal, emacdata->mal_rx_chan);
        mal_disable_tx_channel(dev->mal, emacdata->mal_tx_chan);
        mal_poll_del(dev->mal, &dev->commac);
        local_bh_enable();

        emac_clean_tx_ring(dev);
        emac_clean_rx_ring(dev);
        free_irq(dev->def->irq, dev);

        return 0;
}
 
static inline u16 emac_tx_csum(struct ocp_enet_private *dev,
                               struct sk_buff *skb)
{
#if defined(CONFIG_IBM_EMAC_TAH)
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                ++dev->stats.tx_packets_csum;
                return EMAC_TX_CTRL_TAH_CSUM;
        }
#endif
        return 0;
}
 
static inline int emac_xmit_finish(struct ocp_enet_private *dev, int len)
{
        struct emac_regs __iomem *p = dev->emacp;
        struct net_device *ndev = dev->ndev;

        /* Send the packet out */
        out_be32(&p->tmr0, EMAC_TMR0_XMIT);

        if (unlikely(++dev->tx_cnt == NUM_TX_BUFF)) {
                netif_stop_queue(ndev);
                DBG2("%d: stopped TX queue" NL, dev->def->index);
        }

        ndev->trans_start = jiffies;
        ++dev->stats.tx_packets;
        dev->stats.tx_bytes += len;

        return 0;
}
 
/* BHs disabled */
static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
        struct ocp_enet_private *dev = ndev->priv;
        unsigned int len = skb->len;
        int slot;

        u16 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
            MAL_TX_CTRL_LAST | emac_tx_csum(dev, skb);

        slot = dev->tx_slot++;
        if (dev->tx_slot == NUM_TX_BUFF) {
                dev->tx_slot = 0;
                ctrl |= MAL_TX_CTRL_WRAP;
        }

        DBG2("%d: xmit(%u) %d" NL, dev->def->index, len, slot);

        dev->tx_skb[slot] = skb;
        dev->tx_desc[slot].data_ptr = dma_map_single(dev->ldev, skb->data, len,
                                                     DMA_TO_DEVICE);
        dev->tx_desc[slot].data_len = (u16) len;
        barrier();
        dev->tx_desc[slot].ctrl = ctrl;

        return emac_xmit_finish(dev, len);
}
 
#if defined(CONFIG_IBM_EMAC_TAH)
static inline int emac_xmit_split(struct ocp_enet_private *dev, int slot,
                                  u32 pd, int len, int last, u16 base_ctrl)
{
        while (1) {
                u16 ctrl = base_ctrl;
                int chunk = min(len, MAL_MAX_TX_SIZE);
                len -= chunk;

                slot = (slot + 1) % NUM_TX_BUFF;

                if (last && !len)
                        ctrl |= MAL_TX_CTRL_LAST;
                if (slot == NUM_TX_BUFF - 1)
                        ctrl |= MAL_TX_CTRL_WRAP;

                dev->tx_skb[slot] = NULL;
                dev->tx_desc[slot].data_ptr = pd;
                dev->tx_desc[slot].data_len = (u16) chunk;
                dev->tx_desc[slot].ctrl = ctrl;
                ++dev->tx_cnt;

                if (!len)
                        break;

                pd += chunk;
        }
        return slot;
}
 
/* BHs disabled (SG version for TAH equipped EMACs) */
static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
{
        struct ocp_enet_private *dev = ndev->priv;
        int nr_frags = skb_shinfo(skb)->nr_frags;
        int len = skb->len, chunk;
        int slot, i;
        u16 ctrl;
        u32 pd;

        /* This is common "fast" path */
        if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE))
                return emac_start_xmit(skb, ndev);

        len -= skb->data_len;

        /* Note, this is only an *estimation*, we can still run out of empty
         * slots because of the additional fragmentation into
         * MAL_MAX_TX_SIZE-sized chunks
         */
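        /* Worked example for the estimation above (added commentary): the
         * linear part needs mal_tx_chunks(len) slots, roughly
         * len / MAL_MAX_TX_SIZE rounded up, and each of the nr_frags
         * fragments needs at least one more slot, but a fragment may itself
         * split into several chunks, hence the per-fragment recheck and the
         * undo path below.
         */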
 
        if (unlikely(dev->tx_cnt + nr_frags + mal_tx_chunks(len) > NUM_TX_BUFF))
                goto stop_queue;

        ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
            emac_tx_csum(dev, skb);
        slot = dev->tx_slot;

        /* skb data */
        dev->tx_skb[slot] = NULL;
        chunk = min(len, MAL_MAX_TX_SIZE);
        dev->tx_desc[slot].data_ptr = pd =
            dma_map_single(dev->ldev, skb->data, len, DMA_TO_DEVICE);
        dev->tx_desc[slot].data_len = (u16) chunk;
        len -= chunk;
        if (unlikely(len))
                slot = emac_xmit_split(dev, slot, pd + chunk, len, !nr_frags,
                                       ctrl);
        /* skb fragments */
        for (i = 0; i < nr_frags; ++i) {
                struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
                len = frag->size;

                if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
                        goto undo_frame;

                pd = dma_map_page(dev->ldev, frag->page, frag->page_offset, len,
                                  DMA_TO_DEVICE);

                slot = emac_xmit_split(dev, slot, pd, len, i == nr_frags - 1,
                                       ctrl);
        }

        DBG2("%d: xmit_sg(%u) %d - %d" NL, dev->def->index, skb->len,
             dev->tx_slot, slot);

        /* Attach skb to the last slot so we don't release it too early */
        dev->tx_skb[slot] = skb;

        /* Send the packet out */
        if (dev->tx_slot == NUM_TX_BUFF - 1)
                ctrl |= MAL_TX_CTRL_WRAP;
        barrier();
        dev->tx_desc[dev->tx_slot].ctrl = ctrl;
        dev->tx_slot = (slot + 1) % NUM_TX_BUFF;

        return emac_xmit_finish(dev, skb->len);

 undo_frame:
        /* Well, too bad. Our previous estimation was overly optimistic.
         * Undo everything.
         */
        while (slot != dev->tx_slot) {
                dev->tx_desc[slot].ctrl = 0;
                --dev->tx_cnt;
                if (--slot < 0)
                        slot = NUM_TX_BUFF - 1;
        }
        ++dev->estats.tx_undo;

 stop_queue:
        netif_stop_queue(ndev);
        DBG2("%d: stopped TX queue" NL, dev->def->index);
        return 1;
}
#else
# define emac_start_xmit_sg     emac_start_xmit
#endif  /* !defined(CONFIG_IBM_EMAC_TAH) */
 
static void emac_parse_tx_error(struct ocp_enet_private *dev, u16 ctrl)
{
        struct ibm_emac_error_stats *st = &dev->estats;
        DBG("%d: BD TX error %04x" NL, dev->def->index, ctrl);

        ++st->tx_bd_errors;
        if (ctrl & EMAC_TX_ST_BFCS)
                ++st->tx_bd_bad_fcs;
        if (ctrl & EMAC_TX_ST_LCS)
                ++st->tx_bd_carrier_loss;
        if (ctrl & EMAC_TX_ST_ED)
                ++st->tx_bd_excessive_deferral;
        if (ctrl & EMAC_TX_ST_EC)
                ++st->tx_bd_excessive_collisions;
        if (ctrl & EMAC_TX_ST_LC)
                ++st->tx_bd_late_collision;
        if (ctrl & EMAC_TX_ST_MC)
                ++st->tx_bd_multple_collisions;
        if (ctrl & EMAC_TX_ST_SC)
                ++st->tx_bd_single_collision;
        if (ctrl & EMAC_TX_ST_UR)
                ++st->tx_bd_underrun;
        if (ctrl & EMAC_TX_ST_SQE)
                ++st->tx_bd_sqe;
}
 
static void emac_poll_tx(void *param)
{
        struct ocp_enet_private *dev = param;
        DBG2("%d: poll_tx, %d %d" NL, dev->def->index, dev->tx_cnt,
             dev->ack_slot);

        if (dev->tx_cnt) {
                u16 ctrl;
                int slot = dev->ack_slot, n = 0;
        again:
                ctrl = dev->tx_desc[slot].ctrl;
                if (!(ctrl & MAL_TX_CTRL_READY)) {
                        struct sk_buff *skb = dev->tx_skb[slot];
                        ++n;

                        if (skb) {
                                dev_kfree_skb(skb);
                                dev->tx_skb[slot] = NULL;
                        }
                        slot = (slot + 1) % NUM_TX_BUFF;

                        if (unlikely(EMAC_IS_BAD_TX(ctrl)))
                                emac_parse_tx_error(dev, ctrl);

                        if (--dev->tx_cnt)
                                goto again;
                }
                if (n) {
                        dev->ack_slot = slot;
                        if (netif_queue_stopped(dev->ndev) &&
                            dev->tx_cnt < EMAC_TX_WAKEUP_THRESH)
                                netif_wake_queue(dev->ndev);

                        DBG2("%d: tx %d pkts" NL, dev->def->index, n);
                }
        }
}
 
static inline void emac_recycle_rx_skb(struct ocp_enet_private *dev, int slot,
                                       int len)
{
        struct sk_buff *skb = dev->rx_skb[slot];
        DBG2("%d: recycle %d %d" NL, dev->def->index, slot, len);

        if (len)
                dma_map_single(dev->ldev, skb->data - 2,
                               EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE);

        dev->rx_desc[slot].data_len = 0;
        barrier();
        dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
            (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
}
 
static void emac_parse_rx_error(struct ocp_enet_private *dev, u16 ctrl)
{
        struct ibm_emac_error_stats *st = &dev->estats;
        DBG("%d: BD RX error %04x" NL, dev->def->index, ctrl);

        ++st->rx_bd_errors;
        if (ctrl & EMAC_RX_ST_OE)
                ++st->rx_bd_overrun;
        if (ctrl & EMAC_RX_ST_BP)
                ++st->rx_bd_bad_packet;
        if (ctrl & EMAC_RX_ST_RP)
                ++st->rx_bd_runt_packet;
        if (ctrl & EMAC_RX_ST_SE)
                ++st->rx_bd_short_event;
        if (ctrl & EMAC_RX_ST_AE)
                ++st->rx_bd_alignment_error;
        if (ctrl & EMAC_RX_ST_BFCS)
                ++st->rx_bd_bad_fcs;
        if (ctrl & EMAC_RX_ST_PTL)
                ++st->rx_bd_packet_too_long;
        if (ctrl & EMAC_RX_ST_ORE)
                ++st->rx_bd_out_of_range;
        if (ctrl & EMAC_RX_ST_IRE)
                ++st->rx_bd_in_range;
}
 
static inline void emac_rx_csum(struct ocp_enet_private *dev,
                                struct sk_buff *skb, u16 ctrl)
{
#if defined(CONFIG_IBM_EMAC_TAH)
        if (!ctrl && dev->tah_dev) {
                skb->ip_summed = CHECKSUM_UNNECESSARY;
                ++dev->stats.rx_packets_csum;
        }
#endif
}
 
static inline int emac_rx_sg_append(struct ocp_enet_private *dev, int slot)
{
        if (likely(dev->rx_sg_skb != NULL)) {
                int len = dev->rx_desc[slot].data_len;
                int tot_len = dev->rx_sg_skb->len + len;

                if (unlikely(tot_len + 2 > dev->rx_skb_size)) {
                        ++dev->estats.rx_dropped_mtu;
                        dev_kfree_skb(dev->rx_sg_skb);
                        dev->rx_sg_skb = NULL;
                } else {
                        cacheable_memcpy(dev->rx_sg_skb->tail,
                                         dev->rx_skb[slot]->data, len);
                        skb_put(dev->rx_sg_skb, len);
                        emac_recycle_rx_skb(dev, slot, len);
                        return 0;
                }
        }
        emac_recycle_rx_skb(dev, slot, 0);
        return -1;
}
 
/* BHs disabled */
static int emac_poll_rx(void *param, int budget)
{
        struct ocp_enet_private *dev = param;
        int slot = dev->rx_slot, received = 0;

        DBG2("%d: poll_rx(%d)" NL, dev->def->index, budget);

 again:
        while (budget > 0) {
                int len;
                struct sk_buff *skb;
                u16 ctrl = dev->rx_desc[slot].ctrl;

                if (ctrl & MAL_RX_CTRL_EMPTY)
                        break;

                skb = dev->rx_skb[slot];
                barrier();
                len = dev->rx_desc[slot].data_len;

                if (unlikely(!MAL_IS_SINGLE_RX(ctrl)))
                        goto sg;

                ctrl &= EMAC_BAD_RX_MASK;
                if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
                        emac_parse_rx_error(dev, ctrl);
                        ++dev->estats.rx_dropped_error;
                        emac_recycle_rx_skb(dev, slot, 0);
                        len = 0;
                        goto next;
                }

                if (len && len < EMAC_RX_COPY_THRESH) {
                        struct sk_buff *copy_skb =
                            alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2, GFP_ATOMIC);
                        if (unlikely(!copy_skb))
                                goto oom;

                        skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2);
                        cacheable_memcpy(copy_skb->data - 2, skb->data - 2,
                                         len + 2);
                        emac_recycle_rx_skb(dev, slot, len);
                        skb = copy_skb;
                } else if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC)))
                        goto oom;

                skb_put(skb, len);
 push_packet:
                skb->dev = dev->ndev;
                skb->protocol = eth_type_trans(skb, dev->ndev);
                emac_rx_csum(dev, skb, ctrl);

                if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
                        ++dev->estats.rx_dropped_stack;
 next:
                ++dev->stats.rx_packets;
 skip:
                dev->stats.rx_bytes += len;
                slot = (slot + 1) % NUM_RX_BUFF;
                --budget;
                ++received;
                continue;
 sg:
                if (ctrl & MAL_RX_CTRL_FIRST) {
                        BUG_ON(dev->rx_sg_skb);
                        if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) {
                                DBG("%d: rx OOM %d" NL, dev->def->index, slot);
                                ++dev->estats.rx_dropped_oom;
                                emac_recycle_rx_skb(dev, slot, 0);
                        } else {
                                dev->rx_sg_skb = skb;
                                skb_put(skb, len);
                        }
                } else if (!emac_rx_sg_append(dev, slot) &&
                           (ctrl & MAL_RX_CTRL_LAST)) {

                        skb = dev->rx_sg_skb;
                        dev->rx_sg_skb = NULL;

                        ctrl &= EMAC_BAD_RX_MASK;
                        if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
                                emac_parse_rx_error(dev, ctrl);
                                ++dev->estats.rx_dropped_error;
                                dev_kfree_skb(skb);
                                len = 0;
                        } else
                                goto push_packet;
                }
                goto skip;
 oom:
                DBG("%d: rx OOM %d" NL, dev->def->index, slot);
                /* Drop the packet and recycle skb */
                ++dev->estats.rx_dropped_oom;
                emac_recycle_rx_skb(dev, slot, 0);
                goto next;
        }

        if (received) {
                DBG2("%d: rx %d BDs" NL, dev->def->index, received);
                dev->rx_slot = slot;
        }

        if (unlikely(budget && dev->commac.rx_stopped)) {
                struct ocp_func_emac_data *emacdata = dev->def->additions;

                barrier();
                if (!(dev->rx_desc[slot].ctrl & MAL_RX_CTRL_EMPTY)) {
                        DBG2("%d: rx restart" NL, dev->def->index);
                        received = 0;
                        goto again;
                }

                if (dev->rx_sg_skb) {
                        DBG2("%d: dropping partial rx packet" NL,
                             dev->def->index);
                        ++dev->estats.rx_dropped_error;
                        dev_kfree_skb(dev->rx_sg_skb);
                        dev->rx_sg_skb = NULL;
                }

                dev->commac.rx_stopped = 0;
                mal_enable_rx_channel(dev->mal, emacdata->mal_rx_chan);
                emac_rx_enable(dev);
                dev->rx_slot = slot;
        }
        return received;
}
 
/* BHs disabled */
static int emac_peek_rx(void *param)
{
        struct ocp_enet_private *dev = param;
        return !(dev->rx_desc[dev->rx_slot].ctrl & MAL_RX_CTRL_EMPTY);
}
 
/* BHs disabled */
static int emac_peek_rx_sg(void *param)
{
        struct ocp_enet_private *dev = param;
        int slot = dev->rx_slot;
        while (1) {
                u16 ctrl = dev->rx_desc[slot].ctrl;
                if (ctrl & MAL_RX_CTRL_EMPTY)
                        return 0;
                else if (ctrl & MAL_RX_CTRL_LAST)
                        return 1;

                slot = (slot + 1) % NUM_RX_BUFF;

                /* I'm just being paranoid here :) */
                if (unlikely(slot == dev->rx_slot))
                        return 0;
        }
}
 
/* Hard IRQ */
static void emac_rxde(void *param)
{
        struct ocp_enet_private *dev = param;
        ++dev->estats.rx_stopped;
        emac_rx_disable_async(dev);
}
 
/* Hard IRQ */
static irqreturn_t emac_irq(int irq, void *dev_instance)
{
        struct ocp_enet_private *dev = dev_instance;
        struct emac_regs __iomem *p = dev->emacp;
        struct ibm_emac_error_stats *st = &dev->estats;

        u32 isr = in_be32(&p->isr);
        out_be32(&p->isr, isr);

        DBG("%d: isr = %08x" NL, dev->def->index, isr);

        if (isr & EMAC_ISR_TXPE)
                ++st->tx_parity;
        if (isr & EMAC_ISR_RXPE)
                ++st->rx_parity;
        if (isr & EMAC_ISR_TXUE)
                ++st->tx_underrun;
        if (isr & EMAC_ISR_RXOE)
                ++st->rx_fifo_overrun;
        if (isr & EMAC_ISR_OVR)
                ++st->rx_overrun;
        if (isr & EMAC_ISR_BP)
                ++st->rx_bad_packet;
        if (isr & EMAC_ISR_RP)
                ++st->rx_runt_packet;
        if (isr & EMAC_ISR_SE)
                ++st->rx_short_event;
        if (isr & EMAC_ISR_ALE)
                ++st->rx_alignment_error;
        if (isr & EMAC_ISR_BFCS)
                ++st->rx_bad_fcs;
        if (isr & EMAC_ISR_PTLE)
                ++st->rx_packet_too_long;
        if (isr & EMAC_ISR_ORE)
                ++st->rx_out_of_range;
        if (isr & EMAC_ISR_IRE)
                ++st->rx_in_range;
        if (isr & EMAC_ISR_SQE)
                ++st->tx_sqe;
        if (isr & EMAC_ISR_TE)
                ++st->tx_errors;

        return IRQ_HANDLED;
}
 
static struct net_device_stats *emac_stats(struct net_device *ndev)
{
        struct ocp_enet_private *dev = ndev->priv;
        struct ibm_emac_stats *st = &dev->stats;
        struct ibm_emac_error_stats *est = &dev->estats;
        struct net_device_stats *nst = &dev->nstats;

        DBG2("%d: stats" NL, dev->def->index);

        /* Compute "legacy" statistics */
        local_irq_disable();
        nst->rx_packets = (unsigned long)st->rx_packets;
        nst->rx_bytes = (unsigned long)st->rx_bytes;
        nst->tx_packets = (unsigned long)st->tx_packets;
        nst->tx_bytes = (unsigned long)st->tx_bytes;
        nst->rx_dropped = (unsigned long)(est->rx_dropped_oom +
                                          est->rx_dropped_error +
                                          est->rx_dropped_resize +
                                          est->rx_dropped_mtu);
        nst->tx_dropped = (unsigned long)est->tx_dropped;

        nst->rx_errors = (unsigned long)est->rx_bd_errors;
        nst->rx_fifo_errors = (unsigned long)(est->rx_bd_overrun +
                                              est->rx_fifo_overrun +
                                              est->rx_overrun);
        nst->rx_frame_errors = (unsigned long)(est->rx_bd_alignment_error +
                                               est->rx_alignment_error);
        nst->rx_crc_errors = (unsigned long)(est->rx_bd_bad_fcs +
                                             est->rx_bad_fcs);
        nst->rx_length_errors = (unsigned long)(est->rx_bd_runt_packet +
                                                est->rx_bd_short_event +
                                                est->rx_bd_packet_too_long +
                                                est->rx_bd_out_of_range +
                                                est->rx_bd_in_range +
                                                est->rx_runt_packet +
                                                est->rx_short_event +
                                                est->rx_packet_too_long +
                                                est->rx_out_of_range +
                                                est->rx_in_range);

        nst->tx_errors = (unsigned long)(est->tx_bd_errors + est->tx_errors);
        nst->tx_fifo_errors = (unsigned long)(est->tx_bd_underrun +
                                              est->tx_underrun);
        nst->tx_carrier_errors = (unsigned long)est->tx_bd_carrier_loss;
        nst->collisions = (unsigned long)(est->tx_bd_excessive_deferral +
                                          est->tx_bd_excessive_collisions +
                                          est->tx_bd_late_collision +
                                          est->tx_bd_multple_collisions);
        local_irq_enable();
        return nst;
}
 
static void emac_remove(struct ocp_device *ocpdev)
{
        struct ocp_enet_private *dev = ocp_get_drvdata(ocpdev);

        DBG("%d: remove" NL, dev->def->index);

        ocp_set_drvdata(ocpdev, NULL);
        unregister_netdev(dev->ndev);

        tah_fini(dev->tah_dev);
        rgmii_fini(dev->rgmii_dev, dev->rgmii_input);
        zmii_fini(dev->zmii_dev, dev->zmii_input);

        emac_dbg_register(dev->def->index, NULL);

        mal_unregister_commac(dev->mal, &dev->commac);
        iounmap(dev->emacp);
        kfree(dev->ndev);
}
 
static struct mal_commac_ops emac_commac_ops = {
        .poll_tx = &emac_poll_tx,
        .poll_rx = &emac_poll_rx,
        .peek_rx = &emac_peek_rx,
        .rxde = &emac_rxde,
};

static struct mal_commac_ops emac_commac_sg_ops = {
        .poll_tx = &emac_poll_tx,
        .poll_rx = &emac_poll_rx,
        .peek_rx = &emac_peek_rx_sg,
        .rxde = &emac_rxde,
};
 
/* Ethtool support */
static int emac_ethtool_get_settings(struct net_device *ndev,
                                     struct ethtool_cmd *cmd)
{
        struct ocp_enet_private *dev = ndev->priv;

        cmd->supported = dev->phy.features;
        cmd->port = PORT_MII;
        cmd->phy_address = dev->phy.address;
        cmd->transceiver =
            dev->phy.address >= 0 ? XCVR_EXTERNAL : XCVR_INTERNAL;

        local_bh_disable();
        cmd->advertising = dev->phy.advertising;
        cmd->autoneg = dev->phy.autoneg;
        cmd->speed = dev->phy.speed;
        cmd->duplex = dev->phy.duplex;
        local_bh_enable();

        return 0;
}
 
static int emac_ethtool_set_settings(struct net_device *ndev,
                                     struct ethtool_cmd *cmd)
{
        struct ocp_enet_private *dev = ndev->priv;
        u32 f = dev->phy.features;

        DBG("%d: set_settings(%d, %d, %d, 0x%08x)" NL, dev->def->index,
            cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising);

        /* Basic sanity checks */
        if (dev->phy.address < 0)
                return -EOPNOTSUPP;
        if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
                return -EINVAL;
        if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
                return -EINVAL;
        if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL)
                return -EINVAL;

        if (cmd->autoneg == AUTONEG_DISABLE) {
                switch (cmd->speed) {
                case SPEED_10:
                        if (cmd->duplex == DUPLEX_HALF
                            && !(f & SUPPORTED_10baseT_Half))
                                return -EINVAL;
                        if (cmd->duplex == DUPLEX_FULL
                            && !(f & SUPPORTED_10baseT_Full))
                                return -EINVAL;
                        break;
                case SPEED_100:
                        if (cmd->duplex == DUPLEX_HALF
                            && !(f & SUPPORTED_100baseT_Half))
                                return -EINVAL;
                        if (cmd->duplex == DUPLEX_FULL
                            && !(f & SUPPORTED_100baseT_Full))
                                return -EINVAL;
                        break;
                case SPEED_1000:
                        if (cmd->duplex == DUPLEX_HALF
                            && !(f & SUPPORTED_1000baseT_Half))
                                return -EINVAL;
                        if (cmd->duplex == DUPLEX_FULL
                            && !(f & SUPPORTED_1000baseT_Full))
                                return -EINVAL;
                        break;
                default:
                        return -EINVAL;
                }

                local_bh_disable();
                dev->phy.def->ops->setup_forced(&dev->phy, cmd->speed,
                                                cmd->duplex);
        } else {
                if (!(f & SUPPORTED_Autoneg))
                        return -EINVAL;

                local_bh_disable();
                dev->phy.def->ops->setup_aneg(&dev->phy,
                                              (cmd->advertising & f) |
                                              (dev->phy.advertising &
                                               (ADVERTISED_Pause |
                                                ADVERTISED_Asym_Pause)));
        }
        emac_force_link_update(dev);
        local_bh_enable();

        return 0;
}
 
static void emac_ethtool_get_ringparam(struct net_device *ndev,
                                       struct ethtool_ringparam *rp)
{
        rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF;
        rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF;
}
 
static void emac_ethtool_get_pauseparam(struct net_device *ndev,
                                        struct ethtool_pauseparam *pp)
{
        struct ocp_enet_private *dev = ndev->priv;

        local_bh_disable();
        if ((dev->phy.features & SUPPORTED_Autoneg) &&
            (dev->phy.advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause)))
                pp->autoneg = 1;

        if (dev->phy.duplex == DUPLEX_FULL) {
                if (dev->phy.pause)
                        pp->rx_pause = pp->tx_pause = 1;
                else if (dev->phy.asym_pause)
                        pp->tx_pause = 1;
        }
        local_bh_enable();
}
 
static u32 emac_ethtool_get_rx_csum(struct net_device *ndev)
{
        struct ocp_enet_private *dev = ndev->priv;
        return dev->tah_dev != 0;
}
 
static int emac_get_regs_len(struct ocp_enet_private *dev)
{
        return sizeof(struct emac_ethtool_regs_subhdr) + EMAC_ETHTOOL_REGS_SIZE;
}

static int emac_ethtool_get_regs_len(struct net_device *ndev)
{
        struct ocp_enet_private *dev = ndev->priv;
        return sizeof(struct emac_ethtool_regs_hdr) +
            emac_get_regs_len(dev) + mal_get_regs_len(dev->mal) +
            zmii_get_regs_len(dev->zmii_dev) +
            rgmii_get_regs_len(dev->rgmii_dev) +
            tah_get_regs_len(dev->tah_dev);
}
 
static void *emac_dump_regs(struct ocp_enet_private *dev, void *buf)
{
	struct emac_ethtool_regs_subhdr *hdr = buf;

	hdr->version = EMAC_ETHTOOL_REGS_VER;
	hdr->index = dev->def->index;
	memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE);
	return ((void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE);
}
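
/* Dump the registers of every component attached to this EMAC, flagging
 * each present one in hdr->components.  IRQs are disabled so the snapshot
 * is consistent.
 */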
 
static void emac_ethtool_get_regs(struct net_device *ndev,
				  struct ethtool_regs *regs, void *buf)
{
	struct ocp_enet_private *dev = ndev->priv;
	struct emac_ethtool_regs_hdr *hdr = buf;

	hdr->components = 0;
	buf = hdr + 1;

	local_irq_disable();
	buf = mal_dump_regs(dev->mal, buf);
	buf = emac_dump_regs(dev, buf);
	if (dev->zmii_dev) {
		hdr->components |= EMAC_ETHTOOL_REGS_ZMII;
		buf = zmii_dump_regs(dev->zmii_dev, buf);
	}
	if (dev->rgmii_dev) {
		hdr->components |= EMAC_ETHTOOL_REGS_RGMII;
		buf = rgmii_dump_regs(dev->rgmii_dev, buf);
	}
	if (dev->tah_dev) {
		hdr->components |= EMAC_ETHTOOL_REGS_TAH;
		buf = tah_dump_regs(dev->tah_dev, buf);
	}
	local_irq_enable();
}
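
/* Restart autonegotiation with the current advertisement word; valid only
 * if a PHY is present and autoneg is enabled.
 */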
 
static int emac_ethtool_nway_reset(struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	int res = 0;

	DBG("%d: nway_reset" NL, dev->def->index);

	if (dev->phy.address < 0)
		return -EOPNOTSUPP;

	local_bh_disable();
	if (!dev->phy.autoneg) {
		res = -EINVAL;
		goto out;
	}

	dev->phy.def->ops->setup_aneg(&dev->phy, dev->phy.advertising);
	emac_force_link_update(dev);
      out:
	local_bh_enable();
	return res;
}
 
static int emac_ethtool_get_stats_count(struct net_device *ndev)
{
	return EMAC_ETHTOOL_STATS_COUNT;
}

static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset,
				     u8 *buf)
{
	if (stringset == ETH_SS_STATS)
		memcpy(buf, &emac_stats_keys, sizeof(emac_stats_keys));
}

static void emac_ethtool_get_ethtool_stats(struct net_device *ndev,
					   struct ethtool_stats *estats,
					   u64 *tmp_stats)
{
	struct ocp_enet_private *dev = ndev->priv;

	/* Copy the generic stats, then the extended stats, as one snapshot */
	local_irq_disable();
	memcpy(tmp_stats, &dev->stats, sizeof(dev->stats));
	tmp_stats += sizeof(dev->stats) / sizeof(u64);
	memcpy(tmp_stats, &dev->estats, sizeof(dev->estats));
	local_irq_enable();
}
 
static void emac_ethtool_get_drvinfo(struct net_device *ndev,
				     struct ethtool_drvinfo *info)
{
	struct ocp_enet_private *dev = ndev->priv;

	strcpy(info->driver, "ibm_emac");
	strcpy(info->version, DRV_VERSION);
	info->fw_version[0] = '\0';
	sprintf(info->bus_info, "PPC 4xx EMAC %d", dev->def->index);
	info->n_stats = emac_ethtool_get_stats_count(ndev);
	info->regdump_len = emac_ethtool_get_regs_len(ndev);
}
 
static const struct ethtool_ops emac_ethtool_ops = {
	.get_settings = emac_ethtool_get_settings,
	.set_settings = emac_ethtool_set_settings,
	.get_drvinfo = emac_ethtool_get_drvinfo,

	.get_regs_len = emac_ethtool_get_regs_len,
	.get_regs = emac_ethtool_get_regs,

	.nway_reset = emac_ethtool_nway_reset,

	.get_ringparam = emac_ethtool_get_ringparam,
	.get_pauseparam = emac_ethtool_get_pauseparam,

	.get_rx_csum = emac_ethtool_get_rx_csum,

	.get_strings = emac_ethtool_get_strings,
	.get_stats_count = emac_ethtool_get_stats_count,
	.get_ethtool_stats = emac_ethtool_get_ethtool_stats,

	.get_link = ethtool_op_get_link,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.get_sg = ethtool_op_get_sg,
};
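
/* Legacy MII ioctls: SIOCDEVPRIVATE..SIOCDEVPRIVATE+2 mirror the standard
 * SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG operations.
 */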
 
static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
	struct ocp_enet_private *dev = ndev->priv;
	uint16_t *data = (uint16_t *)&rq->ifr_ifru;

	DBG("%d: ioctl %08x" NL, dev->def->index, cmd);

	if (dev->phy.address < 0)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCDEVPRIVATE:
		data[0] = dev->phy.address;
		/* Fall through */
	case SIOCGMIIREG:
	case SIOCDEVPRIVATE + 1:
		data[3] = emac_mdio_read(ndev, dev->phy.address, data[1]);
		return 0;

	case SIOCSMIIREG:
	case SIOCDEVPRIVATE + 2:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		emac_mdio_write(ndev, dev->phy.address, data[1], data[2]);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
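
/* Probe one EMAC instance: allocate the net_device, hook up to the MAL,
 * attach the optional ZMII/RGMII/TAH helpers, map the registers, find the
 * PHY and finally register with the network stack.  Errors unwind through
 * the out* labels in reverse order of initialization.
 */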
 
static int __init emac_probe(struct ocp_device *ocpdev)
{
	struct ocp_func_emac_data *emacdata = ocpdev->def->additions;
	struct net_device *ndev;
	struct ocp_device *maldev;
	struct ocp_enet_private *dev;
	int err, i;

	DBG("%d: probe" NL, ocpdev->def->index);

	if (!emacdata) {
		printk(KERN_ERR "emac%d: Missing additional data!\n",
		       ocpdev->def->index);
		return -ENODEV;
	}

	/* Allocate our net_device structure */
	ndev = alloc_etherdev(sizeof(struct ocp_enet_private));
	if (!ndev) {
		printk(KERN_ERR "emac%d: could not allocate ethernet device!\n",
		       ocpdev->def->index);
		return -ENOMEM;
	}
	dev = ndev->priv;
	dev->ndev = ndev;
	dev->ldev = &ocpdev->dev;
	dev->def = ocpdev->def;
	SET_MODULE_OWNER(ndev);

	/* Find MAL device we are connected to */
	maldev =
	    ocp_find_device(OCP_VENDOR_IBM, OCP_FUNC_MAL, emacdata->mal_idx);
	if (!maldev) {
		printk(KERN_ERR "emac%d: unknown mal%d device!\n",
		       dev->def->index, emacdata->mal_idx);
		err = -ENODEV;
		goto out;
	}
	dev->mal = ocp_get_drvdata(maldev);
	if (!dev->mal) {
		printk(KERN_ERR "emac%d: mal%d hasn't been initialized yet!\n",
		       dev->def->index, emacdata->mal_idx);
		err = -ENODEV;
		goto out;
	}

	/* Register with MAL */
	dev->commac.ops = &emac_commac_ops;
	dev->commac.dev = dev;
	dev->commac.tx_chan_mask = MAL_CHAN_MASK(emacdata->mal_tx_chan);
	dev->commac.rx_chan_mask = MAL_CHAN_MASK(emacdata->mal_rx_chan);
	err = mal_register_commac(dev->mal, &dev->commac);
	if (err) {
		printk(KERN_ERR "emac%d: failed to register with mal%d!\n",
		       dev->def->index, emacdata->mal_idx);
		goto out;
	}
	dev->rx_skb_size = emac_rx_skb_size(ndev->mtu);
	dev->rx_sync_size = emac_rx_sync_size(ndev->mtu);

	/* Get pointers to BD rings */
	dev->tx_desc =
	    dev->mal->bd_virt + mal_tx_bd_offset(dev->mal,
						 emacdata->mal_tx_chan);
	dev->rx_desc =
	    dev->mal->bd_virt + mal_rx_bd_offset(dev->mal,
						 emacdata->mal_rx_chan);

	DBG("%d: tx_desc %p" NL, ocpdev->def->index, dev->tx_desc);
	DBG("%d: rx_desc %p" NL, ocpdev->def->index, dev->rx_desc);

	/* Clean rings */
	memset(dev->tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor));
	memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor));
 
	/* If we depend on another EMAC for MDIO, check whether it was probed already */
	if (emacdata->mdio_idx >= 0 && emacdata->mdio_idx != ocpdev->def->index) {
		struct ocp_device *mdiodev =
		    ocp_find_device(OCP_VENDOR_IBM, OCP_FUNC_EMAC,
				    emacdata->mdio_idx);
		if (!mdiodev) {
			printk(KERN_ERR "emac%d: unknown emac%d device!\n",
			       dev->def->index, emacdata->mdio_idx);
			err = -ENODEV;
			goto out2;
		}
		dev->mdio_dev = ocp_get_drvdata(mdiodev);
		if (!dev->mdio_dev) {
			printk(KERN_ERR
			       "emac%d: emac%d hasn't been initialized yet!\n",
			       dev->def->index, emacdata->mdio_idx);
			err = -ENODEV;
			goto out2;
		}
	}

	/* Attach to ZMII, if needed */
	if ((err = zmii_attach(dev)) != 0)
		goto out2;

	/* Attach to RGMII, if needed */
	if ((err = rgmii_attach(dev)) != 0)
		goto out3;

	/* Attach to TAH, if needed */
	if ((err = tah_attach(dev)) != 0)
		goto out4;
 
	/* Map EMAC regs */
	dev->emacp = ioremap(dev->def->paddr, sizeof(struct emac_regs));
	if (!dev->emacp) {
		printk(KERN_ERR "emac%d: could not ioremap device registers!\n",
		       dev->def->index);
		err = -ENOMEM;
		goto out5;
	}

	/* Fill in MAC address */
	for (i = 0; i < 6; ++i)
		ndev->dev_addr[i] = emacdata->mac_addr[i];

	/* Set some link defaults before we can find out real parameters */
	dev->phy.speed = SPEED_100;
	dev->phy.duplex = DUPLEX_FULL;
	dev->phy.autoneg = AUTONEG_DISABLE;
	dev->phy.pause = dev->phy.asym_pause = 0;
	dev->stop_timeout = STOP_TIMEOUT_100;
	init_timer(&dev->link_timer);
	dev->link_timer.function = emac_link_timer;
	dev->link_timer.data = (unsigned long)dev;
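
/* link_timer fires emac_link_timer(), which polls the PHY for link state
 * changes while the interface is running.
 */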
 
	/* Find PHY if any */
	dev->phy.dev = ndev;
	dev->phy.mode = emacdata->phy_mode;
	if (emacdata->phy_map != 0xffffffff) {
		u32 phy_map = emacdata->phy_map | busy_phy_map;
		u32 adv;

		DBG("%d: PHY maps %08x %08x" NL, dev->def->index,
		    emacdata->phy_map, busy_phy_map);

		EMAC_RX_CLK_TX(dev->def->index);

		dev->phy.mdio_read = emac_mdio_read;
		dev->phy.mdio_write = emac_mdio_write;

		/* Configure EMAC with defaults so we can at least use MDIO
		 * This is needed mostly for 440GX
		 */
		if (emac_phy_gpcs(dev->phy.mode)) {
			/* XXX
			 * Make GPCS PHY address equal to EMAC index.
			 * We probably should take into account busy_phy_map
			 * and/or phy_map here.
			 */
			dev->phy.address = dev->def->index;
		}

		emac_configure(dev);

		for (i = 0; i < 0x20; phy_map >>= 1, ++i)
			if (!(phy_map & 1)) {
				int r;
				busy_phy_map |= 1 << i;

				/* Quick check if there is a PHY at the address */
				r = emac_mdio_read(dev->ndev, i, MII_BMCR);
				if (r == 0xffff || r < 0)
					continue;
				if (!mii_phy_probe(&dev->phy, i))
					break;
			}
		if (i == 0x20) {
			printk(KERN_WARNING "emac%d: can't find PHY!\n",
			       dev->def->index);
			err = -ENXIO;
			goto out6;
		}

		/* Init PHY */
		if (dev->phy.def->ops->init)
			dev->phy.def->ops->init(&dev->phy);

		/* Disable any PHY features not supported by the platform */
		dev->phy.def->features &= ~emacdata->phy_feat_exc;

		/* Setup initial link parameters */
		if (dev->phy.features & SUPPORTED_Autoneg) {
			adv = dev->phy.features;
#if !defined(CONFIG_40x)
			adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
#endif
			/* Restart autonegotiation */
			dev->phy.def->ops->setup_aneg(&dev->phy, adv);
		} else {
			u32 f = dev->phy.def->features;
			int speed = SPEED_10, fd = DUPLEX_HALF;

			/* Select highest supported speed/duplex */
			if (f & SUPPORTED_1000baseT_Full) {
				speed = SPEED_1000;
				fd = DUPLEX_FULL;
			} else if (f & SUPPORTED_1000baseT_Half)
				speed = SPEED_1000;
			else if (f & SUPPORTED_100baseT_Full) {
				speed = SPEED_100;
				fd = DUPLEX_FULL;
			} else if (f & SUPPORTED_100baseT_Half)
				speed = SPEED_100;
			else if (f & SUPPORTED_10baseT_Full)
				fd = DUPLEX_FULL;

			/* Force link parameters */
			dev->phy.def->ops->setup_forced(&dev->phy, speed, fd);
		}
	} else {
		emac_reset(dev);

		/* PHY-less configuration.
		 * XXX I probably should move these settings to emacdata
		 */
		dev->phy.address = -1;
		dev->phy.features = SUPPORTED_100baseT_Full | SUPPORTED_MII;
		dev->phy.pause = 1;
	}
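
/* With a TAH attached the hardware can checksum and do scatter/gather on
 * transmit, so use the SG xmit path and advertise those features below;
 * gigabit-capable PHY modes also get a change_mtu hook.
 */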
 
	/* Fill in the driver function table */
	ndev->open = &emac_open;
	if (dev->tah_dev) {
		ndev->hard_start_xmit = &emac_start_xmit_sg;
		ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
	} else
		ndev->hard_start_xmit = &emac_start_xmit;
	ndev->tx_timeout = &emac_full_tx_reset;
	ndev->watchdog_timeo = 5 * HZ;
	ndev->stop = &emac_close;
	ndev->get_stats = &emac_stats;
	ndev->set_multicast_list = &emac_set_multicast_list;
	ndev->do_ioctl = &emac_ioctl;
	if (emac_phy_supports_gige(emacdata->phy_mode)) {
		ndev->change_mtu = &emac_change_mtu;
		dev->commac.ops = &emac_commac_sg_ops;
	}
	SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);

	netif_carrier_off(ndev);
	netif_stop_queue(ndev);
 
	err = register_netdev(ndev);
	if (err) {
		printk(KERN_ERR "emac%d: failed to register net device (%d)!\n",
		       dev->def->index, err);
		goto out6;
	}

	ocp_set_drvdata(ocpdev, dev);

	printk("%s: emac%d, MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
	       ndev->name, dev->def->index,
	       ndev->dev_addr[0], ndev->dev_addr[1], ndev->dev_addr[2],
	       ndev->dev_addr[3], ndev->dev_addr[4], ndev->dev_addr[5]);

	if (dev->phy.address >= 0)
		printk("%s: found %s PHY (0x%02x)\n", ndev->name,
		       dev->phy.def->name, dev->phy.address);

	emac_dbg_register(dev->def->index, dev);

	return 0;
      out6:
	iounmap(dev->emacp);
      out5:
	tah_fini(dev->tah_dev);
      out4:
	rgmii_fini(dev->rgmii_dev, dev->rgmii_input);
      out3:
	zmii_fini(dev->zmii_dev, dev->zmii_input);
      out2:
	mal_unregister_commac(dev->mal, &dev->commac);
      out:
	/* a net_device must be released with free_netdev(), not kfree() */
	free_netdev(ndev);
	return err;
}
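
/* Match any IBM OCP EMAC instance; the table ends with an
 * OCP_VENDOR_INVALID terminator.
 */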
 
static struct ocp_device_id emac_ids[] = {
	{ .vendor = OCP_VENDOR_IBM, .function = OCP_FUNC_EMAC },
	{ .vendor = OCP_VENDOR_INVALID },
};

static struct ocp_driver emac_driver = {
	.name = "emac",
	.id_table = emac_ids,
	.probe = emac_probe,
	.remove = emac_remove,
};
 
static int __init emac_init(void)
{
	printk(KERN_INFO DRV_DESC ", version " DRV_VERSION "\n");

	DBG(": init" NL);

	if (mal_init())
		return -ENODEV;

	EMAC_CLK_INTERNAL;
	if (ocp_register_driver(&emac_driver)) {
		EMAC_CLK_EXTERNAL;
		ocp_unregister_driver(&emac_driver);
		mal_exit();
		return -ENODEV;
	}
	EMAC_CLK_EXTERNAL;

	emac_init_debug();
	return 0;
}
 
static void __exit emac_exit(void)
{
	DBG(": exit" NL);
	ocp_unregister_driver(&emac_driver);
	mal_exit();
	emac_fini_debug();
}

module_init(emac_init);
module_exit(emac_exit);