2  * drivers/net/ibm_emac/ibm_emac_core.c
 
   4  * Driver for PowerPC 4xx on-chip ethernet controller.
 
   6  * Copyright (c) 2004, 2005 Zultys Technologies.
 
   7  * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
 
   9  * Based on original work by
 
  10  *      Matt Porter <mporter@kernel.crashing.org>
 
  11  *      (c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
 
  12  *      Armin Kuster <akuster@mvista.com>
 
  13  *      Johnnie Peters <jpeters@mvista.com>
 
  15  * This program is free software; you can redistribute  it and/or modify it
 
  16  * under  the terms of  the GNU General  Public License as published by the
 
  17  * Free Software Foundation;  either version 2 of the  License, or (at your
 
  18  * option) any later version.
 
  22 #include <linux/module.h>
 
  23 #include <linux/kernel.h>
 
  24 #include <linux/string.h>
 
  25 #include <linux/errno.h>
 
  26 #include <linux/interrupt.h>
 
  27 #include <linux/delay.h>
 
  28 #include <linux/init.h>
 
  29 #include <linux/types.h>
 
  30 #include <linux/netdevice.h>
 
  31 #include <linux/etherdevice.h>
 
  32 #include <linux/skbuff.h>
 
  33 #include <linux/crc32.h>
 
  34 #include <linux/ethtool.h>
 
  35 #include <linux/mii.h>
 
  36 #include <linux/bitops.h>
 
  38 #include <asm/processor.h>
 
  41 #include <asm/uaccess.h>
 
  44 #include "ibm_emac_core.h"
 
  45 #include "ibm_emac_debug.h"
 
  48  * Lack of dma_unmap_???? calls is intentional.
 
  50  * API-correct usage requires additional support state information to be 
 
  51  * maintained for every RX and TX buffer descriptor (BD). Unfortunately, due to
 
  52  * EMAC design (e.g. TX buffer passed from network stack can be split into
 
  53  * several BDs, dma_map_single/dma_map_page can be used to map particular BD),
 
  54  * maintaining such information will add additional overhead.
 
  55  * Current DMA API implementation for 4xx processors only ensures cache coherency
 
  56  * and dma_unmap_???? routines are empty and are likely to stay this way.
 
  57  * I decided to omit dma_unmap_??? calls because I don't want to add additional
 
  58  * complexity just for the sake of following some abstract API, when it doesn't
 
  59  * add any real benefit to the driver. I understand that this decision may be 
 
  60  * controversial, but I really tried to make code API-correct and efficient 
 
  61  * at the same time and didn't come up with code I liked :(.                --ebs
 
  64 #define DRV_NAME        "emac"
 
  65 #define DRV_VERSION     "3.54"
 
  66 #define DRV_DESC        "PPC 4xx OCP EMAC driver"
 
  68 MODULE_DESCRIPTION(DRV_DESC);
 
  70     ("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>");
 
  71 MODULE_LICENSE("GPL");
 
  73 /* minimum number of free TX descriptors required to wake up TX process */
 
  74 #define EMAC_TX_WAKEUP_THRESH           (NUM_TX_BUFF / 4)
 
  76 /* If packet size is less than this number, we allocate small skb and copy packet 
 
  77  * contents into it instead of just sending original big skb up
 
  79 #define EMAC_RX_COPY_THRESH             CONFIG_IBM_EMAC_RX_COPY_THRESHOLD
 
  81 /* Since multiple EMACs share MDIO lines in various ways, we need
 
  82  * to avoid re-using the same PHY ID in cases where the arch didn't
 
  83  * setup precise phy_map entries
 
  85 static u32 busy_phy_map;
 
  87 #if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX) && \
 
  88     (defined(CONFIG_405EP) || defined(CONFIG_440EP) || defined(CONFIG_440GR))
 
  89 /* 405EP has "EMAC to PHY Control Register" (CPC0_EPCTL) which can help us
 
  90  * with PHY RX clock problem.
 
  91  * 440EP/440GR has more sane SDR0_MFR register implementation than 440GX, which
 
  92  * also allows controlling each EMAC clock
 
  94 static inline void EMAC_RX_CLK_TX(int idx)
 
  97         local_irq_save(flags);
 
  99 #if defined(CONFIG_405EP)
 
 100         mtdcr(0xf3, mfdcr(0xf3) | (1 << idx));
 
 101 #else /* CONFIG_440EP || CONFIG_440GR */
 
 102         SDR_WRITE(DCRN_SDR_MFR, SDR_READ(DCRN_SDR_MFR) | (0x08000000 >> idx));
 
 105         local_irq_restore(flags);
 
 108 static inline void EMAC_RX_CLK_DEFAULT(int idx)
 
 111         local_irq_save(flags);
 
 113 #if defined(CONFIG_405EP)
 
 114         mtdcr(0xf3, mfdcr(0xf3) & ~(1 << idx));
 
 115 #else /* CONFIG_440EP */
 
 116         SDR_WRITE(DCRN_SDR_MFR, SDR_READ(DCRN_SDR_MFR) & ~(0x08000000 >> idx));
 
 119         local_irq_restore(flags);
 
 122 #define EMAC_RX_CLK_TX(idx)             ((void)0)
 
 123 #define EMAC_RX_CLK_DEFAULT(idx)        ((void)0)
 
 126 #if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX) && defined(CONFIG_440GX)
 
 127 /* We can switch Ethernet clock to the internal source through SDR0_MFR[ECS],
 
 128  * unfortunately this is less flexible than 440EP case, because it's a global 
 
 129  * setting for all EMACs, therefore we do this clock trick only during probe.
 
 131 #define EMAC_CLK_INTERNAL               SDR_WRITE(DCRN_SDR_MFR, \
 
 132                                             SDR_READ(DCRN_SDR_MFR) | 0x08000000)
 
 133 #define EMAC_CLK_EXTERNAL               SDR_WRITE(DCRN_SDR_MFR, \
 
 134                                             SDR_READ(DCRN_SDR_MFR) & ~0x08000000)
 
 136 #define EMAC_CLK_INTERNAL               ((void)0)
 
 137 #define EMAC_CLK_EXTERNAL               ((void)0)
 
 140 /* I don't want to litter system log with timeout errors 
 
 141  * when we have brain-damaged PHY.
 
 143 static inline void emac_report_timeout_error(struct ocp_enet_private *dev,
 
 146 #if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX)
 
 147         DBG("%d: %s" NL, dev->def->index, error);
 
 150                 printk(KERN_ERR "emac%d: %s\n", dev->def->index, error);
 
 154 /* PHY polling intervals */
 
 155 #define PHY_POLL_LINK_ON        HZ
 
 156 #define PHY_POLL_LINK_OFF       (HZ / 5)
 
 158 /* Graceful stop timeouts in us. 
 
 159  * We should allow up to 1 frame time (full-duplex, ignoring collisions) 
 
 161 #define STOP_TIMEOUT_10         1230    
 
 162 #define STOP_TIMEOUT_100        124
 
 163 #define STOP_TIMEOUT_1000       13
 
 164 #define STOP_TIMEOUT_1000_JUMBO 73
 
 166 /* Please, keep in sync with struct ibm_emac_stats/ibm_emac_error_stats */
 
 167 static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = {
 
 168         "rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum",
 
 169         "tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom",
 
 170         "rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu",
 
 171         "rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet",
 
 172         "rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error",
 
 173         "rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range",
 
 174         "rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun",
 
 175         "rx_bad_packet", "rx_runt_packet", "rx_short_event",
 
 176         "rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long",
 
 177         "rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors",
 
 178         "tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral",
 
 179         "tx_bd_excessive_collisions", "tx_bd_late_collision",
 
 180         "tx_bd_multple_collisions", "tx_bd_single_collision",
 
 181         "tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe",
 
 185 static irqreturn_t emac_irq(int irq, void *dev_instance);
 
 186 static void emac_clean_tx_ring(struct ocp_enet_private *dev);
 
 188 static inline int emac_phy_supports_gige(int phy_mode)
 
 190         return  phy_mode == PHY_MODE_GMII ||
 
 191                 phy_mode == PHY_MODE_RGMII ||
 
 192                 phy_mode == PHY_MODE_TBI ||
 
 193                 phy_mode == PHY_MODE_RTBI;
 
 196 static inline int emac_phy_gpcs(int phy_mode)
 
 198         return  phy_mode == PHY_MODE_TBI ||
 
 199                 phy_mode == PHY_MODE_RTBI;
 
 202 static inline void emac_tx_enable(struct ocp_enet_private *dev)
 
 204         struct emac_regs __iomem *p = dev->emacp;
 
 208         local_irq_save(flags);
 
 210         DBG("%d: tx_enable" NL, dev->def->index);
 
 212         r = in_be32(&p->mr0);
 
 213         if (!(r & EMAC_MR0_TXE))
 
 214                 out_be32(&p->mr0, r | EMAC_MR0_TXE);
 
 215         local_irq_restore(flags);
 
 218 static void emac_tx_disable(struct ocp_enet_private *dev)
 
 220         struct emac_regs __iomem *p = dev->emacp;
 
 224         local_irq_save(flags);
 
 226         DBG("%d: tx_disable" NL, dev->def->index);
 
 228         r = in_be32(&p->mr0);
 
 229         if (r & EMAC_MR0_TXE) {
 
 230                 int n = dev->stop_timeout;
 
 231                 out_be32(&p->mr0, r & ~EMAC_MR0_TXE);
 
 232                 while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n) {
 
 237                         emac_report_timeout_error(dev, "TX disable timeout");
 
 239         local_irq_restore(flags);
 
 242 static void emac_rx_enable(struct ocp_enet_private *dev)
 
 244         struct emac_regs __iomem *p = dev->emacp;
 
 248         local_irq_save(flags);
 
 249         if (unlikely(dev->commac.rx_stopped))
 
 252         DBG("%d: rx_enable" NL, dev->def->index);
 
 254         r = in_be32(&p->mr0);
 
 255         if (!(r & EMAC_MR0_RXE)) {
 
 256                 if (unlikely(!(r & EMAC_MR0_RXI))) {
 
 257                         /* Wait if previous async disable is still in progress */
 
 258                         int n = dev->stop_timeout;
 
 259                         while (!(r = in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
 
 264                                 emac_report_timeout_error(dev,
 
 265                                                           "RX disable timeout");
 
 267                 out_be32(&p->mr0, r | EMAC_MR0_RXE);
 
 270         local_irq_restore(flags);
 
 273 static void emac_rx_disable(struct ocp_enet_private *dev)
 
 275         struct emac_regs __iomem *p = dev->emacp;
 
 279         local_irq_save(flags);
 
 281         DBG("%d: rx_disable" NL, dev->def->index);
 
 283         r = in_be32(&p->mr0);
 
 284         if (r & EMAC_MR0_RXE) {
 
 285                 int n = dev->stop_timeout;
 
 286                 out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
 
 287                 while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
 
 292                         emac_report_timeout_error(dev, "RX disable timeout");
 
 294         local_irq_restore(flags);
 
 297 static inline void emac_rx_disable_async(struct ocp_enet_private *dev)
 
 299         struct emac_regs __iomem *p = dev->emacp;
 
 303         local_irq_save(flags);
 
 305         DBG("%d: rx_disable_async" NL, dev->def->index);
 
 307         r = in_be32(&p->mr0);
 
 308         if (r & EMAC_MR0_RXE)
 
 309                 out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
 
 310         local_irq_restore(flags);
 
 313 static int emac_reset(struct ocp_enet_private *dev)
 
 315         struct emac_regs __iomem *p = dev->emacp;
 
 319         DBG("%d: reset" NL, dev->def->index);
 
 321         local_irq_save(flags);
 
 323         if (!dev->reset_failed) {
 
 324                 /* 40x erratum suggests stopping RX channel before reset,
 
 327                 emac_rx_disable(dev);
 
 328                 emac_tx_disable(dev);
 
 331         out_be32(&p->mr0, EMAC_MR0_SRST);
 
 332         while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n)
 
 334         local_irq_restore(flags);
 
 337                 dev->reset_failed = 0;
 
 340                 emac_report_timeout_error(dev, "reset timeout");
 
 341                 dev->reset_failed = 1;
 
 346 static void emac_hash_mc(struct ocp_enet_private *dev)
 
 348         struct emac_regs __iomem *p = dev->emacp;
 
 350         struct dev_mc_list *dmi;
 
 352         DBG("%d: hash_mc %d" NL, dev->def->index, dev->ndev->mc_count);
 
 354         for (dmi = dev->ndev->mc_list; dmi; dmi = dmi->next) {
 
 356                 DECLARE_MAC_BUF(mac);
 
 358                      dev->def->index, print_mac(mac, dmi->dmi_addr));
 
 360                 bit = 63 - (ether_crc(ETH_ALEN, dmi->dmi_addr) >> 26);
 
 361                 gaht[bit >> 4] |= 0x8000 >> (bit & 0x0f);
 
 363         out_be32(&p->gaht1, gaht[0]);
 
 364         out_be32(&p->gaht2, gaht[1]);
 
 365         out_be32(&p->gaht3, gaht[2]);
 
 366         out_be32(&p->gaht4, gaht[3]);
 
 369 static inline u32 emac_iff2rmr(struct net_device *ndev)
 
 371         u32 r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE |
 
 374         if (ndev->flags & IFF_PROMISC)
 
 376         else if (ndev->flags & IFF_ALLMULTI || ndev->mc_count > 32)
 
 378         else if (ndev->mc_count > 0)
 
 384 static inline int emac_opb_mhz(void)
 
 386         return (ocp_sys_info.opb_bus_freq + 500000) / 1000000;
 
 390 static int emac_configure(struct ocp_enet_private *dev)
 
 392         struct emac_regs __iomem *p = dev->emacp;
 
 393         struct net_device *ndev = dev->ndev;
 
 397         DBG("%d: configure" NL, dev->def->index);
 
 399         if (emac_reset(dev) < 0)
 
 402         tah_reset(dev->tah_dev);
 
 405         r = EMAC_MR1_BASE(emac_opb_mhz()) | EMAC_MR1_VLE | EMAC_MR1_IST;
 
 406         if (dev->phy.duplex == DUPLEX_FULL)
 
 407                 r |= EMAC_MR1_FDE | EMAC_MR1_MWSW_001;
 
 408         dev->stop_timeout = STOP_TIMEOUT_10;
 
 409         switch (dev->phy.speed) {
 
 411                 if (emac_phy_gpcs(dev->phy.mode)) {
 
 412                         r |= EMAC_MR1_MF_1000GPCS |
 
 413                             EMAC_MR1_MF_IPPA(dev->phy.address);
 
 415                         /* Put some arbitrary OUI, Manuf & Rev IDs so we can
 
 416                          * identify this GPCS PHY later.
 
 418                         out_be32(&p->ipcr, 0xdeadbeef);
 
 420                         r |= EMAC_MR1_MF_1000;
 
 421                 r |= EMAC_MR1_RFS_16K;
 
 424                 if (dev->ndev->mtu > ETH_DATA_LEN) {
 
 426                         dev->stop_timeout = STOP_TIMEOUT_1000_JUMBO;
 
 428                         dev->stop_timeout = STOP_TIMEOUT_1000;
 
 431                 r |= EMAC_MR1_MF_100;
 
 432                 dev->stop_timeout = STOP_TIMEOUT_100;
 
 435                 r |= EMAC_MR1_RFS_4K;
 
 441                 rgmii_set_speed(dev->rgmii_dev, dev->rgmii_input,
 
 444                 zmii_set_speed(dev->zmii_dev, dev->zmii_input, dev->phy.speed);
 
 446 #if !defined(CONFIG_40x)
 
 447         /* on 40x erratum forces us to NOT use integrated flow control, 
 
 448          * let's hope it works on 44x ;)
 
 450         if (dev->phy.duplex == DUPLEX_FULL) {
 
 452                         r |= EMAC_MR1_EIFC | EMAC_MR1_APP;
 
 453                 else if (dev->phy.asym_pause)
 
 457         out_be32(&p->mr1, r);
 
 459         /* Set individual MAC address */
 
 460         out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
 
 461         out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
 
 462                  (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
 
 465         /* VLAN Tag Protocol ID */
 
 466         out_be32(&p->vtpid, 0x8100);
 
 468         /* Receive mode register */
 
 469         r = emac_iff2rmr(ndev);
 
 470         if (r & EMAC_RMR_MAE)
 
 472         out_be32(&p->rmr, r);
 
 474         /* FIFOs thresholds */
 
 475         r = EMAC_TMR1((EMAC_MAL_BURST_SIZE / EMAC_FIFO_ENTRY_SIZE) + 1,
 
 476                       EMAC_TX_FIFO_SIZE / 2 / EMAC_FIFO_ENTRY_SIZE);
 
 477         out_be32(&p->tmr1, r);
 
 478         out_be32(&p->trtr, EMAC_TRTR(EMAC_TX_FIFO_SIZE / 2));
 
 480         /* PAUSE frame is sent when RX FIFO reaches its high-water mark,
 
 481            there should be still enough space in FIFO to allow the our link
 
 482            partner time to process this frame and also time to send PAUSE 
 
 485            Here is the worst case scenario for the RX FIFO "headroom"
 
 486            (from "The Switch Book") (100Mbps, without preamble, inter-frame gap):
 
 488            1) One maximum-length frame on TX                    1522 bytes
 
 489            2) One PAUSE frame time                                64 bytes
 
 490            3) PAUSE frame decode time allowance                   64 bytes
 
 491            4) One maximum-length frame on RX                    1522 bytes
 
 492            5) Round-trip propagation delay of the link (100Mb)    15 bytes
 
 496            I chose to set high-water mark to RX_FIFO_SIZE / 4 (1024 bytes)
 
 497            low-water mark  to RX_FIFO_SIZE / 8 (512 bytes)
 
 499         r = EMAC_RWMR(EMAC_RX_FIFO_SIZE(gige) / 8 / EMAC_FIFO_ENTRY_SIZE,
 
 500                       EMAC_RX_FIFO_SIZE(gige) / 4 / EMAC_FIFO_ENTRY_SIZE);
 
 501         out_be32(&p->rwmr, r);
 
 503         /* Set PAUSE timer to the maximum */
 
 504         out_be32(&p->ptr, 0xffff);
 
 507         out_be32(&p->iser, EMAC_ISR_TXPE | EMAC_ISR_RXPE | /* EMAC_ISR_TXUE |
 
 508                  EMAC_ISR_RXOE | */ EMAC_ISR_OVR | EMAC_ISR_BP | EMAC_ISR_SE |
 
 509                  EMAC_ISR_ALE | EMAC_ISR_BFCS | EMAC_ISR_PTLE | EMAC_ISR_ORE |
 
 510                  EMAC_ISR_IRE | EMAC_ISR_TE);
 
 512         /* We need to take GPCS PHY out of isolate mode after EMAC reset */
 
 513         if (emac_phy_gpcs(dev->phy.mode)) 
 
 514                 mii_reset_phy(&dev->phy);
 
 520 static void emac_reinitialize(struct ocp_enet_private *dev)
 
 522         DBG("%d: reinitialize" NL, dev->def->index);
 
 524         if (!emac_configure(dev)) {
 
 531 static void emac_full_tx_reset(struct net_device *ndev)
 
 533         struct ocp_enet_private *dev = ndev->priv;
 
 534         struct ocp_func_emac_data *emacdata = dev->def->additions;
 
 536         DBG("%d: full_tx_reset" NL, dev->def->index);
 
 538         emac_tx_disable(dev);
 
 539         mal_disable_tx_channel(dev->mal, emacdata->mal_tx_chan);
 
 540         emac_clean_tx_ring(dev);
 
 541         dev->tx_cnt = dev->tx_slot = dev->ack_slot = 0;
 
 545         mal_enable_tx_channel(dev->mal, emacdata->mal_tx_chan);
 
 549         netif_wake_queue(ndev);
 
 552 static int __emac_mdio_read(struct ocp_enet_private *dev, u8 id, u8 reg)
 
 554         struct emac_regs __iomem *p = dev->emacp;
 
 558         DBG2("%d: mdio_read(%02x,%02x)" NL, dev->def->index, id, reg);
 
 560         /* Enable proper MDIO port */
 
 561         zmii_enable_mdio(dev->zmii_dev, dev->zmii_input);
 
 563         /* Wait for management interface to become idle */
 
 565         while (!emac_phy_done(in_be32(&p->stacr))) {
 
 571         /* Issue read command */
 
 573                  EMAC_STACR_BASE(emac_opb_mhz()) | EMAC_STACR_STAC_READ |
 
 574                  (reg & EMAC_STACR_PRA_MASK)
 
 575                  | ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT)
 
 578         /* Wait for read to complete */
 
 580         while (!emac_phy_done(r = in_be32(&p->stacr))) {
 
 586         if (unlikely(r & EMAC_STACR_PHYE)) {
 
 587                 DBG("%d: mdio_read(%02x, %02x) failed" NL, dev->def->index,
 
 592         r = ((r >> EMAC_STACR_PHYD_SHIFT) & EMAC_STACR_PHYD_MASK);
 
 593         DBG2("%d: mdio_read -> %04x" NL, dev->def->index, r);
 
 596         DBG("%d: MII management interface timeout (read)" NL, dev->def->index);
 
 600 static void __emac_mdio_write(struct ocp_enet_private *dev, u8 id, u8 reg,
 
 603         struct emac_regs __iomem *p = dev->emacp;
 
 606         DBG2("%d: mdio_write(%02x,%02x,%04x)" NL, dev->def->index, id, reg,
 
 609         /* Enable proper MDIO port */
 
 610         zmii_enable_mdio(dev->zmii_dev, dev->zmii_input);
 
 612         /* Wait for management interface to be idle */
 
 614         while (!emac_phy_done(in_be32(&p->stacr))) {
 
 620         /* Issue write command */
 
 622                  EMAC_STACR_BASE(emac_opb_mhz()) | EMAC_STACR_STAC_WRITE |
 
 623                  (reg & EMAC_STACR_PRA_MASK) |
 
 624                  ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT) |
 
 625                  (val << EMAC_STACR_PHYD_SHIFT) | EMAC_STACR_START);
 
 627         /* Wait for write to complete */
 
 629         while (!emac_phy_done(in_be32(&p->stacr))) {
 
 636         DBG("%d: MII management interface timeout (write)" NL, dev->def->index);
 
 639 static int emac_mdio_read(struct net_device *ndev, int id, int reg)
 
 641         struct ocp_enet_private *dev = ndev->priv;
 
 645         res = __emac_mdio_read(dev->mdio_dev ? dev->mdio_dev : dev, (u8) id,
 
 651 static void emac_mdio_write(struct net_device *ndev, int id, int reg, int val)
 
 653         struct ocp_enet_private *dev = ndev->priv;
 
 656         __emac_mdio_write(dev->mdio_dev ? dev->mdio_dev : dev, (u8) id,
 
 657                           (u8) reg, (u16) val);
 
 662 static void emac_set_multicast_list(struct net_device *ndev)
 
 664         struct ocp_enet_private *dev = ndev->priv;
 
 665         struct emac_regs __iomem *p = dev->emacp;
 
 666         u32 rmr = emac_iff2rmr(ndev);
 
 668         DBG("%d: multicast %08x" NL, dev->def->index, rmr);
 
 669         BUG_ON(!netif_running(dev->ndev));
 
 671         /* I decided to relax register access rules here to avoid
 
 674          * There is a real problem with EMAC4 core if we use MWSW_001 bit 
 
 675          * in MR1 register and do a full EMAC reset.
 
 676          * One TX BD status update is delayed and, after EMAC reset, it 
 
 677          * never happens, resulting in TX hung (it'll be recovered by TX 
 
 678          * timeout handler eventually, but this is just gross).
 
 679          * So we either have to do full TX reset or try to cheat here :)
 
 681          * The only required change is to RX mode register, so I *think* all
 
 682          * we need is just to stop RX channel. This seems to work on all
 
 685         emac_rx_disable(dev);
 
 686         if (rmr & EMAC_RMR_MAE)
 
 688         out_be32(&p->rmr, rmr);
 
 693 static int emac_resize_rx_ring(struct ocp_enet_private *dev, int new_mtu)
 
 695         struct ocp_func_emac_data *emacdata = dev->def->additions;
 
 696         int rx_sync_size = emac_rx_sync_size(new_mtu);
 
 697         int rx_skb_size = emac_rx_skb_size(new_mtu);
 
 700         emac_rx_disable(dev);
 
 701         mal_disable_rx_channel(dev->mal, emacdata->mal_rx_chan);
 
 703         if (dev->rx_sg_skb) {
 
 704                 ++dev->estats.rx_dropped_resize;
 
 705                 dev_kfree_skb(dev->rx_sg_skb);
 
 706                 dev->rx_sg_skb = NULL;
 
 709         /* Make a first pass over RX ring and mark BDs ready, dropping 
 
 710          * non-processed packets on the way. We need this as a separate pass
 
 711          * to simplify error recovery in the case of allocation failure later.
 
 713         for (i = 0; i < NUM_RX_BUFF; ++i) {
 
 714                 if (dev->rx_desc[i].ctrl & MAL_RX_CTRL_FIRST)
 
 715                         ++dev->estats.rx_dropped_resize;
 
 717                 dev->rx_desc[i].data_len = 0;
 
 718                 dev->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY |
 
 719                     (i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
 
 722         /* Reallocate RX ring only if bigger skb buffers are required */
 
 723         if (rx_skb_size <= dev->rx_skb_size)
 
 726         /* Second pass, allocate new skbs */
 
 727         for (i = 0; i < NUM_RX_BUFF; ++i) {
 
 728                 struct sk_buff *skb = alloc_skb(rx_skb_size, GFP_ATOMIC);
 
 734                 BUG_ON(!dev->rx_skb[i]);
 
 735                 dev_kfree_skb(dev->rx_skb[i]);
 
 737                 skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
 
 738                 dev->rx_desc[i].data_ptr =
 
 739                     dma_map_single(dev->ldev, skb->data - 2, rx_sync_size,
 
 740                                    DMA_FROM_DEVICE) + 2;
 
 741                 dev->rx_skb[i] = skb;
 
 744         /* Check if we need to change "Jumbo" bit in MR1 */
 
 745         if ((new_mtu > ETH_DATA_LEN) ^ (dev->ndev->mtu > ETH_DATA_LEN)) {
 
 746                 /* This is to prevent starting RX channel in emac_rx_enable() */
 
 747                 dev->commac.rx_stopped = 1;
 
 749                 dev->ndev->mtu = new_mtu;
 
 750                 emac_full_tx_reset(dev->ndev);
 
 753         mal_set_rcbs(dev->mal, emacdata->mal_rx_chan, emac_rx_size(new_mtu));
 
 756         dev->commac.rx_stopped = dev->rx_slot = 0;
 
 757         mal_enable_rx_channel(dev->mal, emacdata->mal_rx_chan);
 
 763 /* Process ctx, rtnl_lock semaphore */
 
 764 static int emac_change_mtu(struct net_device *ndev, int new_mtu)
 
 766         struct ocp_enet_private *dev = ndev->priv;
 
 769         if (new_mtu < EMAC_MIN_MTU || new_mtu > EMAC_MAX_MTU)
 
 772         DBG("%d: change_mtu(%d)" NL, dev->def->index, new_mtu);
 
 775         if (netif_running(ndev)) {
 
 776                 /* Check if we really need to reinitalize RX ring */
 
 777                 if (emac_rx_skb_size(ndev->mtu) != emac_rx_skb_size(new_mtu))
 
 778                         ret = emac_resize_rx_ring(dev, new_mtu);
 
 783                 dev->rx_skb_size = emac_rx_skb_size(new_mtu);
 
 784                 dev->rx_sync_size = emac_rx_sync_size(new_mtu);
 
 791 static void emac_clean_tx_ring(struct ocp_enet_private *dev)
 
 794         for (i = 0; i < NUM_TX_BUFF; ++i) {
 
 795                 if (dev->tx_skb[i]) {
 
 796                         dev_kfree_skb(dev->tx_skb[i]);
 
 797                         dev->tx_skb[i] = NULL;
 
 798                         if (dev->tx_desc[i].ctrl & MAL_TX_CTRL_READY)
 
 799                                 ++dev->estats.tx_dropped;
 
 801                 dev->tx_desc[i].ctrl = 0;
 
 802                 dev->tx_desc[i].data_ptr = 0;
 
 806 static void emac_clean_rx_ring(struct ocp_enet_private *dev)
 
 809         for (i = 0; i < NUM_RX_BUFF; ++i)
 
 810                 if (dev->rx_skb[i]) {
 
 811                         dev->rx_desc[i].ctrl = 0;
 
 812                         dev_kfree_skb(dev->rx_skb[i]);
 
 813                         dev->rx_skb[i] = NULL;
 
 814                         dev->rx_desc[i].data_ptr = 0;
 
 817         if (dev->rx_sg_skb) {
 
 818                 dev_kfree_skb(dev->rx_sg_skb);
 
 819                 dev->rx_sg_skb = NULL;
 
 823 static inline int emac_alloc_rx_skb(struct ocp_enet_private *dev, int slot,
 
 826         struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags);
 
 830         dev->rx_skb[slot] = skb;
 
 831         dev->rx_desc[slot].data_len = 0;
 
 833         skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
 
 834         dev->rx_desc[slot].data_ptr = 
 
 835             dma_map_single(dev->ldev, skb->data - 2, dev->rx_sync_size, 
 
 836                            DMA_FROM_DEVICE) + 2;
 
 838         dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
 
 839             (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
 
 844 static void emac_print_link_status(struct ocp_enet_private *dev)
 
 846         if (netif_carrier_ok(dev->ndev))
 
 847                 printk(KERN_INFO "%s: link is up, %d %s%s\n",
 
 848                        dev->ndev->name, dev->phy.speed,
 
 849                        dev->phy.duplex == DUPLEX_FULL ? "FDX" : "HDX",
 
 850                        dev->phy.pause ? ", pause enabled" :
 
 851                        dev->phy.asym_pause ? ", assymetric pause enabled" : "");
 
 853                 printk(KERN_INFO "%s: link is down\n", dev->ndev->name);
 
 856 /* Process ctx, rtnl_lock semaphore */
 
 857 static int emac_open(struct net_device *ndev)
 
 859         struct ocp_enet_private *dev = ndev->priv;
 
 860         struct ocp_func_emac_data *emacdata = dev->def->additions;
 
 863         DBG("%d: open" NL, dev->def->index);
 
 865         /* Setup error IRQ handler */
 
 866         err = request_irq(dev->def->irq, emac_irq, 0, "EMAC", dev);
 
 868                 printk(KERN_ERR "%s: failed to request IRQ %d\n",
 
 869                        ndev->name, dev->def->irq);
 
 873         /* Allocate RX ring */
 
 874         for (i = 0; i < NUM_RX_BUFF; ++i)
 
 875                 if (emac_alloc_rx_skb(dev, i, GFP_KERNEL)) {
 
 876                         printk(KERN_ERR "%s: failed to allocate RX ring\n",
 
 882         dev->tx_cnt = dev->tx_slot = dev->ack_slot = dev->rx_slot =
 
 883             dev->commac.rx_stopped = 0;
 
 884         dev->rx_sg_skb = NULL;
 
 886         if (dev->phy.address >= 0) {
 
 887                 int link_poll_interval;
 
 888                 if (dev->phy.def->ops->poll_link(&dev->phy)) {
 
 889                         dev->phy.def->ops->read_link(&dev->phy);
 
 890                         EMAC_RX_CLK_DEFAULT(dev->def->index);
 
 891                         netif_carrier_on(dev->ndev);
 
 892                         link_poll_interval = PHY_POLL_LINK_ON;
 
 894                         EMAC_RX_CLK_TX(dev->def->index);
 
 895                         netif_carrier_off(dev->ndev);
 
 896                         link_poll_interval = PHY_POLL_LINK_OFF;
 
 898                 mod_timer(&dev->link_timer, jiffies + link_poll_interval);
 
 899                 emac_print_link_status(dev);
 
 901                 netif_carrier_on(dev->ndev);
 
 904         mal_poll_add(dev->mal, &dev->commac);
 
 905         mal_enable_tx_channel(dev->mal, emacdata->mal_tx_chan);
 
 906         mal_set_rcbs(dev->mal, emacdata->mal_rx_chan, emac_rx_size(ndev->mtu));
 
 907         mal_enable_rx_channel(dev->mal, emacdata->mal_rx_chan);
 
 910         netif_start_queue(ndev);
 
 915         emac_clean_rx_ring(dev);
 
 916         free_irq(dev->def->irq, dev);
 
 921 static int emac_link_differs(struct ocp_enet_private *dev)
 
 923         u32 r = in_be32(&dev->emacp->mr1);
 
 925         int duplex = r & EMAC_MR1_FDE ? DUPLEX_FULL : DUPLEX_HALF;
 
 926         int speed, pause, asym_pause;
 
 928         if (r & EMAC_MR1_MF_1000)
 
 930         else if (r & EMAC_MR1_MF_100)
 
 935         switch (r & (EMAC_MR1_EIFC | EMAC_MR1_APP)) {
 
 936         case (EMAC_MR1_EIFC | EMAC_MR1_APP):
 
 945                 pause = asym_pause = 0;
 
 947         return speed != dev->phy.speed || duplex != dev->phy.duplex ||
 
 948             pause != dev->phy.pause || asym_pause != dev->phy.asym_pause;
 
 952 static void emac_link_timer(unsigned long data)
 
 954         struct ocp_enet_private *dev = (struct ocp_enet_private *)data;
 
 955         int link_poll_interval;
 
 957         DBG2("%d: link timer" NL, dev->def->index);
 
 959         if (dev->phy.def->ops->poll_link(&dev->phy)) {
 
 960                 if (!netif_carrier_ok(dev->ndev)) {
 
 961                         EMAC_RX_CLK_DEFAULT(dev->def->index);
 
 963                         /* Get new link parameters */
 
 964                         dev->phy.def->ops->read_link(&dev->phy);
 
 966                         if (dev->tah_dev || emac_link_differs(dev))
 
 967                                 emac_full_tx_reset(dev->ndev);
 
 969                         netif_carrier_on(dev->ndev);
 
 970                         emac_print_link_status(dev);
 
 972                 link_poll_interval = PHY_POLL_LINK_ON;
 
 974                 if (netif_carrier_ok(dev->ndev)) {
 
 975                         EMAC_RX_CLK_TX(dev->def->index);
 
 976 #if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX)
 
 977                         emac_reinitialize(dev);
 
 979                         netif_carrier_off(dev->ndev);
 
 980                         emac_print_link_status(dev);
 
 983                 /* Retry reset if the previous attempt failed.
 
 984                  * This is needed mostly for CONFIG_IBM_EMAC_PHY_RX_CLK_FIX
 
 985                  * case, but I left it here because it shouldn't trigger for
 
 988                 if (unlikely(dev->reset_failed))
 
 989                         emac_reinitialize(dev);
 
 991                 link_poll_interval = PHY_POLL_LINK_OFF;
 
 993         mod_timer(&dev->link_timer, jiffies + link_poll_interval);
 
 997 static void emac_force_link_update(struct ocp_enet_private *dev)
 
 999         netif_carrier_off(dev->ndev);
 
1000         if (timer_pending(&dev->link_timer))
 
1001                 mod_timer(&dev->link_timer, jiffies + PHY_POLL_LINK_OFF);
 
1004 /* Process ctx, rtnl_lock semaphore */
 
1005 static int emac_close(struct net_device *ndev)
 
1007         struct ocp_enet_private *dev = ndev->priv;
 
1008         struct ocp_func_emac_data *emacdata = dev->def->additions;
 
1010         DBG("%d: close" NL, dev->def->index);
 
1014         if (dev->phy.address >= 0)
 
1015                 del_timer_sync(&dev->link_timer);
 
1017         netif_stop_queue(ndev);
 
1018         emac_rx_disable(dev);
 
1019         emac_tx_disable(dev);
 
1020         mal_disable_rx_channel(dev->mal, emacdata->mal_rx_chan);
 
1021         mal_disable_tx_channel(dev->mal, emacdata->mal_tx_chan);
 
1022         mal_poll_del(dev->mal, &dev->commac);
 
1025         emac_clean_tx_ring(dev);
 
1026         emac_clean_rx_ring(dev);
 
1027         free_irq(dev->def->irq, dev);
 
1032 static inline u16 emac_tx_csum(struct ocp_enet_private *dev,
 
1033                                struct sk_buff *skb)
 
1035 #if defined(CONFIG_IBM_EMAC_TAH)
 
1036         if (skb->ip_summed == CHECKSUM_PARTIAL) {
 
1037                 ++dev->stats.tx_packets_csum;
 
1038                 return EMAC_TX_CTRL_TAH_CSUM;
 
1044 static inline int emac_xmit_finish(struct ocp_enet_private *dev, int len)
 
1046         struct emac_regs __iomem *p = dev->emacp;
 
1047         struct net_device *ndev = dev->ndev;
 
1049         /* Send the packet out */
 
1050         out_be32(&p->tmr0, EMAC_TMR0_XMIT);
 
1052         if (unlikely(++dev->tx_cnt == NUM_TX_BUFF)) {
 
1053                 netif_stop_queue(ndev);
 
1054                 DBG2("%d: stopped TX queue" NL, dev->def->index);
 
1057         ndev->trans_start = jiffies;
 
1058         ++dev->stats.tx_packets;
 
1059         dev->stats.tx_bytes += len;
 
1065 static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 
1067         struct ocp_enet_private *dev = ndev->priv;
 
1068         unsigned int len = skb->len;
 
1071         u16 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
 
1072             MAL_TX_CTRL_LAST | emac_tx_csum(dev, skb);
 
1074         slot = dev->tx_slot++;
 
1075         if (dev->tx_slot == NUM_TX_BUFF) {
 
1077                 ctrl |= MAL_TX_CTRL_WRAP;
 
1080         DBG2("%d: xmit(%u) %d" NL, dev->def->index, len, slot);
 
1082         dev->tx_skb[slot] = skb;
 
1083         dev->tx_desc[slot].data_ptr = dma_map_single(dev->ldev, skb->data, len,
 
1085         dev->tx_desc[slot].data_len = (u16) len;
 
1087         dev->tx_desc[slot].ctrl = ctrl;
 
1089         return emac_xmit_finish(dev, len);
 
1092 #if defined(CONFIG_IBM_EMAC_TAH)
 
1093 static inline int emac_xmit_split(struct ocp_enet_private *dev, int slot,
 
1094                                   u32 pd, int len, int last, u16 base_ctrl)
 
1097                 u16 ctrl = base_ctrl;
 
1098                 int chunk = min(len, MAL_MAX_TX_SIZE);
 
1101                 slot = (slot + 1) % NUM_TX_BUFF;
 
1104                         ctrl |= MAL_TX_CTRL_LAST;
 
1105                 if (slot == NUM_TX_BUFF - 1)
 
1106                         ctrl |= MAL_TX_CTRL_WRAP;
 
1108                 dev->tx_skb[slot] = NULL;
 
1109                 dev->tx_desc[slot].data_ptr = pd;
 
1110                 dev->tx_desc[slot].data_len = (u16) chunk;
 
1111                 dev->tx_desc[slot].ctrl = ctrl;
 
1122 /* BHs disabled (SG version for TAH equipped EMACs) */
 
1123 static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
 
1125         struct ocp_enet_private *dev = ndev->priv;
 
1126         int nr_frags = skb_shinfo(skb)->nr_frags;
 
1127         int len = skb->len, chunk;
 
1132         /* This is common "fast" path */
 
1133         if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE))
 
1134                 return emac_start_xmit(skb, ndev);
 
1136         len -= skb->data_len;
 
1138         /* Note, this is only an *estimation*, we can still run out of empty
 
1139          * slots because of the additional fragmentation into
 
1140          * MAL_MAX_TX_SIZE-sized chunks
 
1142         if (unlikely(dev->tx_cnt + nr_frags + mal_tx_chunks(len) > NUM_TX_BUFF))
 
1145         ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
 
1146             emac_tx_csum(dev, skb);
 
1147         slot = dev->tx_slot;
 
1150         dev->tx_skb[slot] = NULL;
 
1151         chunk = min(len, MAL_MAX_TX_SIZE);
 
1152         dev->tx_desc[slot].data_ptr = pd =
 
1153             dma_map_single(dev->ldev, skb->data, len, DMA_TO_DEVICE);
 
1154         dev->tx_desc[slot].data_len = (u16) chunk;
 
1157                 slot = emac_xmit_split(dev, slot, pd + chunk, len, !nr_frags,
 
1160         for (i = 0; i < nr_frags; ++i) {
 
1161                 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
 
1164                 if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
 
1167                 pd = dma_map_page(dev->ldev, frag->page, frag->page_offset, len,
 
1170                 slot = emac_xmit_split(dev, slot, pd, len, i == nr_frags - 1,
 
1174         DBG2("%d: xmit_sg(%u) %d - %d" NL, dev->def->index, skb->len,
 
1175              dev->tx_slot, slot);
 
1177         /* Attach skb to the last slot so we don't release it too early */
 
1178         dev->tx_skb[slot] = skb;
 
1180         /* Send the packet out */
 
1181         if (dev->tx_slot == NUM_TX_BUFF - 1)
 
1182                 ctrl |= MAL_TX_CTRL_WRAP;
 
1184         dev->tx_desc[dev->tx_slot].ctrl = ctrl;
 
1185         dev->tx_slot = (slot + 1) % NUM_TX_BUFF;
 
1187         return emac_xmit_finish(dev, skb->len);
 
1190         /* Well, too bad. Our previous estimation was overly optimistic. 
 
1193         while (slot != dev->tx_slot) {
 
1194                 dev->tx_desc[slot].ctrl = 0;
 
1197                         slot = NUM_TX_BUFF - 1;
 
1199         ++dev->estats.tx_undo;
 
1202         netif_stop_queue(ndev);
 
1203         DBG2("%d: stopped TX queue" NL, dev->def->index);
 
1207 # define emac_start_xmit_sg     emac_start_xmit
 
1208 #endif  /* !defined(CONFIG_IBM_EMAC_TAH) */
 
1211 static void emac_parse_tx_error(struct ocp_enet_private *dev, u16 ctrl)
 
1213         struct ibm_emac_error_stats *st = &dev->estats;
 
1214         DBG("%d: BD TX error %04x" NL, dev->def->index, ctrl);
 
1217         if (ctrl & EMAC_TX_ST_BFCS)
 
1218                 ++st->tx_bd_bad_fcs;
 
1219         if (ctrl & EMAC_TX_ST_LCS)
 
1220                 ++st->tx_bd_carrier_loss;
 
1221         if (ctrl & EMAC_TX_ST_ED)
 
1222                 ++st->tx_bd_excessive_deferral;
 
1223         if (ctrl & EMAC_TX_ST_EC)
 
1224                 ++st->tx_bd_excessive_collisions;
 
1225         if (ctrl & EMAC_TX_ST_LC)
 
1226                 ++st->tx_bd_late_collision;
 
1227         if (ctrl & EMAC_TX_ST_MC)
 
1228                 ++st->tx_bd_multple_collisions;
 
1229         if (ctrl & EMAC_TX_ST_SC)
 
1230                 ++st->tx_bd_single_collision;
 
1231         if (ctrl & EMAC_TX_ST_UR)
 
1232                 ++st->tx_bd_underrun;
 
1233         if (ctrl & EMAC_TX_ST_SQE)
 
1237 static void emac_poll_tx(void *param)
 
1239         struct ocp_enet_private *dev = param;
 
1240         DBG2("%d: poll_tx, %d %d" NL, dev->def->index, dev->tx_cnt,
 
1245                 int slot = dev->ack_slot, n = 0;
 
1247                 ctrl = dev->tx_desc[slot].ctrl;
 
1248                 if (!(ctrl & MAL_TX_CTRL_READY)) {
 
1249                         struct sk_buff *skb = dev->tx_skb[slot];
 
1254                                 dev->tx_skb[slot] = NULL;
 
1256                         slot = (slot + 1) % NUM_TX_BUFF;
 
1258                         if (unlikely(EMAC_IS_BAD_TX(ctrl)))
 
1259                                 emac_parse_tx_error(dev, ctrl);
 
1265                         dev->ack_slot = slot;
 
1266                         if (netif_queue_stopped(dev->ndev) &&
 
1267                             dev->tx_cnt < EMAC_TX_WAKEUP_THRESH)
 
1268                                 netif_wake_queue(dev->ndev);
 
1270                         DBG2("%d: tx %d pkts" NL, dev->def->index, n);
 
1275 static inline void emac_recycle_rx_skb(struct ocp_enet_private *dev, int slot,
 
1278         struct sk_buff *skb = dev->rx_skb[slot];
 
1279         DBG2("%d: recycle %d %d" NL, dev->def->index, slot, len);
 
1282                 dma_map_single(dev->ldev, skb->data - 2, 
 
1283                                EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE);
 
1285         dev->rx_desc[slot].data_len = 0;
 
1287         dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
 
1288             (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
 
1291 static void emac_parse_rx_error(struct ocp_enet_private *dev, u16 ctrl)
 
1293         struct ibm_emac_error_stats *st = &dev->estats;
 
1294         DBG("%d: BD RX error %04x" NL, dev->def->index, ctrl);
 
1297         if (ctrl & EMAC_RX_ST_OE)
 
1298                 ++st->rx_bd_overrun;
 
1299         if (ctrl & EMAC_RX_ST_BP)
 
1300                 ++st->rx_bd_bad_packet;
 
1301         if (ctrl & EMAC_RX_ST_RP)
 
1302                 ++st->rx_bd_runt_packet;
 
1303         if (ctrl & EMAC_RX_ST_SE)
 
1304                 ++st->rx_bd_short_event;
 
1305         if (ctrl & EMAC_RX_ST_AE)
 
1306                 ++st->rx_bd_alignment_error;
 
1307         if (ctrl & EMAC_RX_ST_BFCS)
 
1308                 ++st->rx_bd_bad_fcs;
 
1309         if (ctrl & EMAC_RX_ST_PTL)
 
1310                 ++st->rx_bd_packet_too_long;
 
1311         if (ctrl & EMAC_RX_ST_ORE)
 
1312                 ++st->rx_bd_out_of_range;
 
1313         if (ctrl & EMAC_RX_ST_IRE)
 
1314                 ++st->rx_bd_in_range;
 
1317 static inline void emac_rx_csum(struct ocp_enet_private *dev,
 
1318                                 struct sk_buff *skb, u16 ctrl)
 
1320 #if defined(CONFIG_IBM_EMAC_TAH)
 
1321         if (!ctrl && dev->tah_dev) {
 
1322                 skb->ip_summed = CHECKSUM_UNNECESSARY;
 
1323                 ++dev->stats.rx_packets_csum;
 
1328 static inline int emac_rx_sg_append(struct ocp_enet_private *dev, int slot)
 
1330         if (likely(dev->rx_sg_skb != NULL)) {
 
1331                 int len = dev->rx_desc[slot].data_len;
 
1332                 int tot_len = dev->rx_sg_skb->len + len;
 
1334                 if (unlikely(tot_len + 2 > dev->rx_skb_size)) {
 
1335                         ++dev->estats.rx_dropped_mtu;
 
1336                         dev_kfree_skb(dev->rx_sg_skb);
 
1337                         dev->rx_sg_skb = NULL;
 
1339                         cacheable_memcpy(skb_tail_pointer(dev->rx_sg_skb),
 
1340                                          dev->rx_skb[slot]->data, len);
 
1341                         skb_put(dev->rx_sg_skb, len);
 
1342                         emac_recycle_rx_skb(dev, slot, len);
 
1346         emac_recycle_rx_skb(dev, slot, 0);
 
1351 static int emac_poll_rx(void *param, int budget)
 
1353         struct ocp_enet_private *dev = param;
 
1354         int slot = dev->rx_slot, received = 0;
 
1356         DBG2("%d: poll_rx(%d)" NL, dev->def->index, budget);
 
1359         while (budget > 0) {
 
1361                 struct sk_buff *skb;
 
1362                 u16 ctrl = dev->rx_desc[slot].ctrl;
 
1364                 if (ctrl & MAL_RX_CTRL_EMPTY)
 
1367                 skb = dev->rx_skb[slot];
 
1369                 len = dev->rx_desc[slot].data_len;
 
1371                 if (unlikely(!MAL_IS_SINGLE_RX(ctrl)))
 
1374                 ctrl &= EMAC_BAD_RX_MASK;
 
1375                 if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
 
1376                         emac_parse_rx_error(dev, ctrl);
 
1377                         ++dev->estats.rx_dropped_error;
 
1378                         emac_recycle_rx_skb(dev, slot, 0);
 
1383                 if (len && len < EMAC_RX_COPY_THRESH) {
 
1384                         struct sk_buff *copy_skb =
 
1385                             alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2, GFP_ATOMIC);
 
1386                         if (unlikely(!copy_skb))
 
1389                         skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2);
 
1390                         cacheable_memcpy(copy_skb->data - 2, skb->data - 2,
 
1392                         emac_recycle_rx_skb(dev, slot, len);
 
1394                 } else if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC)))
 
1399                 skb->protocol = eth_type_trans(skb, dev->ndev);
 
1400                 emac_rx_csum(dev, skb, ctrl);
 
1402                 if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
 
1403                         ++dev->estats.rx_dropped_stack;
 
1405                 ++dev->stats.rx_packets;
 
1407                 dev->stats.rx_bytes += len;
 
1408                 slot = (slot + 1) % NUM_RX_BUFF;
 
1413                 if (ctrl & MAL_RX_CTRL_FIRST) {
 
1414                         BUG_ON(dev->rx_sg_skb);
 
1415                         if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) {
 
1416                                 DBG("%d: rx OOM %d" NL, dev->def->index, slot);
 
1417                                 ++dev->estats.rx_dropped_oom;
 
1418                                 emac_recycle_rx_skb(dev, slot, 0);
 
1420                                 dev->rx_sg_skb = skb;
 
1423                 } else if (!emac_rx_sg_append(dev, slot) &&
 
1424                            (ctrl & MAL_RX_CTRL_LAST)) {
 
1426                         skb = dev->rx_sg_skb;
 
1427                         dev->rx_sg_skb = NULL;
 
1429                         ctrl &= EMAC_BAD_RX_MASK;
 
1430                         if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
 
1431                                 emac_parse_rx_error(dev, ctrl);
 
1432                                 ++dev->estats.rx_dropped_error;
 
1440                 DBG("%d: rx OOM %d" NL, dev->def->index, slot);
 
1441                 /* Drop the packet and recycle skb */
 
1442                 ++dev->estats.rx_dropped_oom;
 
1443                 emac_recycle_rx_skb(dev, slot, 0);
 
1448                 DBG2("%d: rx %d BDs" NL, dev->def->index, received);
 
1449                 dev->rx_slot = slot;
 
1452         if (unlikely(budget && dev->commac.rx_stopped)) {
 
1453                 struct ocp_func_emac_data *emacdata = dev->def->additions;
 
1456                 if (!(dev->rx_desc[slot].ctrl & MAL_RX_CTRL_EMPTY)) {
 
1457                         DBG2("%d: rx restart" NL, dev->def->index);
 
1462                 if (dev->rx_sg_skb) {
 
1463                         DBG2("%d: dropping partial rx packet" NL,
 
1465                         ++dev->estats.rx_dropped_error;
 
1466                         dev_kfree_skb(dev->rx_sg_skb);
 
1467                         dev->rx_sg_skb = NULL;
 
1470                 dev->commac.rx_stopped = 0;
 
1471                 mal_enable_rx_channel(dev->mal, emacdata->mal_rx_chan);
 
1472                 emac_rx_enable(dev);
 
1479 static int emac_peek_rx(void *param)
 
1481         struct ocp_enet_private *dev = param;
 
1482         return !(dev->rx_desc[dev->rx_slot].ctrl & MAL_RX_CTRL_EMPTY);
 
1486 static int emac_peek_rx_sg(void *param)
 
1488         struct ocp_enet_private *dev = param;
 
1489         int slot = dev->rx_slot;
 
1491                 u16 ctrl = dev->rx_desc[slot].ctrl;
 
1492                 if (ctrl & MAL_RX_CTRL_EMPTY)
 
1494                 else if (ctrl & MAL_RX_CTRL_LAST)
 
1497                 slot = (slot + 1) % NUM_RX_BUFF;
 
1499                 /* I'm just being paranoid here :) */
 
1500                 if (unlikely(slot == dev->rx_slot))
 
1506 static void emac_rxde(void *param)
 
1508         struct ocp_enet_private *dev = param;
 
1509         ++dev->estats.rx_stopped;
 
1510         emac_rx_disable_async(dev);
 
1514 static irqreturn_t emac_irq(int irq, void *dev_instance)
 
1516         struct ocp_enet_private *dev = dev_instance;
 
1517         struct emac_regs __iomem *p = dev->emacp;
 
1518         struct ibm_emac_error_stats *st = &dev->estats;
 
1520         u32 isr = in_be32(&p->isr);
 
1521         out_be32(&p->isr, isr);
 
1523         DBG("%d: isr = %08x" NL, dev->def->index, isr);
 
1525         if (isr & EMAC_ISR_TXPE)
 
1527         if (isr & EMAC_ISR_RXPE)
 
1529         if (isr & EMAC_ISR_TXUE)
 
1531         if (isr & EMAC_ISR_RXOE)
 
1532                 ++st->rx_fifo_overrun;
 
1533         if (isr & EMAC_ISR_OVR)
 
1535         if (isr & EMAC_ISR_BP)
 
1536                 ++st->rx_bad_packet;
 
1537         if (isr & EMAC_ISR_RP)
 
1538                 ++st->rx_runt_packet;
 
1539         if (isr & EMAC_ISR_SE)
 
1540                 ++st->rx_short_event;
 
1541         if (isr & EMAC_ISR_ALE)
 
1542                 ++st->rx_alignment_error;
 
1543         if (isr & EMAC_ISR_BFCS)
 
1545         if (isr & EMAC_ISR_PTLE)
 
1546                 ++st->rx_packet_too_long;
 
1547         if (isr & EMAC_ISR_ORE)
 
1548                 ++st->rx_out_of_range;
 
1549         if (isr & EMAC_ISR_IRE)
 
1551         if (isr & EMAC_ISR_SQE)
 
1553         if (isr & EMAC_ISR_TE)
 
1559 static struct net_device_stats *emac_stats(struct net_device *ndev)
 
1561         struct ocp_enet_private *dev = ndev->priv;
 
1562         struct ibm_emac_stats *st = &dev->stats;
 
1563         struct ibm_emac_error_stats *est = &dev->estats;
 
1564         struct net_device_stats *nst = &dev->nstats;
 
1566         DBG2("%d: stats" NL, dev->def->index);
 
1568         /* Compute "legacy" statistics */
 
1569         local_irq_disable();
 
1570         nst->rx_packets = (unsigned long)st->rx_packets;
 
1571         nst->rx_bytes = (unsigned long)st->rx_bytes;
 
1572         nst->tx_packets = (unsigned long)st->tx_packets;
 
1573         nst->tx_bytes = (unsigned long)st->tx_bytes;
 
1574         nst->rx_dropped = (unsigned long)(est->rx_dropped_oom +
 
1575                                           est->rx_dropped_error +
 
1576                                           est->rx_dropped_resize +
 
1577                                           est->rx_dropped_mtu);
 
1578         nst->tx_dropped = (unsigned long)est->tx_dropped;
 
1580         nst->rx_errors = (unsigned long)est->rx_bd_errors;
 
1581         nst->rx_fifo_errors = (unsigned long)(est->rx_bd_overrun +
 
1582                                               est->rx_fifo_overrun +
 
1584         nst->rx_frame_errors = (unsigned long)(est->rx_bd_alignment_error +
 
1585                                                est->rx_alignment_error);
 
1586         nst->rx_crc_errors = (unsigned long)(est->rx_bd_bad_fcs +
 
1588         nst->rx_length_errors = (unsigned long)(est->rx_bd_runt_packet +
 
1589                                                 est->rx_bd_short_event +
 
1590                                                 est->rx_bd_packet_too_long +
 
1591                                                 est->rx_bd_out_of_range +
 
1592                                                 est->rx_bd_in_range +
 
1593                                                 est->rx_runt_packet +
 
1594                                                 est->rx_short_event +
 
1595                                                 est->rx_packet_too_long +
 
1596                                                 est->rx_out_of_range +
 
1599         nst->tx_errors = (unsigned long)(est->tx_bd_errors + est->tx_errors);
 
1600         nst->tx_fifo_errors = (unsigned long)(est->tx_bd_underrun +
 
1602         nst->tx_carrier_errors = (unsigned long)est->tx_bd_carrier_loss;
 
1603         nst->collisions = (unsigned long)(est->tx_bd_excessive_deferral +
 
1604                                           est->tx_bd_excessive_collisions +
 
1605                                           est->tx_bd_late_collision +
 
1606                                           est->tx_bd_multple_collisions);
 
1611 static void emac_remove(struct ocp_device *ocpdev)
 
1613         struct ocp_enet_private *dev = ocp_get_drvdata(ocpdev);
 
1615         DBG("%d: remove" NL, dev->def->index);
 
1617         ocp_set_drvdata(ocpdev, NULL);
 
1618         unregister_netdev(dev->ndev);
 
1620         tah_fini(dev->tah_dev);
 
1621         rgmii_fini(dev->rgmii_dev, dev->rgmii_input);
 
1622         zmii_fini(dev->zmii_dev, dev->zmii_input);
 
1624         emac_dbg_register(dev->def->index, NULL);
 
1626         mal_unregister_commac(dev->mal, &dev->commac);
 
1627         iounmap(dev->emacp);
 
1631 static struct mal_commac_ops emac_commac_ops = {
 
1632         .poll_tx = &emac_poll_tx,
 
1633         .poll_rx = &emac_poll_rx,
 
1634         .peek_rx = &emac_peek_rx,
 
1638 static struct mal_commac_ops emac_commac_sg_ops = {
 
1639         .poll_tx = &emac_poll_tx,
 
1640         .poll_rx = &emac_poll_rx,
 
1641         .peek_rx = &emac_peek_rx_sg,
 
1645 /* Ethtool support */
 
1646 static int emac_ethtool_get_settings(struct net_device *ndev,
 
1647                                      struct ethtool_cmd *cmd)
 
1649         struct ocp_enet_private *dev = ndev->priv;
 
1651         cmd->supported = dev->phy.features;
 
1652         cmd->port = PORT_MII;
 
1653         cmd->phy_address = dev->phy.address;
 
1655             dev->phy.address >= 0 ? XCVR_EXTERNAL : XCVR_INTERNAL;
 
1658         cmd->advertising = dev->phy.advertising;
 
1659         cmd->autoneg = dev->phy.autoneg;
 
1660         cmd->speed = dev->phy.speed;
 
1661         cmd->duplex = dev->phy.duplex;
 
1667 static int emac_ethtool_set_settings(struct net_device *ndev,
 
1668                                      struct ethtool_cmd *cmd)
 
1670         struct ocp_enet_private *dev = ndev->priv;
 
1671         u32 f = dev->phy.features;
 
1673         DBG("%d: set_settings(%d, %d, %d, 0x%08x)" NL, dev->def->index,
 
1674             cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising);
 
1676         /* Basic sanity checks */
 
1677         if (dev->phy.address < 0)
 
1679         if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
 
1681         if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
 
1683         if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL)
 
1686         if (cmd->autoneg == AUTONEG_DISABLE) {
 
1687                 switch (cmd->speed) {
 
1689                         if (cmd->duplex == DUPLEX_HALF
 
1690                             && !(f & SUPPORTED_10baseT_Half))
 
1692                         if (cmd->duplex == DUPLEX_FULL
 
1693                             && !(f & SUPPORTED_10baseT_Full))
 
1697                         if (cmd->duplex == DUPLEX_HALF
 
1698                             && !(f & SUPPORTED_100baseT_Half))
 
1700                         if (cmd->duplex == DUPLEX_FULL
 
1701                             && !(f & SUPPORTED_100baseT_Full))
 
1705                         if (cmd->duplex == DUPLEX_HALF
 
1706                             && !(f & SUPPORTED_1000baseT_Half))
 
1708                         if (cmd->duplex == DUPLEX_FULL
 
1709                             && !(f & SUPPORTED_1000baseT_Full))
 
1717                 dev->phy.def->ops->setup_forced(&dev->phy, cmd->speed,
 
1721                 if (!(f & SUPPORTED_Autoneg))
 
1725                 dev->phy.def->ops->setup_aneg(&dev->phy,
 
1726                                               (cmd->advertising & f) |
 
1727                                               (dev->phy.advertising &
 
1729                                                 ADVERTISED_Asym_Pause)));
 
1731         emac_force_link_update(dev);
 
1737 static void emac_ethtool_get_ringparam(struct net_device *ndev,
 
1738                                        struct ethtool_ringparam *rp)
 
1740         rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF;
 
1741         rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF;
 
1744 static void emac_ethtool_get_pauseparam(struct net_device *ndev,
 
1745                                         struct ethtool_pauseparam *pp)
 
1747         struct ocp_enet_private *dev = ndev->priv;
 
1750         if ((dev->phy.features & SUPPORTED_Autoneg) &&
 
1751             (dev->phy.advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause)))
 
1754         if (dev->phy.duplex == DUPLEX_FULL) {
 
1756                         pp->rx_pause = pp->tx_pause = 1;
 
1757                 else if (dev->phy.asym_pause)
 
1763 static u32 emac_ethtool_get_rx_csum(struct net_device *ndev)
 
1765         struct ocp_enet_private *dev = ndev->priv;
 
1766         return dev->tah_dev != 0;
 
1769 static int emac_get_regs_len(struct ocp_enet_private *dev)
 
1771         return sizeof(struct emac_ethtool_regs_subhdr) + EMAC_ETHTOOL_REGS_SIZE;
 
1774 static int emac_ethtool_get_regs_len(struct net_device *ndev)
 
1776         struct ocp_enet_private *dev = ndev->priv;
 
1777         return sizeof(struct emac_ethtool_regs_hdr) +
 
1778             emac_get_regs_len(dev) + mal_get_regs_len(dev->mal) +
 
1779             zmii_get_regs_len(dev->zmii_dev) +
 
1780             rgmii_get_regs_len(dev->rgmii_dev) +
 
1781             tah_get_regs_len(dev->tah_dev);
 
1784 static void *emac_dump_regs(struct ocp_enet_private *dev, void *buf)
 
1786         struct emac_ethtool_regs_subhdr *hdr = buf;
 
1788         hdr->version = EMAC_ETHTOOL_REGS_VER;
 
1789         hdr->index = dev->def->index;
 
1790         memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE);
 
1791         return ((void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE);
 
1794 static void emac_ethtool_get_regs(struct net_device *ndev,
 
1795                                   struct ethtool_regs *regs, void *buf)
 
1797         struct ocp_enet_private *dev = ndev->priv;
 
1798         struct emac_ethtool_regs_hdr *hdr = buf;
 
1800         hdr->components = 0;
 
1803         local_irq_disable();
 
1804         buf = mal_dump_regs(dev->mal, buf);
 
1805         buf = emac_dump_regs(dev, buf);
 
1806         if (dev->zmii_dev) {
 
1807                 hdr->components |= EMAC_ETHTOOL_REGS_ZMII;
 
1808                 buf = zmii_dump_regs(dev->zmii_dev, buf);
 
1810         if (dev->rgmii_dev) {
 
1811                 hdr->components |= EMAC_ETHTOOL_REGS_RGMII;
 
1812                 buf = rgmii_dump_regs(dev->rgmii_dev, buf);
 
1815                 hdr->components |= EMAC_ETHTOOL_REGS_TAH;
 
1816                 buf = tah_dump_regs(dev->tah_dev, buf);
 
1821 static int emac_ethtool_nway_reset(struct net_device *ndev)
 
1823         struct ocp_enet_private *dev = ndev->priv;
 
1826         DBG("%d: nway_reset" NL, dev->def->index);
 
1828         if (dev->phy.address < 0)
 
1832         if (!dev->phy.autoneg) {
 
1837         dev->phy.def->ops->setup_aneg(&dev->phy, dev->phy.advertising);
 
1838         emac_force_link_update(dev);
 
1845 static int emac_get_sset_count(struct net_device *ndev, int sset)
 
1849                 return EMAC_ETHTOOL_STATS_COUNT;
 
1855 static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset,
 
1858         if (stringset == ETH_SS_STATS)
 
1859                 memcpy(buf, &emac_stats_keys, sizeof(emac_stats_keys));
 
1862 static void emac_ethtool_get_ethtool_stats(struct net_device *ndev,
 
1863                                            struct ethtool_stats *estats,
 
1866         struct ocp_enet_private *dev = ndev->priv;
 
1867         local_irq_disable();
 
1868         memcpy(tmp_stats, &dev->stats, sizeof(dev->stats));
 
1869         tmp_stats += sizeof(dev->stats) / sizeof(u64);
 
1870         memcpy(tmp_stats, &dev->estats, sizeof(dev->estats));
 
1874 static void emac_ethtool_get_drvinfo(struct net_device *ndev,
 
1875                                      struct ethtool_drvinfo *info)
 
1877         struct ocp_enet_private *dev = ndev->priv;
 
1879         strcpy(info->driver, "ibm_emac");
 
1880         strcpy(info->version, DRV_VERSION);
 
1881         info->fw_version[0] = '\0';
 
1882         sprintf(info->bus_info, "PPC 4xx EMAC %d", dev->def->index);
 
1883         info->regdump_len = emac_ethtool_get_regs_len(ndev);
 
1886 static const struct ethtool_ops emac_ethtool_ops = {
 
1887         .get_settings = emac_ethtool_get_settings,
 
1888         .set_settings = emac_ethtool_set_settings,
 
1889         .get_drvinfo = emac_ethtool_get_drvinfo,
 
1891         .get_regs_len = emac_ethtool_get_regs_len,
 
1892         .get_regs = emac_ethtool_get_regs,
 
1894         .nway_reset = emac_ethtool_nway_reset,
 
1896         .get_ringparam = emac_ethtool_get_ringparam,
 
1897         .get_pauseparam = emac_ethtool_get_pauseparam,
 
1899         .get_rx_csum = emac_ethtool_get_rx_csum,
 
1901         .get_strings = emac_ethtool_get_strings,
 
1902         .get_sset_count = emac_get_sset_count,
 
1903         .get_ethtool_stats = emac_ethtool_get_ethtool_stats,
 
1905         .get_link = ethtool_op_get_link,
 
1908 static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
 
1910         struct ocp_enet_private *dev = ndev->priv;
 
1911         uint16_t *data = (uint16_t *) & rq->ifr_ifru;
 
1913         DBG("%d: ioctl %08x" NL, dev->def->index, cmd);
 
1915         if (dev->phy.address < 0)
 
1920         case SIOCDEVPRIVATE:
 
1921                 data[0] = dev->phy.address;
 
1924         case SIOCDEVPRIVATE + 1:
 
1925                 data[3] = emac_mdio_read(ndev, dev->phy.address, data[1]);
 
1929         case SIOCDEVPRIVATE + 2:
 
1930                 if (!capable(CAP_NET_ADMIN))
 
1932                 emac_mdio_write(ndev, dev->phy.address, data[1], data[2]);
 
1939 static int __init emac_probe(struct ocp_device *ocpdev)
 
1941         struct ocp_func_emac_data *emacdata = ocpdev->def->additions;
 
1942         struct net_device *ndev;
 
1943         struct ocp_device *maldev;
 
1944         struct ocp_enet_private *dev;
 
1946         DECLARE_MAC_BUF(mac);
 
1948         DBG("%d: probe" NL, ocpdev->def->index);
 
1951                 printk(KERN_ERR "emac%d: Missing additional data!\n",
 
1952                        ocpdev->def->index);
 
1956         /* Allocate our net_device structure */
 
1957         ndev = alloc_etherdev(sizeof(struct ocp_enet_private));
 
1959                 printk(KERN_ERR "emac%d: could not allocate ethernet device!\n",
 
1960                        ocpdev->def->index);
 
1965         dev->ldev = &ocpdev->dev;
 
1966         dev->def = ocpdev->def;
 
1968         /* Find MAL device we are connected to */
 
1970             ocp_find_device(OCP_VENDOR_IBM, OCP_FUNC_MAL, emacdata->mal_idx);
 
1972                 printk(KERN_ERR "emac%d: unknown mal%d device!\n",
 
1973                        dev->def->index, emacdata->mal_idx);
 
1977         dev->mal = ocp_get_drvdata(maldev);
 
1979                 printk(KERN_ERR "emac%d: mal%d hasn't been initialized yet!\n",
 
1980                        dev->def->index, emacdata->mal_idx);
 
1985         /* Register with MAL */
 
1986         dev->commac.ops = &emac_commac_ops;
 
1987         dev->commac.dev = dev;
 
1988         dev->commac.tx_chan_mask = MAL_CHAN_MASK(emacdata->mal_tx_chan);
 
1989         dev->commac.rx_chan_mask = MAL_CHAN_MASK(emacdata->mal_rx_chan);
 
1990         err = mal_register_commac(dev->mal, &dev->commac);
 
1992                 printk(KERN_ERR "emac%d: failed to register with mal%d!\n",
 
1993                        dev->def->index, emacdata->mal_idx);
 
1996         dev->rx_skb_size = emac_rx_skb_size(ndev->mtu);
 
1997         dev->rx_sync_size = emac_rx_sync_size(ndev->mtu);
 
1999         /* Get pointers to BD rings */
 
2001             dev->mal->bd_virt + mal_tx_bd_offset(dev->mal,
 
2002                                                  emacdata->mal_tx_chan);
 
2004             dev->mal->bd_virt + mal_rx_bd_offset(dev->mal,
 
2005                                                  emacdata->mal_rx_chan);
 
2007         DBG("%d: tx_desc %p" NL, ocpdev->def->index, dev->tx_desc);
 
2008         DBG("%d: rx_desc %p" NL, ocpdev->def->index, dev->rx_desc);
 
2011         memset(dev->tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor));
 
2012         memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor));
 
2014         /* If we depend on another EMAC for MDIO, check whether it was probed already */
 
2015         if (emacdata->mdio_idx >= 0 && emacdata->mdio_idx != ocpdev->def->index) {
 
2016                 struct ocp_device *mdiodev =
 
2017                     ocp_find_device(OCP_VENDOR_IBM, OCP_FUNC_EMAC,
 
2018                                     emacdata->mdio_idx);
 
2020                         printk(KERN_ERR "emac%d: unknown emac%d device!\n",
 
2021                                dev->def->index, emacdata->mdio_idx);
 
2025                 dev->mdio_dev = ocp_get_drvdata(mdiodev);
 
2026                 if (!dev->mdio_dev) {
 
2028                                "emac%d: emac%d hasn't been initialized yet!\n",
 
2029                                dev->def->index, emacdata->mdio_idx);
 
2035         /* Attach to ZMII, if needed */
 
2036         if ((err = zmii_attach(dev)) != 0)
 
2039         /* Attach to RGMII, if needed */
 
2040         if ((err = rgmii_attach(dev)) != 0)
 
2043         /* Attach to TAH, if needed */
 
2044         if ((err = tah_attach(dev)) != 0)
 
2048         dev->emacp = ioremap(dev->def->paddr, sizeof(struct emac_regs));
 
2050                 printk(KERN_ERR "emac%d: could not ioremap device registers!\n",
 
2056         /* Fill in MAC address */
 
2057         for (i = 0; i < 6; ++i)
 
2058                 ndev->dev_addr[i] = emacdata->mac_addr[i];
 
2060         /* Set some link defaults before we can find out real parameters */
 
2061         dev->phy.speed = SPEED_100;
 
2062         dev->phy.duplex = DUPLEX_FULL;
 
2063         dev->phy.autoneg = AUTONEG_DISABLE;
 
2064         dev->phy.pause = dev->phy.asym_pause = 0;
 
2065         dev->stop_timeout = STOP_TIMEOUT_100;
 
2066         init_timer(&dev->link_timer);
 
2067         dev->link_timer.function = emac_link_timer;
 
2068         dev->link_timer.data = (unsigned long)dev;
 
2070         /* Find PHY if any */
 
2071         dev->phy.dev = ndev;
 
2072         dev->phy.mode = emacdata->phy_mode;
 
2073         if (emacdata->phy_map != 0xffffffff) {
 
2074                 u32 phy_map = emacdata->phy_map | busy_phy_map;
 
2077                 DBG("%d: PHY maps %08x %08x" NL, dev->def->index,
 
2078                     emacdata->phy_map, busy_phy_map);
 
2080                 EMAC_RX_CLK_TX(dev->def->index);
 
2082                 dev->phy.mdio_read = emac_mdio_read;
 
2083                 dev->phy.mdio_write = emac_mdio_write;
 
2085                 /* Configure EMAC with defaults so we can at least use MDIO
 
2086                  * This is needed mostly for 440GX
 
2088                 if (emac_phy_gpcs(dev->phy.mode)) {
 
2090                          * Make GPCS PHY address equal to EMAC index.
 
2091                          * We probably should take into account busy_phy_map
 
2092                          * and/or phy_map here.
 
2094                         dev->phy.address = dev->def->index;
 
2097                 emac_configure(dev);
 
2099                 for (i = 0; i < 0x20; phy_map >>= 1, ++i)
 
2100                         if (!(phy_map & 1)) {
 
2102                                 busy_phy_map |= 1 << i;
 
2104                                 /* Quick check if there is a PHY at the address */
 
2105                                 r = emac_mdio_read(dev->ndev, i, MII_BMCR);
 
2106                                 if (r == 0xffff || r < 0)
 
2108                                 if (!mii_phy_probe(&dev->phy, i))
 
2112                         printk(KERN_WARNING "emac%d: can't find PHY!\n",
 
2118                 if (dev->phy.def->ops->init)
 
2119                         dev->phy.def->ops->init(&dev->phy);
 
2121                 /* Disable any PHY features not supported by the platform */
 
2122                 dev->phy.def->features &= ~emacdata->phy_feat_exc;
 
2124                 /* Setup initial link parameters */
 
2125                 if (dev->phy.features & SUPPORTED_Autoneg) {
 
2126                         adv = dev->phy.features;
 
2127 #if !defined(CONFIG_40x)
 
2128                         adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
 
2130                         /* Restart autonegotiation */
 
2131                         dev->phy.def->ops->setup_aneg(&dev->phy, adv);
 
2133                         u32 f = dev->phy.def->features;
 
2134                         int speed = SPEED_10, fd = DUPLEX_HALF;
 
2136                         /* Select highest supported speed/duplex */
 
2137                         if (f & SUPPORTED_1000baseT_Full) {
 
2140                         } else if (f & SUPPORTED_1000baseT_Half)
 
2142                         else if (f & SUPPORTED_100baseT_Full) {
 
2145                         } else if (f & SUPPORTED_100baseT_Half)
 
2147                         else if (f & SUPPORTED_10baseT_Full)
 
2150                         /* Force link parameters */
 
2151                         dev->phy.def->ops->setup_forced(&dev->phy, speed, fd);
 
2156                 /* PHY-less configuration.
 
2157                  * XXX I probably should move these settings to emacdata
 
2159                 dev->phy.address = -1;
 
2160                 dev->phy.features = SUPPORTED_100baseT_Full | SUPPORTED_MII;
 
2164         /* Fill in the driver function table */
 
2165         ndev->open = &emac_open;
 
2167                 ndev->hard_start_xmit = &emac_start_xmit_sg;
 
2168                 ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
 
2170                 ndev->hard_start_xmit = &emac_start_xmit;
 
2171         ndev->tx_timeout = &emac_full_tx_reset;
 
2172         ndev->watchdog_timeo = 5 * HZ;
 
2173         ndev->stop = &emac_close;
 
2174         ndev->get_stats = &emac_stats;
 
2175         ndev->set_multicast_list = &emac_set_multicast_list;
 
2176         ndev->do_ioctl = &emac_ioctl;
 
2177         if (emac_phy_supports_gige(emacdata->phy_mode)) {
 
2178                 ndev->change_mtu = &emac_change_mtu;
 
2179                 dev->commac.ops = &emac_commac_sg_ops;
 
2181         SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);
 
2183         netif_carrier_off(ndev);
 
2184         netif_stop_queue(ndev);
 
2186         err = register_netdev(ndev);
 
2188                 printk(KERN_ERR "emac%d: failed to register net device (%d)!\n",
 
2189                        dev->def->index, err);
 
2193         ocp_set_drvdata(ocpdev, dev);
 
2195         printk("%s: emac%d, MAC %s\n",
 
2196                ndev->name, dev->def->index, print_mac(mac, ndev->dev_addr));
 
2198         if (dev->phy.address >= 0)
 
2199                 printk("%s: found %s PHY (0x%02x)\n", ndev->name,
 
2200                        dev->phy.def->name, dev->phy.address);
 
2202         emac_dbg_register(dev->def->index, dev);
 
2206         iounmap(dev->emacp);
 
2208         tah_fini(dev->tah_dev);
 
2210         rgmii_fini(dev->rgmii_dev, dev->rgmii_input);
 
2212         zmii_fini(dev->zmii_dev, dev->zmii_input);
 
2214         mal_unregister_commac(dev->mal, &dev->commac);
 
/* OCP match table: bind this driver to every IBM EMAC function on the
 * on-chip peripheral bus. The OCP_VENDOR_INVALID entry is the
 * end-of-table sentinel.
 * NOTE(review): the closing "};" is not visible in this excerpt —
 * confirm against the full source. */
2220 static struct ocp_device_id emac_ids[] = {

2221         { .vendor = OCP_VENDOR_IBM, .function = OCP_FUNC_EMAC },

2222         { .vendor = OCP_VENDOR_INVALID}
 
/* OCP bus driver descriptor: routes devices matched by emac_ids to
 * emac_probe() and tears them down via emac_remove().
 * NOTE(review): the .name initializer and closing "};" are not visible
 * in this excerpt — confirm against the full source. */
2225 static struct ocp_driver emac_driver = {

2227         .id_table = emac_ids,

2228         .probe = emac_probe,

2229         .remove = emac_remove,
 
/* Module entry point: print the driver banner, then register the OCP
 * driver; on registration failure it is unregistered again on the
 * error path.
 * NOTE(review): several body lines (braces, intermediate setup such as
 * MAL/debug initialization, and the return statements) are elided from
 * this excerpt — confirm the full control flow against the complete
 * source before relying on this summary. */
2232 static int __init emac_init(void)

2234         printk(KERN_INFO DRV_DESC ", version " DRV_VERSION "\n");

2242         if (ocp_register_driver(&emac_driver)) {

2244                 ocp_unregister_driver(&emac_driver);
 
/* Module exit point: unregister the OCP driver so all bound EMAC
 * devices are removed via emac_remove().
 * NOTE(review): function braces (and any additional teardown lines)
 * are elided from this excerpt. */
2254 static void __exit emac_exit(void)

2257         ocp_unregister_driver(&emac_driver);
 
/* Register the module's load/unload handlers with the kernel. */
2262 module_init(emac_init);

2263 module_exit(emac_exit);