/* typhoon.c: A Linux Ethernet device driver for 3Com 3CR990 family of NICs */
/*
        Written 2002-2004 by David Dillow <dave@thedillows.org>

        Based on code written 1998-2000 by Donald Becker <becker@scyld.com> and
        Linux 2.2.x driver by David P. McLean <davidpmclean@yahoo.com>.

        This software may be used and distributed according to the terms of
        the GNU General Public License (GPL), incorporated herein by reference.
        Drivers based on or derived from this code fall under the GPL and must
        retain the authorship, copyright and license notice.  This file is not
        a complete program and may only be used when the entire operating
        system is licensed under the GPL.

        This software is available on a public web site. It may enable
        cryptographic capabilities of the 3Com hardware, and may be
        exported from the United States under License Exception "TSU"
        pursuant to 15 C.F.R. Section 740.13(e).

        This work was funded by the National Library of Medicine under
        the Department of Energy project number 0274DD06D1 and NLM project

        This driver is designed for the 3Com 3CR990 Family of cards with the
        3XP Processor. It has been tested on x86 and sparc64.

        *) The current firmware always strips the VLAN tag off, even if
                we tell it not to. You should filter VLANs at the switch
                as a workaround (good practice in any event) until we can
        *) Cannot DMA Rx packets to a 2 byte aligned address. Also firmware
                issue. Hopefully 3Com will fix it.
        *) Waiting for a command response takes 8ms due to non-preemptable
                polling. Only significant for getting stats and creating
                SAs, but an ugly wart nevertheless.

        *) Doesn't do IPSEC offloading. Yet. Keep yer pants on, it's coming.
        *) Add more support for ethtool (especially for NIC stats)
        *) Allow disabling of RX checksum offloading
        *) Fix MAC changing to work while the interface is up
                (Need to put commands on the TX ring, which changes
        *) Add in FCS to {rx,tx}_bytes, since the hardware doesn't. See
                http://oss.sgi.com/cgi-bin/mesg.cgi?a=netdev&i=20031215152211.7003fe8e.rddunlap%40osdl.org
*/
 
/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
 * Setting to > 1518 effectively disables this feature.
 */
static int rx_copybreak = 200;
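/* Illustrative sketch (not code from this driver): a copybreak receive path
 * typically copies small frames into a freshly allocated skb so the larger
 * DMA buffer can be handed straight back to the NIC:
 *
 *      if (pkt_len < rx_copybreak &&
 *          (new_skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
 *              skb_reserve(new_skb, 2);                // align the IP header
 *              memcpy(new_skb->data, skb->data, pkt_len);  // tiny frame: copy
 *              // ...then recycle the original buffer to the free ring
 *      } else {
 *              // large frame: unmap and pass the original skb up the stack
 *      }
 */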
 
/* Should we use MMIO or Port IO?
 * 2: Try MMIO, fallback to Port IO
 */
static unsigned int use_mmio = 2;

/* end user-configurable values */

/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
 */
static const int multicast_filter_limit = 32;
 
/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for compile efficiency.
 * The compiler will convert <unsigned>'%'<2^N> into a bit mask.
 * Making the Tx ring too large decreases the effectiveness of channel
 * bonding and packet priority.
 * There are no ill effects from too-large receive rings.
 *
 * We don't currently use the Hi Tx ring, so don't make it very big.
 *
 * Beware that if we start using the Hi Tx ring, we will need to change
 * typhoon_num_free_tx() and typhoon_tx_complete() to account for that.
 */
#define TXHI_ENTRIES            2
#define TXLO_ENTRIES            128
#define COMMAND_ENTRIES         16
#define RESPONSE_ENTRIES        32

#define COMMAND_RING_SIZE       (COMMAND_ENTRIES * sizeof(struct cmd_desc))
#define RESPONSE_RING_SIZE      (RESPONSE_ENTRIES * sizeof(struct resp_desc))

/* The 3XP will preload and remove 64 entries from the free buffer
 * list, and we need one entry to keep the ring from wrapping, so
 * to keep this a power of two, we use 128 entries.
 */
#define RXFREE_ENTRIES          128
#define RXENT_ENTRIES           (RXFREE_ENTRIES - 1)
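/* Worked example of the power-of-two trick mentioned above: because the
 * ring sizes are powers of two, an expression like "i % TXLO_ENTRIES"
 * (i.e. i % 128) can be compiled down to "i & 127" -- a single AND instead
 * of a division.  The same holds for the byte-sized moduli used in
 * typhoon_inc_index() below, as long as the descriptor size is also a
 * power of two.
 */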
 
/* Operational parameters that usually are not changed. */

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (2*HZ)

#define PKT_BUF_SZ              1536

#define DRV_MODULE_NAME         "typhoon"
#define DRV_MODULE_VERSION      "1.5.7"
#define DRV_MODULE_RELDATE      "05/01/07"
#define PFX                     DRV_MODULE_NAME ": "
#define ERR_PFX                 KERN_ERR PFX

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/bitops.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <linux/in6.h>
#include <asm/checksum.h>
#include <linux/version.h>
#include <linux/dma-mapping.h>

#include "typhoon-firmware.h"

static char version[] __devinitdata =
    "typhoon.c: version " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David Dillow <dave@thedillows.org>");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("3Com Typhoon Family (3C990, 3CR990, and variants)");
MODULE_PARM_DESC(rx_copybreak, "Packets smaller than this are copied and "
                               "the buffer given back to the NIC. Default "
MODULE_PARM_DESC(use_mmio, "Use MMIO (1) or PIO(0) to access the NIC. "
                           "Default is to try MMIO and fallback to PIO.");
module_param(rx_copybreak, int, 0);
module_param(use_mmio, int, 0);

#if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32
#warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO

#if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS)
#error TX ring too small!
 
struct typhoon_card_info {

#define TYPHOON_CRYPTO_NONE             0x00
#define TYPHOON_CRYPTO_DES              0x01
#define TYPHOON_CRYPTO_3DES             0x02
#define TYPHOON_CRYPTO_VARIABLE         0x04
#define TYPHOON_FIBER                   0x08
#define TYPHOON_WAKEUP_NEEDS_RESET      0x10

        TYPHOON_TX = 0, TYPHOON_TX95, TYPHOON_TX97, TYPHOON_SVR,
        TYPHOON_SVR95, TYPHOON_SVR97, TYPHOON_TXM, TYPHOON_BSVR,
        TYPHOON_FX95, TYPHOON_FX97, TYPHOON_FX95SVR, TYPHOON_FX97SVR,
 
/* directly indexed by enum typhoon_cards, above */
static struct typhoon_card_info typhoon_card_info[] __devinitdata = {
        { "3Com Typhoon (3C990-TX)",
                TYPHOON_CRYPTO_NONE},
        { "3Com Typhoon (3CR990-TX-95)",
        { "3Com Typhoon (3CR990-TX-97)",
                TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES},
        { "3Com Typhoon (3C990SVR)",
                TYPHOON_CRYPTO_NONE},
        { "3Com Typhoon (3CR990SVR95)",
        { "3Com Typhoon (3CR990SVR97)",
                TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES},
        { "3Com Typhoon2 (3C990B-TX-M)",
                TYPHOON_CRYPTO_VARIABLE},
        { "3Com Typhoon2 (3C990BSVR)",
                TYPHOON_CRYPTO_VARIABLE},
        { "3Com Typhoon (3CR990-FX-95)",
                TYPHOON_CRYPTO_DES | TYPHOON_FIBER},
        { "3Com Typhoon (3CR990-FX-97)",
                TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER},
        { "3Com Typhoon (3CR990-FX-95 Server)",
                TYPHOON_CRYPTO_DES | TYPHOON_FIBER},
        { "3Com Typhoon (3CR990-FX-97 Server)",
                TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER},
        { "3Com Typhoon2 (3C990B-FX-97)",
                TYPHOON_CRYPTO_VARIABLE | TYPHOON_FIBER},

/* Notes on the new subsystem numbering scheme:
 * bits 0-1 indicate crypto capabilities: (0) variable, (1) DES, or (2) 3DES
 * bit 4 indicates if this card has secured firmware (we don't support it)
 * bit 8 indicates if this is a (0) copper or (1) fiber card
 * bits 12-16 indicate card type: (0) client and (1) server
 */
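/* Illustrative decode of the scheme above (a sketch, not code used by the
 * driver): given a PCI subsystem device ID "subdev",
 *
 *      crypto = subdev & 0x3;          // 0 = variable, 1 = DES, 2 = 3DES
 *      secured = subdev & 0x10;        // secured firmware, unsupported here
 *      fiber = subdev & 0x100;         // 0 = copper, 1 = fiber
 *      server = subdev & 0x1000;       // 0 = client, 1 = server
 *
 * which is consistent with the 0x1101/0x2102-style subdevice IDs listed in
 * typhoon_pci_tbl[] below.
 */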
 
static struct pci_device_id typhoon_pci_tbl[] = {
        { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX },
        { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_95,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX95 },
        { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_97,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX97 },
        { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
          PCI_ANY_ID, 0x1000, 0, 0, TYPHOON_TXM },
        { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
          PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FXM },
        { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
          PCI_ANY_ID, 0x2000, 0, 0, TYPHOON_BSVR },
        { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
          PCI_ANY_ID, 0x1101, 0, 0, TYPHOON_FX95 },
        { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
          PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FX97 },
        { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
          PCI_ANY_ID, 0x2101, 0, 0, TYPHOON_FX95SVR },
        { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
          PCI_ANY_ID, 0x2102, 0, 0, TYPHOON_FX97SVR },
        { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR95,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR95 },
        { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR97,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR97 },
        { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR },

MODULE_DEVICE_TABLE(pci, typhoon_pci_tbl);
 
/* Define the shared memory area
 * Align everything the 3XP will normally be using.
 * We'll need to move/align txHi if we start using that ring.
 */
#define __3xp_aligned   ____cacheline_aligned
struct typhoon_shared {
        struct typhoon_interface        iface;
        struct typhoon_indexes          indexes                 __3xp_aligned;
        struct tx_desc                  txLo[TXLO_ENTRIES]      __3xp_aligned;
        struct rx_desc                  rxLo[RX_ENTRIES]        __3xp_aligned;
        struct rx_desc                  rxHi[RX_ENTRIES]        __3xp_aligned;
        struct cmd_desc                 cmd[COMMAND_ENTRIES]    __3xp_aligned;
        struct resp_desc                resp[RESPONSE_ENTRIES]  __3xp_aligned;
        struct rx_free                  rxBuff[RXFREE_ENTRIES]  __3xp_aligned;
        struct tx_desc                  txHi[TXHI_ENTRIES];
} __attribute__ ((packed));

        /* Tx cache line section */
        struct transmit_ring    txLoRing        ____cacheline_aligned;
        struct pci_dev *        tx_pdev;
        void __iomem            *tx_ioaddr;

        /* Irq/Rx cache line section */
        void __iomem            *ioaddr         ____cacheline_aligned;
        struct typhoon_indexes *indexes;
        struct basic_ring       rxLoRing;
        struct pci_dev *        pdev;
        struct net_device *     dev;
        spinlock_t              state_lock;
        struct vlan_group *     vlgrp;
        struct basic_ring       rxHiRing;
        struct basic_ring       rxBuffRing;
        struct rxbuff_ent       rxbuffers[RXENT_ENTRIES];

        /* general section */
        spinlock_t              command_lock    ____cacheline_aligned;
        struct basic_ring       cmdRing;
        struct basic_ring       respRing;
        struct net_device_stats stats;
        struct net_device_stats stats_saved;
        struct typhoon_shared * shared;
        dma_addr_t              shared_dma;

        /* unused stuff (future use) */
        struct transmit_ring    txHiRing;
 
enum completion_wait_values {
        NoWait = 0, WaitNoSleep, WaitSleep,

/* These are the values for the typhoon.card_state variable.
 * These determine where the statistics will come from in get_stats().
 * The sleep image does not support the statistics we need.
 */
        Sleeping = 0, Running,

/* PCI writes are not guaranteed to be posted in order, but outstanding writes
 * cannot pass a read, so this forces current writes to post.
 */
#define typhoon_post_pci_writes(x) \
        do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0)
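/* Usage sketch for the macro above: a write that must reach the 3XP before
 * we continue (e.g. ringing a doorbell register) is followed by a flush,
 * because an MMIO read cannot pass the writes queued ahead of it:
 *
 *      iowrite32(ring->lastWrite, ioaddr + TYPHOON_REG_CMD_READY);
 *      typhoon_post_pci_writes(ioaddr);        // the read flushes posted writes
 *
 * Under PIO the writes are non-posted already, hence the use_mmio check.
 */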
 
/* We'll wait up to six seconds for a reset, and half a second normally.
 */
#define TYPHOON_UDELAY                  50
#define TYPHOON_RESET_TIMEOUT_SLEEP     (6 * HZ)
#define TYPHOON_RESET_TIMEOUT_NOSLEEP   ((6 * 1000000) / TYPHOON_UDELAY)
#define TYPHOON_WAIT_TIMEOUT            ((1000000 / 2) / TYPHOON_UDELAY)

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 28)
#define typhoon_synchronize_irq(x) synchronize_irq()
#define typhoon_synchronize_irq(x) synchronize_irq(x)

#if defined(NETIF_F_TSO)
#define skb_tso_size(x)         (skb_shinfo(x)->tso_size)
#define TSO_NUM_DESCRIPTORS     2
#define TSO_OFFLOAD_ON          TYPHOON_OFFLOAD_TCP_SEGMENT
#define NETIF_F_TSO             0
#define skb_tso_size(x)         0
#define TSO_NUM_DESCRIPTORS     0
#define TSO_OFFLOAD_ON          0
 
typhoon_inc_index(u32 *index, const int count, const int num_entries)
        /* Increment a ring index -- we can use this for all rings except
         * the Rx rings, as they use different size descriptors;
         * otherwise, everything is the same size as a cmd_desc.
         */
        *index += count * sizeof(struct cmd_desc);
        *index %= num_entries * sizeof(struct cmd_desc);
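/* Worked example for typhoon_inc_index(): the index is kept in bytes, not
 * entries.  With sizeof(struct cmd_desc) == 16 (assumed here purely for
 * illustration) and COMMAND_ENTRIES == 16, advancing by one from the last
 * slot wraps back to the start of the ring:
 *
 *      index = 15 * 16 = 240
 *      index += 1 * 16         -> 256
 *      index %= 16 * 16        -> 0
 */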
 
typhoon_inc_cmd_index(u32 *index, const int count)
        typhoon_inc_index(index, count, COMMAND_ENTRIES);

typhoon_inc_resp_index(u32 *index, const int count)
        typhoon_inc_index(index, count, RESPONSE_ENTRIES);

typhoon_inc_rxfree_index(u32 *index, const int count)
        typhoon_inc_index(index, count, RXFREE_ENTRIES);

typhoon_inc_tx_index(u32 *index, const int count)
        /* if we start using the Hi Tx ring, this needs updating */
        typhoon_inc_index(index, count, TXLO_ENTRIES);

typhoon_inc_rx_index(u32 *index, const int count)
        /* sizeof(struct rx_desc) != sizeof(struct cmd_desc) */
        *index += count * sizeof(struct rx_desc);
        *index %= RX_ENTRIES * sizeof(struct rx_desc);
 
typhoon_reset(void __iomem *ioaddr, int wait_type)
        if(wait_type == WaitNoSleep)
                timeout = TYPHOON_RESET_TIMEOUT_NOSLEEP;
                timeout = TYPHOON_RESET_TIMEOUT_SLEEP;

        iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
        iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);

        iowrite32(TYPHOON_RESET_ALL, ioaddr + TYPHOON_REG_SOFT_RESET);
        typhoon_post_pci_writes(ioaddr);
        iowrite32(TYPHOON_RESET_NONE, ioaddr + TYPHOON_REG_SOFT_RESET);

        if(wait_type != NoWait) {
                for(i = 0; i < timeout; i++) {
                        if(ioread32(ioaddr + TYPHOON_REG_STATUS) ==
                           TYPHOON_STATUS_WAITING_FOR_HOST)

                        if(wait_type == WaitSleep)
                                schedule_timeout_uninterruptible(1);
                                udelay(TYPHOON_UDELAY);

        iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
        iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);

        /* The 3XP seems to need a little extra time to complete the load
         * of the sleep image before we can reliably boot it. Failure to
         * do this occasionally results in a hung adapter after boot in
         * typhoon_init_one() while trying to read the MAC address or
         * putting the card to sleep. 3Com's driver waits 5ms, but
         * that seems to be overkill. However, if we can sleep, we might
         * as well give it that much time. Otherwise, we'll give it 500us,
         * which should be enough (I've seen it work well at 100us, but still
         * saw occasional problems.)
         */
        if(wait_type == WaitSleep)
 
typhoon_wait_status(void __iomem *ioaddr, u32 wait_value)
        for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
                if(ioread32(ioaddr + TYPHOON_REG_STATUS) == wait_value)
                udelay(TYPHOON_UDELAY);

typhoon_media_status(struct net_device *dev, struct resp_desc *resp)
        if(resp->parm1 & TYPHOON_MEDIA_STAT_NO_LINK)
                netif_carrier_off(dev);
                netif_carrier_on(dev);

typhoon_hello(struct typhoon *tp)
        struct basic_ring *ring = &tp->cmdRing;
        struct cmd_desc *cmd;

        /* We only get a hello request if we've not sent anything to the
         * card in a long while. If the lock is held, then we're in the
         * process of issuing a command, so we don't need to respond.
         */
        if(spin_trylock(&tp->command_lock)) {
                cmd = (struct cmd_desc *)(ring->ringBase + ring->lastWrite);
                typhoon_inc_cmd_index(&ring->lastWrite, 1);

                INIT_COMMAND_NO_RESPONSE(cmd, TYPHOON_CMD_HELLO_RESP);
                iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
                spin_unlock(&tp->command_lock);
 
typhoon_process_response(struct typhoon *tp, int resp_size,
                                struct resp_desc *resp_save)
        struct typhoon_indexes *indexes = tp->indexes;
        struct resp_desc *resp;
        u8 *base = tp->respRing.ringBase;
        int count, len, wrap_len;

        cleared = le32_to_cpu(indexes->respCleared);
        ready = le32_to_cpu(indexes->respReady);
        while(cleared != ready) {
                resp = (struct resp_desc *)(base + cleared);
                count = resp->numDesc + 1;
                if(resp_save && resp->seqNo) {
                        if(count > resp_size) {
                                resp_save->flags = TYPHOON_RESP_ERROR;

                        len = count * sizeof(*resp);
                        if(unlikely(cleared + len > RESPONSE_RING_SIZE)) {
                                wrap_len = cleared + len - RESPONSE_RING_SIZE;
                                len = RESPONSE_RING_SIZE - cleared;

                        memcpy(resp_save, resp, len);
                        if(unlikely(wrap_len)) {
                                resp_save += len / sizeof(*resp);
                                memcpy(resp_save, base, wrap_len);

                } else if(resp->cmd == TYPHOON_CMD_READ_MEDIA_STATUS) {
                        typhoon_media_status(tp->dev, resp);
                } else if(resp->cmd == TYPHOON_CMD_HELLO_RESP) {
                        printk(KERN_ERR "%s: dumping unexpected response "
                               "0x%04x:%d:0x%02x:0x%04x:%08x:%08x\n",
                               tp->name, le16_to_cpu(resp->cmd),
                               resp->numDesc, resp->flags,
                               le16_to_cpu(resp->parm1),
                               le32_to_cpu(resp->parm2),
                               le32_to_cpu(resp->parm3));

                typhoon_inc_resp_index(&cleared, count);

        indexes->respCleared = cpu_to_le32(cleared);

        return (resp_save == NULL);
 
typhoon_num_free(int lastWrite, int lastRead, int ringSize)
        /* this works for all descriptors but rx_desc, as they are a
         * different size than the cmd_desc -- everyone else is the same
         */
        lastWrite /= sizeof(struct cmd_desc);
        lastRead /= sizeof(struct cmd_desc);
        return (ringSize + lastRead - lastWrite - 1) % ringSize;
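/* Worked example for typhoon_num_free(): with ringSize == 16, lastWrite on
 * entry 14 and lastRead on entry 3, the free count is
 *
 *      (16 + 3 - 14 - 1) % 16 = 4
 *
 * i.e. entries 14, 15, 0 and 1 can still be written.  The "- 1" keeps one
 * slot unused so a completely full ring is never mistaken for an empty one
 * (lastWrite == lastRead).
 */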
 
typhoon_num_free_cmd(struct typhoon *tp)
        int lastWrite = tp->cmdRing.lastWrite;
        int cmdCleared = le32_to_cpu(tp->indexes->cmdCleared);

        return typhoon_num_free(lastWrite, cmdCleared, COMMAND_ENTRIES);

typhoon_num_free_resp(struct typhoon *tp)
        int respReady = le32_to_cpu(tp->indexes->respReady);
        int respCleared = le32_to_cpu(tp->indexes->respCleared);

        return typhoon_num_free(respReady, respCleared, RESPONSE_ENTRIES);

typhoon_num_free_tx(struct transmit_ring *ring)
        /* if we start using the Hi Tx ring, this needs updating */
        return typhoon_num_free(ring->lastWrite, ring->lastRead, TXLO_ENTRIES);
 
typhoon_issue_command(struct typhoon *tp, int num_cmd, struct cmd_desc *cmd,
                      int num_resp, struct resp_desc *resp)
        struct typhoon_indexes *indexes = tp->indexes;
        struct basic_ring *ring = &tp->cmdRing;
        struct resp_desc local_resp;
        int freeCmd, freeResp;

        spin_lock(&tp->command_lock);

        freeCmd = typhoon_num_free_cmd(tp);
        freeResp = typhoon_num_free_resp(tp);

        if(freeCmd < num_cmd || freeResp < num_resp) {
                printk("%s: no descs for cmd, had (needed) %d (%d) cmd, "
                        "%d (%d) resp\n", tp->name, freeCmd, num_cmd,

        if(cmd->flags & TYPHOON_CMD_RESPOND) {
                /* If we're expecting a response, but the caller hasn't given
                 * us a place to put it, we'll provide one.
                 */
                tp->awaiting_resp = 1;

        len = num_cmd * sizeof(*cmd);
        if(unlikely(ring->lastWrite + len > COMMAND_RING_SIZE)) {
                wrap_len = ring->lastWrite + len - COMMAND_RING_SIZE;
                len = COMMAND_RING_SIZE - ring->lastWrite;

        memcpy(ring->ringBase + ring->lastWrite, cmd, len);
        if(unlikely(wrap_len)) {
                struct cmd_desc *wrap_ptr = cmd;
                wrap_ptr += len / sizeof(*cmd);
                memcpy(ring->ringBase, wrap_ptr, wrap_len);

        typhoon_inc_cmd_index(&ring->lastWrite, num_cmd);

        /* "I feel a presence... another warrior is on the mesa."
         */
        iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
        typhoon_post_pci_writes(tp->ioaddr);

        if((cmd->flags & TYPHOON_CMD_RESPOND) == 0)

        /* Ugh. We'll be here about 8ms, spinning our thumbs, unable to
         * preempt or do anything other than take interrupts. So, don't
         * wait for a response unless you have to.
         *
         * I've thought about trying to sleep here, but we're called
         * from many contexts that don't allow that. Also, given the way
         * 3Com has implemented irq coalescing, we would likely timeout --
         * this has been observed in real life!
         *
         * The big killer is we have to wait to get stats from the card,
         * though we could go to a periodic refresh of those if we don't
         * mind them getting somewhat stale. The rest of the waiting
         * commands occur during open/close/suspend/resume, so they aren't
         * time critical. Creating SAs in the future will also have to
         */
        for(i = 0; i < TYPHOON_WAIT_TIMEOUT && !got_resp; i++) {
                if(indexes->respCleared != indexes->respReady)
                        got_resp = typhoon_process_response(tp, num_resp,
                udelay(TYPHOON_UDELAY);

        /* Collect the error response even if we don't care about the
         * rest of the response
         */
        if(resp->flags & TYPHOON_RESP_ERROR)

        if(tp->awaiting_resp) {
                tp->awaiting_resp = 0;

                /* Ugh. If a response was added to the ring between
                 * the call to typhoon_process_response() and the clearing
                 * of tp->awaiting_resp, we could have missed the interrupt
                 * and it could hang in the ring an indeterminate amount of
                 * time. So, check for it, and interrupt ourselves if this
                 */
                if(indexes->respCleared != indexes->respReady)
                        iowrite32(1, tp->ioaddr + TYPHOON_REG_SELF_INTERRUPT);

        spin_unlock(&tp->command_lock);
 
typhoon_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
        struct typhoon *tp = netdev_priv(dev);
        struct cmd_desc xp_cmd;

        spin_lock_bh(&tp->state_lock);
        if(!tp->vlgrp != !grp) {
                /* We've either been turned on for the first time, or we've
                 * been turned off. Update the 3XP.
                 */
                        tp->offload |= TYPHOON_OFFLOAD_VLAN;
                        tp->offload &= ~TYPHOON_OFFLOAD_VLAN;

                /* If the interface is up, the runtime is running -- and we
                 * must be up for the vlan core to call us.
                 *
                 * Do the command outside of the spin lock, as it is slow.
                 */
                INIT_COMMAND_WITH_RESPONSE(&xp_cmd,
                                        TYPHOON_CMD_SET_OFFLOAD_TASKS);
                xp_cmd.parm2 = tp->offload;
                xp_cmd.parm3 = tp->offload;
                spin_unlock_bh(&tp->state_lock);
                err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
                        printk("%s: vlan offload error %d\n", tp->name, -err);
                spin_lock_bh(&tp->state_lock);

        /* now make the change visible */
        spin_unlock_bh(&tp->state_lock);

typhoon_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
        struct typhoon *tp = netdev_priv(dev);
        spin_lock_bh(&tp->state_lock);
                tp->vlgrp->vlan_devices[vid] = NULL;
        spin_unlock_bh(&tp->state_lock);
 
typhoon_tso_fill(struct sk_buff *skb, struct transmit_ring *txRing,
        struct tcpopt_desc *tcpd;
        u32 tcpd_offset = ring_dma;

        tcpd = (struct tcpopt_desc *) (txRing->ringBase + txRing->lastWrite);
        tcpd_offset += txRing->lastWrite;
        tcpd_offset += offsetof(struct tcpopt_desc, bytesTx);
        typhoon_inc_tx_index(&txRing->lastWrite, 1);

        tcpd->flags = TYPHOON_OPT_DESC | TYPHOON_OPT_TCP_SEG;
        tcpd->mss_flags = cpu_to_le16(skb_tso_size(skb));
        tcpd->mss_flags |= TYPHOON_TSO_FIRST | TYPHOON_TSO_LAST;
        tcpd->respAddrLo = cpu_to_le32(tcpd_offset);
        tcpd->bytesTx = cpu_to_le32(skb->len);
 
typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
        struct typhoon *tp = netdev_priv(dev);
        struct transmit_ring *txRing;
        struct tx_desc *txd, *first_txd;

        /* we have two rings to choose from, but we only use txLo for now
         * If we start using the Hi ring as well, we'll need to update
         * typhoon_stop_runtime(), typhoon_interrupt(), typhoon_num_free_tx(),
         * and TXHI_ENTRIES to match, as well as update the TSO code below
         * to get the right DMA address
         */
        txRing = &tp->txLoRing;

        /* We need one descriptor for each fragment of the sk_buff, plus the
         * one for the ->data area of it.
         *
         * The docs say a maximum of 16 fragment descriptors per TCP option
         * descriptor, then make a new packet descriptor and option descriptor
         * for the next 16 fragments. The engineers say just an option
         * descriptor is needed. I've tested up to 26 fragments with a single
         * packet descriptor/option descriptor combo, so I use that for now.
         *
         * If problems develop with TSO, check this first.
         */
        numDesc = skb_shinfo(skb)->nr_frags + 1;
        if(skb_tso_size(skb))

        /* When checking for free space in the ring, we need to also
         * account for the initial Tx descriptor, and we always must leave
         * at least one descriptor unused in the ring so that it doesn't
         * wrap and look empty.
         *
         * The only time we should loop here is when we hit the race
         * between marking the queue awake and updating the cleared index.
         * Just loop and it will appear. This comes from the acenic driver.
         */
        while(unlikely(typhoon_num_free_tx(txRing) < (numDesc + 2)))

        first_txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite);
        typhoon_inc_tx_index(&txRing->lastWrite, 1);

        first_txd->flags = TYPHOON_TX_DESC | TYPHOON_DESC_VALID;
        first_txd->numDesc = 0;
        first_txd->addr = (u64)((unsigned long) skb) & 0xffffffff;
        first_txd->addrHi = (u64)((unsigned long) skb) >> 32;
        first_txd->processFlags = 0;

        if(skb->ip_summed == CHECKSUM_HW) {
                /* The 3XP will figure out if this is UDP/TCP */
                first_txd->processFlags |= TYPHOON_TX_PF_TCP_CHKSUM;
                first_txd->processFlags |= TYPHOON_TX_PF_UDP_CHKSUM;
                first_txd->processFlags |= TYPHOON_TX_PF_IP_CHKSUM;

        if(vlan_tx_tag_present(skb)) {
                first_txd->processFlags |=
                    TYPHOON_TX_PF_INSERT_VLAN | TYPHOON_TX_PF_VLAN_PRIORITY;
                first_txd->processFlags |=
                    cpu_to_le32(htons(vlan_tx_tag_get(skb)) <<
                                TYPHOON_TX_PF_VLAN_TAG_SHIFT);

        if(skb_tso_size(skb)) {
                first_txd->processFlags |= TYPHOON_TX_PF_TCP_SEGMENT;
                first_txd->numDesc++;

                typhoon_tso_fill(skb, txRing, tp->txlo_dma_addr);

        txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite);
        typhoon_inc_tx_index(&txRing->lastWrite, 1);

        /* No need to worry about padding packet -- the firmware pads
         * it with zeros to ETH_ZLEN for us.
         */
        if(skb_shinfo(skb)->nr_frags == 0) {
                skb_dma = pci_map_single(tp->tx_pdev, skb->data, skb->len,
                txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
                txd->len = cpu_to_le16(skb->len);
                txd->addr = cpu_to_le32(skb_dma);
                first_txd->numDesc++;

                len = skb_headlen(skb);
                skb_dma = pci_map_single(tp->tx_pdev, skb->data, len,
                txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
                txd->len = cpu_to_le16(len);
                txd->addr = cpu_to_le32(skb_dma);
                first_txd->numDesc++;

                for(i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                        txd = (struct tx_desc *) (txRing->ringBase +
                        typhoon_inc_tx_index(&txRing->lastWrite, 1);

                        frag_addr = (void *) page_address(frag->page) +
                        skb_dma = pci_map_single(tp->tx_pdev, frag_addr, len,
                        txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
                        txd->len = cpu_to_le16(len);
                        txd->addr = cpu_to_le32(skb_dma);
                        first_txd->numDesc++;

        iowrite32(txRing->lastWrite, tp->tx_ioaddr + txRing->writeRegister);

        dev->trans_start = jiffies;

        /* If we don't have room to put the worst case packet on the
         * queue, then we must stop the queue. We need 2 extra
         * descriptors -- one to prevent ring wrap, and one for the
         */
        numDesc = MAX_SKB_FRAGS + TSO_NUM_DESCRIPTORS + 1;
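        /* Worked numbers for the check below: MAX_SKB_FRAGS is typically 18
         * with 4 KiB pages (an assumption for this example) and
         * TSO_NUM_DESCRIPTORS is 2, so the worst-case packet needs
         * 18 + 2 + 1 = 21 descriptors; with the 2 extra slots reserved, the
         * queue is stopped unless at least 23 of the 128 TXLO_ENTRIES are
         * free.
         */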
 
        if(typhoon_num_free_tx(txRing) < (numDesc + 2)) {
                netif_stop_queue(dev);

                /* A Tx complete IRQ could have gotten in between, making
                 * the ring free again. Only need to recheck here, since
                 */
                if(typhoon_num_free_tx(txRing) >= (numDesc + 2))
                        netif_wake_queue(dev);
 
typhoon_set_rx_mode(struct net_device *dev)
        struct typhoon *tp = netdev_priv(dev);
        struct cmd_desc xp_cmd;

        filter = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
        if(dev->flags & IFF_PROMISC) {
                printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n",
                filter |= TYPHOON_RX_FILTER_PROMISCOUS;
        } else if((dev->mc_count > multicast_filter_limit) ||
                  (dev->flags & IFF_ALLMULTI)) {
                /* Too many to match, or accept all multicasts. */
                filter |= TYPHOON_RX_FILTER_ALL_MCAST;
        } else if(dev->mc_count) {
                struct dev_mc_list *mclist;

                memset(mc_filter, 0, sizeof(mc_filter));
                for(i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
                    i++, mclist = mclist->next) {
                        int bit = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3f;
                        mc_filter[bit >> 5] |= 1 << (bit & 0x1f);
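                /* Example of the hash math above: ether_crc() returns a
                 * 32-bit CRC of the multicast address; "& 0x3f" keeps the
                 * low 6 bits, selecting one of 64 filter bins.  Bin 37, say,
                 * lives in mc_filter[37 >> 5] == mc_filter[1], bit
                 * (37 & 0x1f) == 5, so the two u32 words together form the
                 * 64-bit hash table handed to the 3XP in parm2/parm3 below.
                 */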
 
                INIT_COMMAND_NO_RESPONSE(&xp_cmd,
                                         TYPHOON_CMD_SET_MULTICAST_HASH);
                xp_cmd.parm1 = TYPHOON_MCAST_HASH_SET;
                xp_cmd.parm2 = cpu_to_le32(mc_filter[0]);
                xp_cmd.parm3 = cpu_to_le32(mc_filter[1]);
                typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

                filter |= TYPHOON_RX_FILTER_MCAST_HASH;

        INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
        xp_cmd.parm1 = filter;
        typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
 
typhoon_do_get_stats(struct typhoon *tp)
        struct net_device_stats *stats = &tp->stats;
        struct net_device_stats *saved = &tp->stats_saved;
        struct cmd_desc xp_cmd;
        struct resp_desc xp_resp[7];
        struct stats_resp *s = (struct stats_resp *) xp_resp;

        INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_STATS);
        err = typhoon_issue_command(tp, 1, &xp_cmd, 7, xp_resp);

        /* 3Com's Linux driver uses txMultipleCollisions as its
         * collisions value, but there is some other collision info as well...
         *
         * The extra status reported would be a good candidate for
         * ethtool_ops->get_{strings,stats}()
         */
        stats->tx_packets = le32_to_cpu(s->txPackets);
        stats->tx_bytes = le32_to_cpu(s->txBytes);
        stats->tx_errors = le32_to_cpu(s->txCarrierLost);
        stats->tx_carrier_errors = le32_to_cpu(s->txCarrierLost);
        stats->collisions = le32_to_cpu(s->txMultipleCollisions);
        stats->rx_packets = le32_to_cpu(s->rxPacketsGood);
        stats->rx_bytes = le32_to_cpu(s->rxBytesGood);
        stats->rx_fifo_errors = le32_to_cpu(s->rxFifoOverruns);
        stats->rx_errors = le32_to_cpu(s->rxFifoOverruns) +
                        le32_to_cpu(s->BadSSD) + le32_to_cpu(s->rxCrcErrors);
        stats->rx_crc_errors = le32_to_cpu(s->rxCrcErrors);
        stats->rx_length_errors = le32_to_cpu(s->rxOversized);
        tp->speed = (s->linkStatus & TYPHOON_LINK_100MBPS) ?
                        SPEED_100 : SPEED_10;
        tp->duplex = (s->linkStatus & TYPHOON_LINK_FULL_DUPLEX) ?
                        DUPLEX_FULL : DUPLEX_HALF;

        /* add in the saved statistics
         */
        stats->tx_packets += saved->tx_packets;
        stats->tx_bytes += saved->tx_bytes;
        stats->tx_errors += saved->tx_errors;
        stats->collisions += saved->collisions;
        stats->rx_packets += saved->rx_packets;
        stats->rx_bytes += saved->rx_bytes;
        stats->rx_fifo_errors += saved->rx_fifo_errors;
        stats->rx_errors += saved->rx_errors;
        stats->rx_crc_errors += saved->rx_crc_errors;
        stats->rx_length_errors += saved->rx_length_errors;
 
static struct net_device_stats *
typhoon_get_stats(struct net_device *dev)
        struct typhoon *tp = netdev_priv(dev);
        struct net_device_stats *stats = &tp->stats;
        struct net_device_stats *saved = &tp->stats_saved;

        if(tp->card_state == Sleeping)

        if(typhoon_do_get_stats(tp) < 0) {
                printk(KERN_ERR "%s: error getting stats\n", dev->name);

typhoon_set_mac_address(struct net_device *dev, void *addr)
        struct sockaddr *saddr = (struct sockaddr *) addr;

        if(netif_running(dev))

        memcpy(dev->dev_addr, saddr->sa_data, dev->addr_len);
 
typhoon_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
        struct typhoon *tp = netdev_priv(dev);
        struct pci_dev *pci_dev = tp->pdev;
        struct cmd_desc xp_cmd;
        struct resp_desc xp_resp[3];

        if(tp->card_state == Sleeping) {
                strcpy(info->fw_version, "Sleep image");
                INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
                if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
                        strcpy(info->fw_version, "Unknown runtime");
                        u32 sleep_ver = xp_resp[0].parm2;
                        snprintf(info->fw_version, 32, "%02x.%03x.%03x",
                                 sleep_ver >> 24, (sleep_ver >> 12) & 0xfff,

        strcpy(info->driver, DRV_MODULE_NAME);
        strcpy(info->version, DRV_MODULE_VERSION);
        strcpy(info->bus_info, pci_name(pci_dev));
 
typhoon_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
        struct typhoon *tp = netdev_priv(dev);

        cmd->supported = SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |

        switch (tp->xcvr_select) {
        case TYPHOON_XCVR_10HALF:
                cmd->advertising = ADVERTISED_10baseT_Half;
        case TYPHOON_XCVR_10FULL:
                cmd->advertising = ADVERTISED_10baseT_Full;
        case TYPHOON_XCVR_100HALF:
                cmd->advertising = ADVERTISED_100baseT_Half;
        case TYPHOON_XCVR_100FULL:
                cmd->advertising = ADVERTISED_100baseT_Full;
        case TYPHOON_XCVR_AUTONEG:
                cmd->advertising = ADVERTISED_10baseT_Half |
                                            ADVERTISED_10baseT_Full |
                                            ADVERTISED_100baseT_Half |
                                            ADVERTISED_100baseT_Full |

        if(tp->capabilities & TYPHOON_FIBER) {
                cmd->supported |= SUPPORTED_FIBRE;
                cmd->advertising |= ADVERTISED_FIBRE;
                cmd->port = PORT_FIBRE;
                cmd->supported |= SUPPORTED_10baseT_Half |
                                        SUPPORTED_10baseT_Full |
                cmd->advertising |= ADVERTISED_TP;
                cmd->port = PORT_TP;

        /* need to get stats to make these link speed/duplex valid */
        typhoon_do_get_stats(tp);
        cmd->speed = tp->speed;
        cmd->duplex = tp->duplex;
        cmd->phy_address = 0;
        cmd->transceiver = XCVR_INTERNAL;
        if(tp->xcvr_select == TYPHOON_XCVR_AUTONEG)
                cmd->autoneg = AUTONEG_ENABLE;
                cmd->autoneg = AUTONEG_DISABLE;
 
typhoon_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
        struct typhoon *tp = netdev_priv(dev);
        struct cmd_desc xp_cmd;

        if(cmd->autoneg == AUTONEG_ENABLE) {
                xcvr = TYPHOON_XCVR_AUTONEG;
                if(cmd->duplex == DUPLEX_HALF) {
                        if(cmd->speed == SPEED_10)
                                xcvr = TYPHOON_XCVR_10HALF;
                        else if(cmd->speed == SPEED_100)
                                xcvr = TYPHOON_XCVR_100HALF;
                } else if(cmd->duplex == DUPLEX_FULL) {
                        if(cmd->speed == SPEED_10)
                                xcvr = TYPHOON_XCVR_10FULL;
                        else if(cmd->speed == SPEED_100)
                                xcvr = TYPHOON_XCVR_100FULL;

        INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT);
        xp_cmd.parm1 = cpu_to_le16(xcvr);
        err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

        tp->xcvr_select = xcvr;
        if(cmd->autoneg == AUTONEG_ENABLE) {
                tp->speed = 0xff;       /* invalid */
                tp->duplex = 0xff;      /* invalid */
                tp->speed = cmd->speed;
                tp->duplex = cmd->duplex;

typhoon_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
        struct typhoon *tp = netdev_priv(dev);

        wol->supported = WAKE_PHY | WAKE_MAGIC;

        if(tp->wol_events & TYPHOON_WAKE_LINK_EVENT)
                wol->wolopts |= WAKE_PHY;
        if(tp->wol_events & TYPHOON_WAKE_MAGIC_PKT)
                wol->wolopts |= WAKE_MAGIC;
        memset(&wol->sopass, 0, sizeof(wol->sopass));

typhoon_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
        struct typhoon *tp = netdev_priv(dev);

        if(wol->wolopts & ~(WAKE_PHY | WAKE_MAGIC))

        if(wol->wolopts & WAKE_PHY)
                tp->wol_events |= TYPHOON_WAKE_LINK_EVENT;
        if(wol->wolopts & WAKE_MAGIC)
                tp->wol_events |= TYPHOON_WAKE_MAGIC_PKT;
 
typhoon_get_rx_csum(struct net_device *dev)
        /* For now, we don't allow turning off RX checksums.
         */

typhoon_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
        ering->rx_max_pending = RXENT_ENTRIES;
        ering->rx_mini_max_pending = 0;
        ering->rx_jumbo_max_pending = 0;
        ering->tx_max_pending = TXLO_ENTRIES - 1;

        ering->rx_pending = RXENT_ENTRIES;
        ering->rx_mini_pending = 0;
        ering->rx_jumbo_pending = 0;
        ering->tx_pending = TXLO_ENTRIES - 1;
 
static struct ethtool_ops typhoon_ethtool_ops = {
        .get_settings           = typhoon_get_settings,
        .set_settings           = typhoon_set_settings,
        .get_drvinfo            = typhoon_get_drvinfo,
        .get_wol                = typhoon_get_wol,
        .set_wol                = typhoon_set_wol,
        .get_link               = ethtool_op_get_link,
        .get_rx_csum            = typhoon_get_rx_csum,
        .get_tx_csum            = ethtool_op_get_tx_csum,
        .set_tx_csum            = ethtool_op_set_tx_csum,
        .get_sg                 = ethtool_op_get_sg,
        .set_sg                 = ethtool_op_set_sg,
        .get_tso                = ethtool_op_get_tso,
        .set_tso                = ethtool_op_set_tso,
        .get_ringparam          = typhoon_get_ringparam,
 
typhoon_wait_interrupt(void __iomem *ioaddr)
        for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
                if(ioread32(ioaddr + TYPHOON_REG_INTR_STATUS) &
                   TYPHOON_INTR_BOOTCMD)
                udelay(TYPHOON_UDELAY);

        iowrite32(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS);

#define shared_offset(x)        offsetof(struct typhoon_shared, x)
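/* Example: shared_offset(rxLo) is the byte offset of the rxLo ring inside
 * struct typhoon_shared, so tp->shared_dma + shared_offset(rxLo) is the bus
 * address of that ring within the single coherent allocation -- this is how
 * typhoon_init_interface() below fills in each address the 3XP needs.
 */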
 
typhoon_init_interface(struct typhoon *tp)
        struct typhoon_interface *iface = &tp->shared->iface;
        dma_addr_t shared_dma;

        memset(tp->shared, 0, sizeof(struct typhoon_shared));

        /* The *Hi members of iface are all init'd to zero by the memset().
         */
        shared_dma = tp->shared_dma + shared_offset(indexes);
        iface->ringIndex = cpu_to_le32(shared_dma);

        shared_dma = tp->shared_dma + shared_offset(txLo);
        iface->txLoAddr = cpu_to_le32(shared_dma);
        iface->txLoSize = cpu_to_le32(TXLO_ENTRIES * sizeof(struct tx_desc));

        shared_dma = tp->shared_dma + shared_offset(txHi);
        iface->txHiAddr = cpu_to_le32(shared_dma);
        iface->txHiSize = cpu_to_le32(TXHI_ENTRIES * sizeof(struct tx_desc));

        shared_dma = tp->shared_dma + shared_offset(rxBuff);
        iface->rxBuffAddr = cpu_to_le32(shared_dma);
        iface->rxBuffSize = cpu_to_le32(RXFREE_ENTRIES *
                                        sizeof(struct rx_free));

        shared_dma = tp->shared_dma + shared_offset(rxLo);
        iface->rxLoAddr = cpu_to_le32(shared_dma);
        iface->rxLoSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc));

        shared_dma = tp->shared_dma + shared_offset(rxHi);
        iface->rxHiAddr = cpu_to_le32(shared_dma);
        iface->rxHiSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc));

        shared_dma = tp->shared_dma + shared_offset(cmd);
        iface->cmdAddr = cpu_to_le32(shared_dma);
        iface->cmdSize = cpu_to_le32(COMMAND_RING_SIZE);

        shared_dma = tp->shared_dma + shared_offset(resp);
        iface->respAddr = cpu_to_le32(shared_dma);
        iface->respSize = cpu_to_le32(RESPONSE_RING_SIZE);

        shared_dma = tp->shared_dma + shared_offset(zeroWord);
        iface->zeroAddr = cpu_to_le32(shared_dma);

        tp->indexes = &tp->shared->indexes;
        tp->txLoRing.ringBase = (u8 *) tp->shared->txLo;
        tp->txHiRing.ringBase = (u8 *) tp->shared->txHi;
        tp->rxLoRing.ringBase = (u8 *) tp->shared->rxLo;
        tp->rxHiRing.ringBase = (u8 *) tp->shared->rxHi;
        tp->rxBuffRing.ringBase = (u8 *) tp->shared->rxBuff;
        tp->cmdRing.ringBase = (u8 *) tp->shared->cmd;
        tp->respRing.ringBase = (u8 *) tp->shared->resp;

        tp->txLoRing.writeRegister = TYPHOON_REG_TX_LO_READY;
        tp->txHiRing.writeRegister = TYPHOON_REG_TX_HI_READY;

        tp->txlo_dma_addr = iface->txLoAddr;
        tp->card_state = Sleeping;

        tp->offload = TYPHOON_OFFLOAD_IP_CHKSUM | TYPHOON_OFFLOAD_TCP_CHKSUM;
        tp->offload |= TYPHOON_OFFLOAD_UDP_CHKSUM | TSO_OFFLOAD_ON;

        spin_lock_init(&tp->command_lock);
        spin_lock_init(&tp->state_lock);
 
typhoon_init_rings(struct typhoon *tp)
        memset(tp->indexes, 0, sizeof(struct typhoon_indexes));

        tp->txLoRing.lastWrite = 0;
        tp->txHiRing.lastWrite = 0;
        tp->rxLoRing.lastWrite = 0;
        tp->rxHiRing.lastWrite = 0;
        tp->rxBuffRing.lastWrite = 0;
        tp->cmdRing.lastWrite = 0;
 
        tp->respRing.lastWrite = 0;
 
        tp->txLoRing.lastRead = 0;
        tp->txHiRing.lastRead = 0;
 
typhoon_download_firmware(struct typhoon *tp)
        void __iomem *ioaddr = tp->ioaddr;
        struct pci_dev *pdev = tp->pdev;
        struct typhoon_file_header *fHdr;
        struct typhoon_section_header *sHdr;
        dma_addr_t dpage_dma;

        fHdr = (struct typhoon_file_header *) typhoon_firmware_image;
        image_data = (u8 *) fHdr;

        if(memcmp(fHdr->tag, "TYPHOON", 8)) {
                printk(KERN_ERR "%s: Invalid firmware image!\n", tp->name);

        /* Cannot just map the firmware image using pci_map_single() as
         * the firmware is part of the kernel/module image, so we allocate
         * some consistent memory to copy the sections into, as it is simpler,
         * and short-lived. If we ever split out and require a userland
         * firmware loader, then we can revisit this.
         */
        dpage = pci_alloc_consistent(pdev, PAGE_SIZE, &dpage_dma);
                printk(KERN_ERR "%s: no DMA mem for firmware\n", tp->name);

        irqEnabled = ioread32(ioaddr + TYPHOON_REG_INTR_ENABLE);
        iowrite32(irqEnabled | TYPHOON_INTR_BOOTCMD,
               ioaddr + TYPHOON_REG_INTR_ENABLE);
        irqMasked = ioread32(ioaddr + TYPHOON_REG_INTR_MASK);
        iowrite32(irqMasked | TYPHOON_INTR_BOOTCMD,
               ioaddr + TYPHOON_REG_INTR_MASK);

        if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
                printk(KERN_ERR "%s: card ready timeout\n", tp->name);

        numSections = le32_to_cpu(fHdr->numSections);
        load_addr = le32_to_cpu(fHdr->startAddr);

        iowrite32(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS);
        iowrite32(load_addr, ioaddr + TYPHOON_REG_DOWNLOAD_BOOT_ADDR);
        hmac = le32_to_cpu(fHdr->hmacDigest[0]);
        iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_0);
        hmac = le32_to_cpu(fHdr->hmacDigest[1]);
        iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_1);
        hmac = le32_to_cpu(fHdr->hmacDigest[2]);
        iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_2);
        hmac = le32_to_cpu(fHdr->hmacDigest[3]);
        iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_3);
        hmac = le32_to_cpu(fHdr->hmacDigest[4]);
        iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_4);
        typhoon_post_pci_writes(ioaddr);
        iowrite32(TYPHOON_BOOTCMD_RUNTIME_IMAGE, ioaddr + TYPHOON_REG_COMMAND);

        image_data += sizeof(struct typhoon_file_header);
 
        /* The ioread32() in typhoon_wait_interrupt() will force the
         * last write to the command register to post, so
         * we don't need a typhoon_post_pci_writes() after it.
         */
        for(i = 0; i < numSections; i++) {
                sHdr = (struct typhoon_section_header *) image_data;
                image_data += sizeof(struct typhoon_section_header);
                load_addr = le32_to_cpu(sHdr->startAddr);
                section_len = le32_to_cpu(sHdr->len);

                while(section_len) {
                        len = min_t(u32, section_len, PAGE_SIZE);

                        if(typhoon_wait_interrupt(ioaddr) < 0 ||
                           ioread32(ioaddr + TYPHOON_REG_STATUS) !=
                           TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
                                printk(KERN_ERR "%s: segment ready timeout\n",

                        /* Do a pseudo IPv4 checksum on the data -- first
                         * need to convert each u16 to cpu order before
                         * summing. Fortunately, due to the properties of
                         * the checksum, we can do this once, at the end.
                         */
                        csum = csum_partial_copy_nocheck(image_data, dpage,
                        csum = csum_fold(csum);
                        csum = le16_to_cpu(csum);

                        iowrite32(len, ioaddr + TYPHOON_REG_BOOT_LENGTH);
                        iowrite32(csum, ioaddr + TYPHOON_REG_BOOT_CHECKSUM);
                        iowrite32(load_addr,
                                        ioaddr + TYPHOON_REG_BOOT_DEST_ADDR);
                        iowrite32(0, ioaddr + TYPHOON_REG_BOOT_DATA_HI);
                        iowrite32(dpage_dma, ioaddr + TYPHOON_REG_BOOT_DATA_LO);
                        typhoon_post_pci_writes(ioaddr);
                        iowrite32(TYPHOON_BOOTCMD_SEG_AVAILABLE,
                               ioaddr + TYPHOON_REG_COMMAND);

        if(typhoon_wait_interrupt(ioaddr) < 0 ||
           ioread32(ioaddr + TYPHOON_REG_STATUS) !=
           TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
                printk(KERN_ERR "%s: final segment ready timeout\n", tp->name);

        iowrite32(TYPHOON_BOOTCMD_DNLD_COMPLETE, ioaddr + TYPHOON_REG_COMMAND);

        if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
                printk(KERN_ERR "%s: boot ready timeout, status 0x%0x\n",
                       tp->name, ioread32(ioaddr + TYPHOON_REG_STATUS));

        iowrite32(irqMasked, ioaddr + TYPHOON_REG_INTR_MASK);
        iowrite32(irqEnabled, ioaddr + TYPHOON_REG_INTR_ENABLE);

        pci_free_consistent(pdev, PAGE_SIZE, dpage, dpage_dma);
 
typhoon_boot_3XP(struct typhoon *tp, u32 initial_status)
        void __iomem *ioaddr = tp->ioaddr;

        if(typhoon_wait_status(ioaddr, initial_status) < 0) {
                printk(KERN_ERR "%s: boot ready timeout\n", tp->name);

        iowrite32(0, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_HI);
        iowrite32(tp->shared_dma, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_LO);
        typhoon_post_pci_writes(ioaddr);
        iowrite32(TYPHOON_BOOTCMD_REG_BOOT_RECORD,
                                ioaddr + TYPHOON_REG_COMMAND);

        if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_RUNNING) < 0) {
                printk(KERN_ERR "%s: boot finish timeout (status 0x%x)\n",
                       tp->name, ioread32(ioaddr + TYPHOON_REG_STATUS));

        /* Clear the Transmit and Command ready registers
         */
        iowrite32(0, ioaddr + TYPHOON_REG_TX_HI_READY);
        iowrite32(0, ioaddr + TYPHOON_REG_CMD_READY);
        iowrite32(0, ioaddr + TYPHOON_REG_TX_LO_READY);
        typhoon_post_pci_writes(ioaddr);
        iowrite32(TYPHOON_BOOTCMD_BOOT, ioaddr + TYPHOON_REG_COMMAND);
 
1556 typhoon_clean_tx(struct typhoon *tp, struct transmit_ring *txRing,
 
1557                         volatile u32 * index)
 
1559         u32 lastRead = txRing->lastRead;
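        /* Walk the Tx ring from the slot we last cleaned up to the index
         * the NIC says it has consumed, releasing whatever each descriptor
         * holds: the skb pointer stashed in a TYPHOON_TX_DESC, or the DMA
         * mapping recorded in a TYPHOON_FRAG_DESC.
         */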
 
1565         while(lastRead != le32_to_cpu(*index)) {
 
1566                 tx = (struct tx_desc *) (txRing->ringBase + lastRead);
 
1567                 type = tx->flags & TYPHOON_TYPE_MASK;
 
1569                 if(type == TYPHOON_TX_DESC) {
 
1570                         /* This tx_desc describes a packet.
 
1572                         unsigned long ptr = tx->addr | ((u64)tx->addrHi << 32);
 
1573                         struct sk_buff *skb = (struct sk_buff *) ptr;
 
1574                         dev_kfree_skb_irq(skb);
 
1575                 } else if(type == TYPHOON_FRAG_DESC) {
 
1576                         /* This tx_desc describes a memory mapping. Free it.
 
1578                         skb_dma = (dma_addr_t) le32_to_cpu(tx->addr);
 
1579                         dma_len = le16_to_cpu(tx->len);
 
1580                         pci_unmap_single(tp->pdev, skb_dma, dma_len,
 
1585                 typhoon_inc_tx_index(&lastRead, 1);
 
1592 typhoon_tx_complete(struct typhoon *tp, struct transmit_ring *txRing,
 
1593                         volatile u32 * index)
 
1596         int numDesc = MAX_SKB_FRAGS + 1;
 
1598         /* This will need changing if we start to use the Hi Tx ring. */
 
1599         lastRead = typhoon_clean_tx(tp, txRing, index);
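        /* Only wake the queue once there is room for a maximally
         * fragmented packet (numDesc descriptors) plus a couple of spare
         * entries, so we don't flap between stopping and waking on every
         * cleaned descriptor.
         */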
 
1600         if(netif_queue_stopped(tp->dev) && typhoon_num_free(txRing->lastWrite,
 
1601                                 lastRead, TXLO_ENTRIES) > (numDesc + 2))
 
1602                 netif_wake_queue(tp->dev);
 
1604         txRing->lastRead = lastRead;
 
1609 typhoon_recycle_rx_skb(struct typhoon *tp, u32 idx)
 
1611         struct typhoon_indexes *indexes = tp->indexes;
 
1612         struct rxbuff_ent *rxb = &tp->rxbuffers[idx];
 
1613         struct basic_ring *ring = &tp->rxBuffRing;
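        /* The free-buffer ring is full when advancing lastWrite by one
         * entry would land on the index the NIC has cleared up to; in
         * that case there is nowhere to post this buffer, so just drop
         * the skb below.
         */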
 
1616         if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
 
1617                                 indexes->rxBuffCleared) {
 
1618                 /* no room in ring, just drop the skb
 
1620                 dev_kfree_skb_any(rxb->skb);
 
1625         r = (struct rx_free *) (ring->ringBase + ring->lastWrite);
 
1626         typhoon_inc_rxfree_index(&ring->lastWrite, 1);
 
1628         r->physAddr = cpu_to_le32(rxb->dma_addr);
 
1630         /* Tell the card about it */
 
1632         indexes->rxBuffReady = cpu_to_le32(ring->lastWrite);
 
1636 typhoon_alloc_rx_skb(struct typhoon *tp, u32 idx)
 
1638         struct typhoon_indexes *indexes = tp->indexes;
 
1639         struct rxbuff_ent *rxb = &tp->rxbuffers[idx];
 
1640         struct basic_ring *ring = &tp->rxBuffRing;
 
1642         struct sk_buff *skb;
 
1643         dma_addr_t dma_addr;
 
1647         if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
 
1648                                 indexes->rxBuffCleared)
 
1651         skb = dev_alloc_skb(PKT_BUF_SZ);
 
1656         /* Please, 3com, fix the firmware to allow DMA to an unaligned
 
1657          * address! Pretty please?
 
1659         skb_reserve(skb, 2);
 
1663         dma_addr = pci_map_single(tp->pdev, skb->data,
 
1664                                   PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
 
1666         /* Since no card does 64 bit DAC, the high bits will never
 
1669         r = (struct rx_free *) (ring->ringBase + ring->lastWrite);
 
1670         typhoon_inc_rxfree_index(&ring->lastWrite, 1);
 
1672         r->physAddr = cpu_to_le32(dma_addr);
 
1674         rxb->dma_addr = dma_addr;
 
1676         /* Tell the card about it */
 
1678         indexes->rxBuffReady = cpu_to_le32(ring->lastWrite);
 
1683 typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile u32 * ready,
 
1684            volatile u32 * cleared, int budget)
 
1687         struct sk_buff *skb, *new_skb;
 
1688         struct rxbuff_ent *rxb;
 
1689         dma_addr_t dma_addr;
 
1698         local_ready = le32_to_cpu(*ready);
 
1699         rxaddr = le32_to_cpu(*cleared);
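        /* Walk the Rx ring from the entry we last cleared toward the entry
         * the NIC marked ready, but never process more frames than the
         * NAPI budget allows in one call.
         */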
 
1700         while(rxaddr != local_ready && budget > 0) {
 
1701                 rx = (struct rx_desc *) (rxRing->ringBase + rxaddr);
 
1703                 rxb = &tp->rxbuffers[idx];
 
1705                 dma_addr = rxb->dma_addr;
 
1707                 typhoon_inc_rx_index(&rxaddr, 1);
 
1709                 if(rx->flags & TYPHOON_RX_ERROR) {
 
1710                         typhoon_recycle_rx_skb(tp, idx);
 
1714                 pkt_len = le16_to_cpu(rx->frameLen);
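                /* Copybreak: frames shorter than rx_copybreak are copied
                 * into a fresh, IP-aligned skb so the original buffer can
                 * go straight back to the NIC; larger frames are passed up
                 * as-is and a replacement buffer is allocated for the ring.
                 */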
 
1716                 if(pkt_len < rx_copybreak &&
 
1717                    (new_skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
 
1718                         new_skb->dev = tp->dev;
 
1719                         skb_reserve(new_skb, 2);
 
1720                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr,
 
1722                                                     PCI_DMA_FROMDEVICE);
 
1723                         eth_copy_and_sum(new_skb, skb->data, pkt_len, 0);
 
1724                         pci_dma_sync_single_for_device(tp->pdev, dma_addr,
 
1726                                                        PCI_DMA_FROMDEVICE);
 
1727                         skb_put(new_skb, pkt_len);
 
1728                         typhoon_recycle_rx_skb(tp, idx);
 
1731                         skb_put(new_skb, pkt_len);
 
1732                         pci_unmap_single(tp->pdev, dma_addr, PKT_BUF_SZ,
 
1733                                        PCI_DMA_FROMDEVICE);
 
1734                         typhoon_alloc_rx_skb(tp, idx);
 
1736                 new_skb->protocol = eth_type_trans(new_skb, tp->dev);
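                /* The 3XP reports per-frame checksum results. Only when it
                 * validated both the IP header and the TCP or UDP checksum
                 * do we mark the skb CHECKSUM_UNNECESSARY; otherwise the
                 * stack verifies the checksum in software.
                 */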
 
1737                 csum_bits = rx->rxStatus & (TYPHOON_RX_IP_CHK_GOOD |
 
1738                         TYPHOON_RX_UDP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD);
 
1740                    (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD)
 
1742                    (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_UDP_CHK_GOOD)) {
 
1743                         new_skb->ip_summed = CHECKSUM_UNNECESSARY;
 
1745                         new_skb->ip_summed = CHECKSUM_NONE;
 
1747                 spin_lock(&tp->state_lock);
 
1748                 if(tp->vlgrp != NULL && rx->rxStatus & TYPHOON_RX_VLAN)
 
1749                         vlan_hwaccel_receive_skb(new_skb, tp->vlgrp,
 
1750                                                  ntohl(rx->vlanTag) & 0xffff);
 
1752                         netif_receive_skb(new_skb);
 
1753                 spin_unlock(&tp->state_lock);
 
1755                 tp->dev->last_rx = jiffies;
 
1759         *cleared = cpu_to_le32(rxaddr);
 
1765 typhoon_fill_free_ring(struct typhoon *tp)
 
1769         for(i = 0; i < RXENT_ENTRIES; i++) {
 
1770                 struct rxbuff_ent *rxb = &tp->rxbuffers[i];
 
1773                 if(typhoon_alloc_rx_skb(tp, i) < 0)
 
1779 typhoon_poll(struct net_device *dev, int *total_budget)
 
1781         struct typhoon *tp = netdev_priv(dev);
 
1782         struct typhoon_indexes *indexes = tp->indexes;
 
1783         int orig_budget = *total_budget;
 
1784         int budget, work_done, done;
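        /* NAPI ->poll() contract here: do at most min(*total_budget,
         * dev->quota) worth of Rx work, decrement both counters by the
         * work actually done, and return 0 only once everything is drained
         * and interrupts have been unmasked again; returning 1 asks the
         * core to poll us again.
         */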
 
1787         if(!tp->awaiting_resp && indexes->respReady != indexes->respCleared)
 
1788                         typhoon_process_response(tp, 0, NULL);
 
1790         if(le32_to_cpu(indexes->txLoCleared) != tp->txLoRing.lastRead)
 
1791                 typhoon_tx_complete(tp, &tp->txLoRing, &indexes->txLoCleared);
 
1793         if(orig_budget > dev->quota)
 
1794                 orig_budget = dev->quota;
 
1796         budget = orig_budget;
 
1800         if(indexes->rxHiCleared != indexes->rxHiReady) {
 
1801                 work_done = typhoon_rx(tp, &tp->rxHiRing, &indexes->rxHiReady,
 
1802                                         &indexes->rxHiCleared, budget);
 
1803                 budget -= work_done;
 
1806         if(indexes->rxLoCleared != indexes->rxLoReady) {
 
1807                 work_done += typhoon_rx(tp, &tp->rxLoRing, &indexes->rxLoReady,
 
1808                                         &indexes->rxLoCleared, budget);
 
1812                 *total_budget -= work_done;
 
1813                 dev->quota -= work_done;
 
1815                 if(work_done >= orig_budget)
 
1819         if(le32_to_cpu(indexes->rxBuffCleared) == tp->rxBuffRing.lastWrite) {
 
1820                 /* rxBuff ring is empty, try to fill it. */
 
1821                 typhoon_fill_free_ring(tp);
 
1825                 netif_rx_complete(dev);
 
1826                 iowrite32(TYPHOON_INTR_NONE,
 
1827                                 tp->ioaddr + TYPHOON_REG_INTR_MASK);
 
1828                 typhoon_post_pci_writes(tp->ioaddr);
 
1831         return (done ? 0 : 1);
 
1835 typhoon_interrupt(int irq, void *dev_instance, struct pt_regs *rgs)
 
1837         struct net_device *dev = (struct net_device *) dev_instance;
 
1838         struct typhoon *tp = netdev_priv(dev);
 
1839         void __iomem *ioaddr = tp->ioaddr;
 
1842         intr_status = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
 
1843         if(!(intr_status & TYPHOON_INTR_HOST_INT))
 
1846         iowrite32(intr_status, ioaddr + TYPHOON_REG_INTR_STATUS);
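        /* Standard NAPI handoff: ack whatever the card raised, mask all
         * further interrupts, and schedule the poll routine to do the real
         * work; typhoon_poll() unmasks again once the rings are drained.
         */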
 
1848         if(netif_rx_schedule_prep(dev)) {
 
1849                 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
 
1850                 typhoon_post_pci_writes(ioaddr);
 
1851                 __netif_rx_schedule(dev);
 
1853                 printk(KERN_ERR "%s: Error, poll already scheduled\n",
 
1860 typhoon_free_rx_rings(struct typhoon *tp)
 
1864         for(i = 0; i < RXENT_ENTRIES; i++) {
 
1865                 struct rxbuff_ent *rxb = &tp->rxbuffers[i];
 
1867                         pci_unmap_single(tp->pdev, rxb->dma_addr, PKT_BUF_SZ,
 
1868                                        PCI_DMA_FROMDEVICE);
 
1869                         dev_kfree_skb(rxb->skb);
 
1876 typhoon_sleep(struct typhoon *tp, pci_power_t state, u16 events)
 
1878         struct pci_dev *pdev = tp->pdev;
 
1879         void __iomem *ioaddr = tp->ioaddr;
 
1880         struct cmd_desc xp_cmd;
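        /* Sleep sequence: arm the requested wake events, tell the 3XP to
         * go to sleep and wait for it to report SLEEPING, then drop the
         * carrier and let the PCI layer arm wake-up and power the device
         * down.
         */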
 
1883         INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_ENABLE_WAKE_EVENTS);
 
1884         xp_cmd.parm1 = events;
 
1885         err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
 
1887                 printk(KERN_ERR "%s: typhoon_sleep(): wake events cmd err %d\n",
 
1892         INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_GOTO_SLEEP);
 
1893         err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
 
1895                 printk(KERN_ERR "%s: typhoon_sleep(): sleep cmd err %d\n",
 
1900         if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_SLEEPING) < 0)
 
1903         /* Since we cannot monitor the status of the link while sleeping,
 
1904          * tell the world it went away.
 
1906         netif_carrier_off(tp->dev);
 
1908         pci_enable_wake(tp->pdev, state, 1);
 
1909         pci_disable_device(pdev);
 
1910         return pci_set_power_state(pdev, state);
 
1914 typhoon_wakeup(struct typhoon *tp, int wait_type)
 
1916         struct pci_dev *pdev = tp->pdev;
 
1917         void __iomem *ioaddr = tp->ioaddr;
 
1919         pci_set_power_state(pdev, PCI_D0);
 
1920         pci_restore_state(pdev);
 
1922         /* Post 2.x.x versions of the Sleep Image require a reset before
 
1923          * we can download the Runtime Image. But let's not make users of
 
1924          * the old firmware pay for the reset.
 
1926         iowrite32(TYPHOON_BOOTCMD_WAKEUP, ioaddr + TYPHOON_REG_COMMAND);
 
1927         if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0 ||
 
1928                         (tp->capabilities & TYPHOON_WAKEUP_NEEDS_RESET))
 
1929                 return typhoon_reset(ioaddr, wait_type);
 
1935 typhoon_start_runtime(struct typhoon *tp)
 
1937         struct net_device *dev = tp->dev;
 
1938         void __iomem *ioaddr = tp->ioaddr;
 
1939         struct cmd_desc xp_cmd;
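        /* Bring-up sequence: rings first, then download and boot the
         * runtime image, then configure it (max packet size, MAC, IRQ
         * coalescing, transceiver, VLAN ethertype, offloads, rx mode) over
         * the command ring, and only then enable Tx/Rx and unmask
         * interrupts.
         */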
 
1942         typhoon_init_rings(tp);
 
1943         typhoon_fill_free_ring(tp);
 
1945         err = typhoon_download_firmware(tp);
 
1947                 printk(KERN_ERR "%s: cannot load runtime on 3XP\n", tp->name);
 
1951         if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
 
1952                 printk(KERN_ERR "%s: cannot boot 3XP\n", tp->name);
 
1957         INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAX_PKT_SIZE);
 
1958         xp_cmd.parm1 = cpu_to_le16(PKT_BUF_SZ);
 
1959         err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
 
1963         INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS);
 
1964         xp_cmd.parm1 = cpu_to_le16(ntohs(*(u16 *)&dev->dev_addr[0]));
 
1965         xp_cmd.parm2 = cpu_to_le32(ntohl(*(u32 *)&dev->dev_addr[2]));
 
1966         err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
 
1970         /* Disable IRQ coalescing -- we can reenable it when 3Com gives
 
1971          * us some more information on how to control it.
 
1973         INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_IRQ_COALESCE_CTRL);
 
1975         err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
 
1979         INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT);
 
1980         xp_cmd.parm1 = tp->xcvr_select;
 
1981         err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
 
1985         INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_VLAN_TYPE_WRITE);
 
1986         xp_cmd.parm1 = __constant_cpu_to_le16(ETH_P_8021Q);
 
1987         err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
 
1991         INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_OFFLOAD_TASKS);
 
1992         spin_lock_bh(&tp->state_lock);
 
1993         xp_cmd.parm2 = tp->offload;
 
1994         xp_cmd.parm3 = tp->offload;
 
1995         err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
 
1996         spin_unlock_bh(&tp->state_lock);
 
2000         typhoon_set_rx_mode(dev);
 
2002         INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_ENABLE);
 
2003         err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
 
2007         INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_ENABLE);
 
2008         err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
 
2012         tp->card_state = Running;
 
2015         iowrite32(TYPHOON_INTR_ENABLE_ALL, ioaddr + TYPHOON_REG_INTR_ENABLE);
 
2016         iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_MASK);
 
2017         typhoon_post_pci_writes(ioaddr);
 
2022         typhoon_reset(ioaddr, WaitNoSleep);
 
2023         typhoon_free_rx_rings(tp);
 
2024         typhoon_init_rings(tp);
 
2029 typhoon_stop_runtime(struct typhoon *tp, int wait_type)
 
2031         struct typhoon_indexes *indexes = tp->indexes;
 
2032         struct transmit_ring *txLo = &tp->txLoRing;
 
2033         void __iomem *ioaddr = tp->ioaddr;
 
2034         struct cmd_desc xp_cmd;
 
2037         /* Disable interrupts early, since we can't schedule a poll
 
2038          * when called with !netif_running(). This will be posted
 
2039          * when we force the posting of the command.
 
2041         iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_ENABLE);
 
2043         INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_DISABLE);
 
2044         typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
 
2046         /* Wait 1/2 sec for any outstanding transmits to occur
 
2047          * We'll cleanup after the reset if this times out.
 
2049         for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
 
2050                 if(indexes->txLoCleared == cpu_to_le32(txLo->lastWrite))
 
2052                 udelay(TYPHOON_UDELAY);
 
2055         if(i == TYPHOON_WAIT_TIMEOUT)
 
2057                        "%s: halt timed out waiting for Tx to complete\n",
 
2060         INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_DISABLE);
 
2061         typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
 
2063         /* save the statistics so when we bring the interface up again,
 
2064          * the values reported to userspace are correct.
 
2066         tp->card_state = Sleeping;
 
2068         typhoon_do_get_stats(tp);
 
2069         memcpy(&tp->stats_saved, &tp->stats, sizeof(struct net_device_stats));
 
2071         INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_HALT);
 
2072         typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
 
2074         if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_HALTED) < 0)
 
2075                 printk(KERN_ERR "%s: timed out waiting for 3XP to halt\n",
 
2078         if(typhoon_reset(ioaddr, wait_type) < 0) {
 
2079                 printk(KERN_ERR "%s: unable to reset 3XP\n", tp->name);
 
2083         /* cleanup any outstanding Tx packets */
 
2084         if(indexes->txLoCleared != cpu_to_le32(txLo->lastWrite)) {
 
2085                 indexes->txLoCleared = cpu_to_le32(txLo->lastWrite);
 
2086                 typhoon_clean_tx(tp, &tp->txLoRing, &indexes->txLoCleared);
 
2093 typhoon_tx_timeout(struct net_device *dev)
 
2095         struct typhoon *tp = netdev_priv(dev);
 
2097         if(typhoon_reset(tp->ioaddr, WaitNoSleep) < 0) {
 
2098                 printk(KERN_WARNING "%s: could not reset in tx timeout\n",
 
2103         /* If we ever start using the Hi ring, it will need cleaning too */
 
2104         typhoon_clean_tx(tp, &tp->txLoRing, &tp->indexes->txLoCleared);
 
2105         typhoon_free_rx_rings(tp);
 
2107         if(typhoon_start_runtime(tp) < 0) {
 
2108                 printk(KERN_ERR "%s: could not start runtime in tx timeout\n",
 
2113         netif_wake_queue(dev);
 
2117         /* Reset the hardware, and turn off carrier to avoid more timeouts */
 
2118         typhoon_reset(tp->ioaddr, NoWait);
 
2119         netif_carrier_off(dev);
 
2123 typhoon_open(struct net_device *dev)
 
2125         struct typhoon *tp = netdev_priv(dev);
 
2128         err = typhoon_wakeup(tp, WaitSleep);
 
2130                 printk(KERN_ERR "%s: unable to wakeup device\n", dev->name);
 
2134         err = request_irq(dev->irq, &typhoon_interrupt, SA_SHIRQ,
 
2139         err = typhoon_start_runtime(tp);
 
2143         netif_start_queue(dev);
 
2147         free_irq(dev->irq, dev);
 
2150         if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
 
2151                 printk(KERN_ERR "%s: unable to reboot into sleep img\n",
 
2153                 typhoon_reset(tp->ioaddr, NoWait);
 
2157         if(typhoon_sleep(tp, PCI_D3hot, 0) < 0) 
 
2158                 printk(KERN_ERR "%s: unable to go back to sleep\n", dev->name);
 
2165 typhoon_close(struct net_device *dev)
 
2167         struct typhoon *tp = netdev_priv(dev);
 
2169         netif_stop_queue(dev);
 
2171         if(typhoon_stop_runtime(tp, WaitSleep) < 0)
 
2172                 printk(KERN_ERR "%s: unable to stop runtime\n", dev->name);
 
2174         /* Make sure there is no irq handler running on a different CPU. */
 
2175         typhoon_synchronize_irq(dev->irq);
 
2176         free_irq(dev->irq, dev);
 
2178         typhoon_free_rx_rings(tp);
 
2179         typhoon_init_rings(tp);
 
2181         if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0)
 
2182                 printk(KERN_ERR "%s: unable to boot sleep image\n", dev->name);
 
2184         if(typhoon_sleep(tp, PCI_D3hot, 0) < 0)
 
2185                 printk(KERN_ERR "%s: unable to put card to sleep\n", dev->name);
 
2192 typhoon_resume(struct pci_dev *pdev)
 
2194         struct net_device *dev = pci_get_drvdata(pdev);
 
2195         struct typhoon *tp = netdev_priv(dev);
 
2197         /* If we're down, we'll resume when the interface is brought up.
 
2199         if(!netif_running(dev))
 
2202         if(typhoon_wakeup(tp, WaitNoSleep) < 0) {
 
2203                 printk(KERN_ERR "%s: critical: could not wake up in resume\n",
 
2208         if(typhoon_start_runtime(tp) < 0) {
 
2209                 printk(KERN_ERR "%s: critical: could not start runtime in "
 
2210                                 "resume\n", dev->name);
 
2214         netif_device_attach(dev);
 
2215         netif_start_queue(dev);
 
2219         typhoon_reset(tp->ioaddr, NoWait);
 
2224 typhoon_suspend(struct pci_dev *pdev, pm_message_t state)
 
2226         struct net_device *dev = pci_get_drvdata(pdev);
 
2227         struct typhoon *tp = netdev_priv(dev);
 
2228         struct cmd_desc xp_cmd;
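        /* Suspend path: detach the device, stop the runtime image, put the
         * rings back into a clean state, reboot into the sleep image, and
         * reprogram the MAC plus a directed/broadcast-only Rx filter
         * (presumably what the sleep image matches wake-up packets
         * against) before finally sleeping with the configured wake
         * events.
         */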
 
2230         /* If we're down, we're already suspended.
 
2232         if(!netif_running(dev))
 
2235         spin_lock_bh(&tp->state_lock);
 
2236         if(tp->vlgrp && tp->wol_events & TYPHOON_WAKE_MAGIC_PKT) {
 
2237                 spin_unlock_bh(&tp->state_lock);
 
2238                 printk(KERN_ERR "%s: cannot do WAKE_MAGIC with VLANS\n",
 
2242         spin_unlock_bh(&tp->state_lock);
 
2244         netif_device_detach(dev);
 
2246         if(typhoon_stop_runtime(tp, WaitNoSleep) < 0) {
 
2247                 printk(KERN_ERR "%s: unable to stop runtime\n", dev->name);
 
2251         typhoon_free_rx_rings(tp);
 
2252         typhoon_init_rings(tp);
 
2254         if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
 
2255                 printk(KERN_ERR "%s: unable to boot sleep image\n", dev->name);
 
2259         INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS);
 
2260         xp_cmd.parm1 = cpu_to_le16(ntohs(*(u16 *)&dev->dev_addr[0]));
 
2261         xp_cmd.parm2 = cpu_to_le32(ntohl(*(u32 *)&dev->dev_addr[2]));
 
2262         if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
 
2263                 printk(KERN_ERR "%s: unable to set mac address in suspend\n",
 
2268         INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
 
2269         xp_cmd.parm1 = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
 
2270         if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
 
2271                 printk(KERN_ERR "%s: unable to set rx filter in suspend\n",
 
2276         if(typhoon_sleep(tp, pci_choose_state(pdev, state), tp->wol_events) < 0) {
 
2277                 printk(KERN_ERR "%s: unable to put card to sleep\n", dev->name);
 
2284         typhoon_resume(pdev);
 
2289 typhoon_enable_wake(struct pci_dev *pdev, pci_power_t state, int enable)
 
2291         return pci_enable_wake(pdev, state, enable);
 
2295 static int __devinit
 
2296 typhoon_test_mmio(struct pci_dev *pdev)
 
2298         void __iomem *ioaddr = pci_iomap(pdev, 1, 128);
 
2305         if(ioread32(ioaddr + TYPHOON_REG_STATUS) !=
 
2306                                 TYPHOON_STATUS_WAITING_FOR_HOST)
 
2309         iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
 
2310         iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
 
2311         iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_ENABLE);
 
2313         /* Ok, see if we can change our interrupt status register by
 
2314          * sending ourselves an interrupt. If so, then MMIO works.
 
2315          * The 50usec delay is arbitrary -- it could probably be smaller.
 
2317         val = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
 
2318         if((val & TYPHOON_INTR_SELF) == 0) {
 
2319                 iowrite32(1, ioaddr + TYPHOON_REG_SELF_INTERRUPT);
 
2320                 ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
 
2322                 val = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
 
2323                 if(val & TYPHOON_INTR_SELF)
 
2327         iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
 
2328         iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
 
2329         iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_ENABLE);
 
2330         ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
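        /* Leave the card quiescent again (everything masked, pending
         * status cleared, interrupt generation disabled) and flush the
         * posted writes with the read above before unmapping.
         */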
 
2333         pci_iounmap(pdev, ioaddr);
 
2337                 printk(KERN_INFO PFX "falling back to port IO\n");
 
2341 static int __devinit
 
2342 typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
2344         static int did_version = 0;
 
2345         struct net_device *dev;
 
2347         int card_id = (int) ent->driver_data;
 
2348         void __iomem *ioaddr;
 
2350         dma_addr_t shared_dma;
 
2351         struct cmd_desc xp_cmd;
 
2352         struct resp_desc xp_resp[3];
 
2357                 printk(KERN_INFO "%s", version);
 
2359         dev = alloc_etherdev(sizeof(*tp));
 
2361                 printk(ERR_PFX "%s: unable to alloc new net device\n",
 
2366         SET_MODULE_OWNER(dev);
 
2367         SET_NETDEV_DEV(dev, &pdev->dev);
 
2369         err = pci_enable_device(pdev);
 
2371                 printk(ERR_PFX "%s: unable to enable device\n",
 
2376         err = pci_set_mwi(pdev);
 
2378                 printk(ERR_PFX "%s: unable to set MWI\n", pci_name(pdev));
 
2379                 goto error_out_disable;
 
2382         err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
 
2384                 printk(ERR_PFX "%s: No usable DMA configuration\n",
 
2389         /* sanity checks on IO and MMIO BARs
 
2391         if(!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) {
 
2393                        "%s: region #0 not a PCI IO resource, aborting\n",
 
2398         if(pci_resource_len(pdev, 0) < 128) {
 
2399                 printk(ERR_PFX "%s: Invalid PCI IO region size, aborting\n",
 
2404         if(!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
 
2406                        "%s: region #1 not a PCI MMIO resource, aborting\n",
 
2411         if(pci_resource_len(pdev, 1) < 128) {
 
2412                 printk(ERR_PFX "%s: Invalid PCI MMIO region size, aborting\n",
 
2418         err = pci_request_regions(pdev, "typhoon");
 
2420                 printk(ERR_PFX "%s: could not request regions\n",
 
2425         /* map our registers
 
2427         if(use_mmio != 0 && use_mmio != 1)
 
2428                 use_mmio = typhoon_test_mmio(pdev);
 
2430         ioaddr = pci_iomap(pdev, use_mmio, 128);
 
2432                 printk(ERR_PFX "%s: cannot remap registers, aborting\n",
 
2435                 goto error_out_regions;
 
2438         /* allocate pci dma space for rx and tx descriptor rings
 
2440         shared = pci_alloc_consistent(pdev, sizeof(struct typhoon_shared),
 
2443                 printk(ERR_PFX "%s: could not allocate DMA memory\n",
 
2446                 goto error_out_remap;
 
2449         dev->irq = pdev->irq;
 
2450         tp = netdev_priv(dev);
 
2451         tp->shared = (struct typhoon_shared *) shared;
 
2452         tp->shared_dma = shared_dma;
 
2455         tp->ioaddr = ioaddr;
 
2456         tp->tx_ioaddr = ioaddr;
 
2460          * 1) Reset the adapter to clear any bad juju
 
2461          * 2) Reload the sleep image
 
2462          * 3) Boot the sleep image
 
2463          * 4) Get the hardware address.
 
2464          * 5) Put the card to sleep.
 
2466         if (typhoon_reset(ioaddr, WaitSleep) < 0) {
 
2467                 printk(ERR_PFX "%s: could not reset 3XP\n", pci_name(pdev));
 
2472         /* Now that we've reset the 3XP and are sure it's not going to
 
2473          * write all over memory, enable bus mastering, and save our
 
2474          * state for resuming after a suspend.
 
2476         pci_set_master(pdev);
 
2477         pci_save_state(pdev);
 
2479         /* dev->name is not valid until we register, but we need to
 
2480          * use some common routines to initialize the card. So that those
 
2481          * routines print the right name, we keep our own pointer to the name
 
2483         tp->name = pci_name(pdev);
 
2485         typhoon_init_interface(tp);
 
2486         typhoon_init_rings(tp);
 
2488         if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
 
2489                 printk(ERR_PFX "%s: cannot boot 3XP sleep image\n",
 
2492                 goto error_out_reset;
 
2495         INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_MAC_ADDRESS);
 
2496         if(typhoon_issue_command(tp, 1, &xp_cmd, 1, xp_resp) < 0) {
 
2497                 printk(ERR_PFX "%s: cannot read MAC address\n",
 
2500                 goto error_out_reset;
 
2503         *(u16 *)&dev->dev_addr[0] = htons(le16_to_cpu(xp_resp[0].parm1));
 
2504         *(u32 *)&dev->dev_addr[2] = htonl(le32_to_cpu(xp_resp[0].parm2));
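        /* The 3XP returns its MAC little-endian in parm1/parm2 of the
         * response; the le*_to_cpu/hton* conversions above leave dev_addr[]
         * holding the address in the usual big-endian (wire) order.
         */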
 
2506         if(!is_valid_ether_addr(dev->dev_addr)) {
 
2507                 printk(ERR_PFX "%s: Could not obtain valid ethernet address, "
 
2508                        "aborting\n", pci_name(pdev));
 
2509                 goto error_out_reset;
 
2512         /* Read the Sleep Image version last, so the response is valid
 
2513          * later when we print out the version reported.
 
2515         INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
 
2516         if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
 
2517                 printk(ERR_PFX "%s: Could not get Sleep Image version\n",
 
2519                 goto error_out_reset;
 
2522         tp->capabilities = typhoon_card_info[card_id].capabilities;
 
2523         tp->xcvr_select = TYPHOON_XCVR_AUTONEG;
 
2525         /* Typhoon 1.0 Sleep Images return one response descriptor to the
 
2526          * READ_VERSIONS command. Those versions are OK after waking up
 
2527          * from sleep without needing a reset. Typhoon 1.1+ Sleep Images
 
2528          * seem to need a little extra help to get started. Since we don't
 
2529          * know how to nudge it along, just kick it.
 
2531         if(xp_resp[0].numDesc != 0)
 
2532                 tp->capabilities |= TYPHOON_WAKEUP_NEEDS_RESET;
 
2534         if(typhoon_sleep(tp, PCI_D3hot, 0) < 0) {
 
2535                 printk(ERR_PFX "%s: cannot put adapter to sleep\n",
 
2538                 goto error_out_reset;
 
2541         /* The chip-specific entries in the device structure. */
 
2542         dev->open               = typhoon_open;
 
2543         dev->hard_start_xmit    = typhoon_start_tx;
 
2544         dev->stop               = typhoon_close;
 
2545         dev->set_multicast_list = typhoon_set_rx_mode;
 
2546         dev->tx_timeout         = typhoon_tx_timeout;
 
2547         dev->poll               = typhoon_poll;
 
2549         dev->watchdog_timeo     = TX_TIMEOUT;
 
2550         dev->get_stats          = typhoon_get_stats;
 
2551         dev->set_mac_address    = typhoon_set_mac_address;
 
2552         dev->vlan_rx_register   = typhoon_vlan_rx_register;
 
2553         dev->vlan_rx_kill_vid   = typhoon_vlan_rx_kill_vid;
 
2554         SET_ETHTOOL_OPS(dev, &typhoon_ethtool_ops);
 
2556         /* We can handle scatter gather, up to 16 entries, and
 
2557          * we can do IP checksumming (only version 4, doh...)
 
2559         dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
 
2560         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
 
2561         dev->features |= NETIF_F_TSO;
 
2563         if(register_netdev(dev) < 0)
 
2564                 goto error_out_reset;
 
2566         /* fixup our local name */
 
2567         tp->name = dev->name;
 
2569         pci_set_drvdata(pdev, dev);
 
2571         printk(KERN_INFO "%s: %s at %s 0x%lx, ",
 
2572                dev->name, typhoon_card_info[card_id].name,
 
2573                use_mmio ? "MMIO" : "IO", pci_resource_start(pdev, use_mmio));
 
2574         for(i = 0; i < 5; i++)
 
2575                 printk("%2.2x:", dev->dev_addr[i]);
 
2576         printk("%2.2x\n", dev->dev_addr[i]);
 
2578         /* xp_resp still contains the response to the READ_VERSIONS command.
 
2579          * For debugging, let the user know what version he has.
 
2581         if(xp_resp[0].numDesc == 0) {
 
2582                 /* This is the Typhoon 1.0 type Sleep Image, last 16 bits
 
2583                  * of version is Month/Day of build.
 
2585                 u16 monthday = le32_to_cpu(xp_resp[0].parm2) & 0xffff;
 
2586                 printk(KERN_INFO "%s: Typhoon 1.0 Sleep Image built "
 
2587                         "%02u/%02u/2000\n", dev->name, monthday >> 8,
 
2589         } else if(xp_resp[0].numDesc == 2) {
 
2590                 /* This is the Typhoon 1.1+ type Sleep Image
 
2592                 u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2);
 
2593                 u8 *ver_string = (u8 *) &xp_resp[1];
 
2595                 printk(KERN_INFO "%s: Typhoon 1.1+ Sleep Image version "
 
2596                         "%02x.%03x.%03x %s\n", dev->name, sleep_ver >> 24,
 
2597                         (sleep_ver >> 12) & 0xfff, sleep_ver & 0xfff,
 
2600                 printk(KERN_WARNING "%s: Unknown Sleep Image version "
 
2601                         "(%u:%04x)\n", dev->name, xp_resp[0].numDesc,
 
2602                         le32_to_cpu(xp_resp[0].parm2));
 
2608         typhoon_reset(ioaddr, NoWait);
 
2611         pci_free_consistent(pdev, sizeof(struct typhoon_shared),
 
2612                             shared, shared_dma);
 
2614         pci_iounmap(pdev, ioaddr);
 
2616         pci_release_regions(pdev);
 
2618         pci_clear_mwi(pdev);
 
2620         pci_disable_device(pdev);
 
2627 static void __devexit
 
2628 typhoon_remove_one(struct pci_dev *pdev)
 
2630         struct net_device *dev = pci_get_drvdata(pdev);
 
2631         struct typhoon *tp = netdev_priv(dev);
 
2633         unregister_netdev(dev);
 
2634         pci_set_power_state(pdev, PCI_D0);
 
2635         pci_restore_state(pdev);
 
2636         typhoon_reset(tp->ioaddr, NoWait);
 
2637         pci_iounmap(pdev, tp->ioaddr);
 
2638         pci_free_consistent(pdev, sizeof(struct typhoon_shared),
 
2639                             tp->shared, tp->shared_dma);
 
2640         pci_release_regions(pdev);
 
2641         pci_clear_mwi(pdev);
 
2642         pci_disable_device(pdev);
 
2643         pci_set_drvdata(pdev, NULL);
 
2647 static struct pci_driver typhoon_driver = {
 
2648         .name           = DRV_MODULE_NAME,
 
2649         .id_table       = typhoon_pci_tbl,
 
2650         .probe          = typhoon_init_one,
 
2651         .remove         = __devexit_p(typhoon_remove_one),
 
2653         .suspend        = typhoon_suspend,
 
2654         .resume         = typhoon_resume,
 
2655         .enable_wake    = typhoon_enable_wake,
 
2662         return pci_module_init(&typhoon_driver);
 
2666 typhoon_cleanup(void)
 
2668         pci_unregister_driver(&typhoon_driver);
 
2671 module_init(typhoon_init);
 
2672 module_exit(typhoon_cleanup);