qlge: Clean up link up processing.
[linux-2.6] / drivers / net / typhoon.c
1 /* typhoon.c: A Linux Ethernet device driver for 3Com 3CR990 family of NICs */
2 /*
3         Written 2002-2004 by David Dillow <dave@thedillows.org>
4         Based on code written 1998-2000 by Donald Becker <becker@scyld.com> and
5         Linux 2.2.x driver by David P. McLean <davidpmclean@yahoo.com>.
6
7         This software may be used and distributed according to the terms of
8         the GNU General Public License (GPL), incorporated herein by reference.
9         Drivers based on or derived from this code fall under the GPL and must
10         retain the authorship, copyright and license notice.  This file is not
11         a complete program and may only be used when the entire operating
12         system is licensed under the GPL.
13
14         This software is available on a public web site. It may enable
15         cryptographic capabilities of the 3Com hardware, and may be
16         exported from the United States under License Exception "TSU"
17         pursuant to 15 C.F.R. Section 740.13(e).
18
19         This work was funded by the National Library of Medicine under
20         the Department of Energy project number 0274DD06D1 and NLM project
21         number Y1-LM-2015-01.
22
23         This driver is designed for the 3Com 3CR990 Family of cards with the
24         3XP Processor. It has been tested on x86 and sparc64.
25
26         KNOWN ISSUES:
27         *) The current firmware always strips the VLAN tag off, even if
28                 we tell it not to. You should filter VLANs at the switch
29                 as a workaround (good practice in any event) until we can
30                 get this fixed.
31         *) Cannot DMA Rx packets to a 2 byte aligned address. Also firmware
32                 issue. Hopefully 3Com will fix it.
33         *) Waiting for a command response takes 8ms due to non-preemptable
34                 polling. Only significant for getting stats and creating
35         SAs, but an ugly wart nevertheless.
36
37         TODO:
38         *) Doesn't do IPSEC offloading. Yet. Keep yer pants on, it's coming.
39         *) Add more support for ethtool (especially for NIC stats)
40         *) Allow disabling of RX checksum offloading
41         *) Fix MAC changing to work while the interface is up
42                 (Need to put commands on the TX ring, which changes
43                 the locking)
44         *) Add in FCS to {rx,tx}_bytes, since the hardware doesn't. See
45                 http://oss.sgi.com/cgi-bin/mesg.cgi?a=netdev&i=20031215152211.7003fe8e.rddunlap%40osdl.org
46 */
47
48 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
49  * Setting to > 1518 effectively disables this feature.
50  */
51 static int rx_copybreak = 200;
52
53 /* Should we use MMIO or Port IO?
54  * 0: Port IO
55  * 1: MMIO
56  * 2: Try MMIO, fallback to Port IO
57  */
58 static unsigned int use_mmio = 2;
59
60 /* end user-configurable values */
61
62 /* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
63  */
64 static const int multicast_filter_limit = 32;
65
66 /* Operational parameters that are set at compile time. */
67
68 /* Keep the ring sizes a power of two for compile efficiency.
69  * The compiler will convert <unsigned>'%'<2^N> into a bit mask.
70  * Making the Tx ring too large decreases the effectiveness of channel
71  * bonding and packet priority.
72  * There are no ill effects from too-large receive rings.
73  *
74  * We don't currently use the Hi Tx ring so, don't make it very big.
75  *
76  * Beware that if we start using the Hi Tx ring, we will need to change
77  * typhoon_num_free_tx() and typhoon_tx_complete() to account for that.
78  */
79 #define TXHI_ENTRIES            2
80 #define TXLO_ENTRIES            128
81 #define RX_ENTRIES              32
82 #define COMMAND_ENTRIES         16
83 #define RESPONSE_ENTRIES        32
84
85 #define COMMAND_RING_SIZE       (COMMAND_ENTRIES * sizeof(struct cmd_desc))
86 #define RESPONSE_RING_SIZE      (RESPONSE_ENTRIES * sizeof(struct resp_desc))
87
88 /* The 3XP will preload and remove 64 entries from the free buffer
89  * list, and we need one entry to keep the ring from wrapping, so
90  * to keep this a power of two, we use 128 entries.
91  */
92 #define RXFREE_ENTRIES          128
93 #define RXENT_ENTRIES           (RXFREE_ENTRIES - 1)
94
95 /* Operational parameters that usually are not changed. */
96
97 /* Time in jiffies before concluding the transmitter is hung. */
98 #define TX_TIMEOUT  (2*HZ)
99
100 #define PKT_BUF_SZ              1536
101
102 #define DRV_MODULE_NAME         "typhoon"
103 #define DRV_MODULE_VERSION      "1.5.9"
104 #define DRV_MODULE_RELDATE      "Mar 2, 2009"
105 #define PFX                     DRV_MODULE_NAME ": "
106 #define ERR_PFX                 KERN_ERR PFX
107 #define FIRMWARE_NAME           "3com/typhoon.bin"
108
109 #include <linux/module.h>
110 #include <linux/kernel.h>
111 #include <linux/string.h>
112 #include <linux/timer.h>
113 #include <linux/errno.h>
114 #include <linux/ioport.h>
115 #include <linux/slab.h>
116 #include <linux/interrupt.h>
117 #include <linux/pci.h>
118 #include <linux/netdevice.h>
119 #include <linux/etherdevice.h>
120 #include <linux/skbuff.h>
121 #include <linux/mm.h>
122 #include <linux/init.h>
123 #include <linux/delay.h>
124 #include <linux/ethtool.h>
125 #include <linux/if_vlan.h>
126 #include <linux/crc32.h>
127 #include <linux/bitops.h>
128 #include <asm/processor.h>
129 #include <asm/io.h>
130 #include <asm/uaccess.h>
131 #include <linux/in6.h>
132 #include <linux/dma-mapping.h>
133 #include <linux/firmware.h>
134
135 #include "typhoon.h"
136
137 static char version[] __devinitdata =
138     "typhoon.c: version " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
139
140 MODULE_AUTHOR("David Dillow <dave@thedillows.org>");
141 MODULE_VERSION(DRV_MODULE_VERSION);
142 MODULE_LICENSE("GPL");
143 MODULE_FIRMWARE(FIRMWARE_NAME);
144 MODULE_DESCRIPTION("3Com Typhoon Family (3C990, 3CR990, and variants)");
145 MODULE_PARM_DESC(rx_copybreak, "Packets smaller than this are copied and "
146                                "the buffer given back to the NIC. Default "
147                                "is 200.");
148 MODULE_PARM_DESC(use_mmio, "Use MMIO (1) or PIO(0) to access the NIC. "
149                            "Default is to try MMIO and fallback to PIO.");
150 module_param(rx_copybreak, int, 0);
151 module_param(use_mmio, int, 0);
152
153 #if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32
154 #warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO
155 #undef NETIF_F_TSO
156 #endif
157
158 #if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS)
159 #error TX ring too small!
160 #endif
161
/* Identification info for one supported board model. */
struct typhoon_card_info {
	char *name;		/* model name, printed at probe time */
	int capabilities;	/* bitmask of the TYPHOON_CRYPTO_*, TYPHOON_FIBER,
				 * and TYPHOON_WAKEUP_NEEDS_RESET flags below */
};
166
167 #define TYPHOON_CRYPTO_NONE             0x00
168 #define TYPHOON_CRYPTO_DES              0x01
169 #define TYPHOON_CRYPTO_3DES             0x02
170 #define TYPHOON_CRYPTO_VARIABLE         0x04
171 #define TYPHOON_FIBER                   0x08
172 #define TYPHOON_WAKEUP_NEEDS_RESET      0x10
173
/* Board indices. These must stay in step with typhoon_card_info[],
 * which is indexed directly by these values.
 */
enum typhoon_cards {
	TYPHOON_TX = 0, TYPHOON_TX95, TYPHOON_TX97, TYPHOON_SVR,
	TYPHOON_SVR95, TYPHOON_SVR97, TYPHOON_TXM, TYPHOON_BSVR,
	TYPHOON_FX95, TYPHOON_FX97, TYPHOON_FX95SVR, TYPHOON_FX97SVR,
	TYPHOON_FXM,
};
180
181 /* directly indexed by enum typhoon_cards, above */
static struct typhoon_card_info typhoon_card_info[] __devinitdata = {
	/* first-generation copper boards */
	{ "3Com Typhoon (3C990-TX)",
		TYPHOON_CRYPTO_NONE},
	{ "3Com Typhoon (3CR990-TX-95)",
		TYPHOON_CRYPTO_DES},
	{ "3Com Typhoon (3CR990-TX-97)",
		TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES},
	{ "3Com Typhoon (3C990SVR)",
		TYPHOON_CRYPTO_NONE},
	{ "3Com Typhoon (3CR990SVR95)",
		TYPHOON_CRYPTO_DES},
	{ "3Com Typhoon (3CR990SVR97)",
		TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES},
	/* second-generation (Typhoon2) copper boards */
	{ "3Com Typhoon2 (3C990B-TX-M)",
		TYPHOON_CRYPTO_VARIABLE},
	{ "3Com Typhoon2 (3C990BSVR)",
		TYPHOON_CRYPTO_VARIABLE},
	/* fiber boards */
	{ "3Com Typhoon (3CR990-FX-95)",
		TYPHOON_CRYPTO_DES | TYPHOON_FIBER},
	{ "3Com Typhoon (3CR990-FX-97)",
		TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER},
	{ "3Com Typhoon (3CR990-FX-95 Server)",
		TYPHOON_CRYPTO_DES | TYPHOON_FIBER},
	{ "3Com Typhoon (3CR990-FX-97 Server)",
		TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER},
	{ "3Com Typhoon2 (3C990B-FX-97)",
		TYPHOON_CRYPTO_VARIABLE | TYPHOON_FIBER},
};
210
211 /* Notes on the new subsystem numbering scheme:
212  * bits 0-1 indicate crypto capabilities: (0) variable, (1) DES, or (2) 3DES
213  * bit 4 indicates if this card has secured firmware (we don't support it)
214  * bit 8 indicates if this is a (0) copper or (1) fiber card
215  * bits 12-16 indicate card type: (0) client and (1) server
216  */
/* PCI IDs we claim. The 3CR990B and 3CR990_FX variants share a PCI
 * device ID and are told apart by subsystem device ID, per the
 * numbering scheme described above; the driver_data field carries the
 * enum typhoon_cards index for the matched board.
 */
static struct pci_device_id typhoon_pci_tbl[] = {
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0,TYPHOON_TX },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_95,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX95 },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_97,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX97 },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
	  PCI_ANY_ID, 0x1000, 0, 0, TYPHOON_TXM },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
	  PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FXM },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
	  PCI_ANY_ID, 0x2000, 0, 0, TYPHOON_BSVR },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
	  PCI_ANY_ID, 0x1101, 0, 0, TYPHOON_FX95 },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
	  PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FX97 },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
	  PCI_ANY_ID, 0x2101, 0, 0, TYPHOON_FX95SVR },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
	  PCI_ANY_ID, 0x2102, 0, 0, TYPHOON_FX97SVR },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR95,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR95 },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR97,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR97 },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR },
	{ 0, }
};
246 MODULE_DEVICE_TABLE(pci, typhoon_pci_tbl);
247
248 /* Define the shared memory area
249  * Align everything the 3XP will normally be using.
250  * We'll need to move/align txHi if we start using that ring.
251  */
252 #define __3xp_aligned   ____cacheline_aligned
/* All of the rings and indexes shared with the 3XP, packed into a
 * single area (tp->shared / tp->shared_dma). Each ring the card
 * actively uses starts on its own cache line.
 */
struct typhoon_shared {
	struct typhoon_interface	iface;
	struct typhoon_indexes		indexes			__3xp_aligned;
	struct tx_desc			txLo[TXLO_ENTRIES]	__3xp_aligned;
	struct rx_desc			rxLo[RX_ENTRIES]	__3xp_aligned;
	struct rx_desc			rxHi[RX_ENTRIES]	__3xp_aligned;
	struct cmd_desc			cmd[COMMAND_ENTRIES]	__3xp_aligned;
	struct resp_desc		resp[RESPONSE_ENTRIES]	__3xp_aligned;
	struct rx_free			rxBuff[RXFREE_ENTRIES]	__3xp_aligned;
	u32				zeroWord;
	struct tx_desc			txHi[TXHI_ENTRIES];	/* unused; unaligned on purpose (see above) */
} __attribute__ ((packed));
265
/* Host-side bookkeeping for one receive buffer: the skb and the DMA
 * address its data was mapped to for the card.
 */
struct rxbuff_ent {
	struct sk_buff *skb;
	dma_addr_t	dma_addr;
};
270
/* Per-adapter state. Laid out so the Tx fast path, the Irq/Rx fast
 * path, and the slower command/general state each occupy their own
 * set of cache lines.
 */
struct typhoon {
	/* Tx cache line section */
	struct transmit_ring	txLoRing	____cacheline_aligned;
	struct pci_dev *	tx_pdev;
	void __iomem		*tx_ioaddr;
	u32			txlo_dma_addr;

	/* Irq/Rx cache line section */
	void __iomem		*ioaddr		____cacheline_aligned;
	struct typhoon_indexes *indexes;	/* points into the shared area */
	u8			awaiting_resp;	/* set while a command response is pending */
	u8			duplex;
	u8			speed;
	u8			card_state;	/* enum state_values */
	struct basic_ring	rxLoRing;
	struct pci_dev *	pdev;
	struct net_device *	dev;
	struct napi_struct	napi;
	spinlock_t		state_lock;	/* guards vlgrp and offload */
	struct vlan_group *	vlgrp;
	struct basic_ring	rxHiRing;
	struct basic_ring	rxBuffRing;
	struct rxbuff_ent	rxbuffers[RXENT_ENTRIES];

	/* general section */
	spinlock_t		command_lock	____cacheline_aligned;	/* serializes the cmd/resp rings */
	struct basic_ring	cmdRing;
	struct basic_ring	respRing;
	struct net_device_stats stats;
	struct net_device_stats stats_saved;	/* NOTE(review): appears to preserve stats across
						 * sleep -- confirm against get_stats() */
	const char *		name;
	struct typhoon_shared *	shared;		/* host/card shared area */
	dma_addr_t		shared_dma;	/* bus address of *shared */
	__le16			xcvr_select;
	__le16			wol_events;
	__le32			offload;	/* TYPHOON_OFFLOAD_* task bits */

	/* unused stuff (future use) */
	int			capabilities;
	struct transmit_ring	txHiRing;
};
312
/* wait_type argument for typhoon_reset(): don't wait for completion,
 * busy-wait, or wait in a context where sleeping is allowed.
 */
enum completion_wait_values {
	NoWait = 0, WaitNoSleep, WaitSleep,
};
316
317 /* These are the values for the typhoon.card_state variable.
318  * These determine where the statistics will come from in get_stats().
319  * The sleep image does not support the statistics we need.
320  */
enum state_values {
	Sleeping = 0,	/* running the sleep image; full stats unavailable */
	Running,	/* running the full runtime image */
};
324
325 /* PCI writes are not guaranteed to be posted in order, but outstanding writes
326  * cannot pass a read, so this forces current writes to post.
327  */
328 #define typhoon_post_pci_writes(x) \
329         do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0)
330
331 /* We'll wait up to six seconds for a reset, and half a second normally.
332  */
333 #define TYPHOON_UDELAY                  50
334 #define TYPHOON_RESET_TIMEOUT_SLEEP     (6 * HZ)
335 #define TYPHOON_RESET_TIMEOUT_NOSLEEP   ((6 * 1000000) / TYPHOON_UDELAY)
336 #define TYPHOON_WAIT_TIMEOUT            ((1000000 / 2) / TYPHOON_UDELAY)
337
338 #if defined(NETIF_F_TSO)
339 #define skb_tso_size(x)         (skb_shinfo(x)->gso_size)
340 #define TSO_NUM_DESCRIPTORS     2
341 #define TSO_OFFLOAD_ON          TYPHOON_OFFLOAD_TCP_SEGMENT
342 #else
343 #define NETIF_F_TSO             0
344 #define skb_tso_size(x)         0
345 #define TSO_NUM_DESCRIPTORS     0
346 #define TSO_OFFLOAD_ON          0
347 #endif
348
349 static inline void
350 typhoon_inc_index(u32 *index, const int count, const int num_entries)
351 {
352         /* Increment a ring index -- we can use this for all rings execept
353          * the Rx rings, as they use different size descriptors
354          * otherwise, everything is the same size as a cmd_desc
355          */
356         *index += count * sizeof(struct cmd_desc);
357         *index %= num_entries * sizeof(struct cmd_desc);
358 }
359
/* Advance a command ring index by @count command descriptors. */
static inline void
typhoon_inc_cmd_index(u32 *index, const int count)
{
	typhoon_inc_index(index, count, COMMAND_ENTRIES);
}
365
/* Advance a response ring index by @count response descriptors. */
static inline void
typhoon_inc_resp_index(u32 *index, const int count)
{
	typhoon_inc_index(index, count, RESPONSE_ENTRIES);
}
371
/* Advance the Rx free-buffer ring index by @count entries. */
static inline void
typhoon_inc_rxfree_index(u32 *index, const int count)
{
	typhoon_inc_index(index, count, RXFREE_ENTRIES);
}
377
/* Advance a Tx ring index by @count descriptors. */
static inline void
typhoon_inc_tx_index(u32 *index, const int count)
{
	/* if we start using the Hi Tx ring, this needs updating */
	typhoon_inc_index(index, count, TXLO_ENTRIES);
}
384
385 static inline void
386 typhoon_inc_rx_index(u32 *index, const int count)
387 {
388         /* sizeof(struct rx_desc) != sizeof(struct cmd_desc) */
389         *index += count * sizeof(struct rx_desc);
390         *index %= RX_ENTRIES * sizeof(struct rx_desc);
391 }
392
/* Soft-reset the 3XP and, depending on @wait_type, wait for it to come
 * back up and signal TYPHOON_STATUS_WAITING_FOR_HOST.
 *
 * @ioaddr:	mapped register base
 * @wait_type:	NoWait, WaitNoSleep (busy-wait), or WaitSleep (may sleep)
 *
 * Returns 0 on success, -ETIMEDOUT if the card never reports ready.
 */
static int
typhoon_reset(void __iomem *ioaddr, int wait_type)
{
	int i, err = 0;
	int timeout;

	if(wait_type == WaitNoSleep)
		timeout = TYPHOON_RESET_TIMEOUT_NOSLEEP;
	else
		timeout = TYPHOON_RESET_TIMEOUT_SLEEP;

	/* mask all interrupt sources and clear any pending status */
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);

	/* pulse the soft reset: assert, let it post, then deassert */
	iowrite32(TYPHOON_RESET_ALL, ioaddr + TYPHOON_REG_SOFT_RESET);
	typhoon_post_pci_writes(ioaddr);
	udelay(1);
	iowrite32(TYPHOON_RESET_NONE, ioaddr + TYPHOON_REG_SOFT_RESET);

	if(wait_type != NoWait) {
		for(i = 0; i < timeout; i++) {
			if(ioread32(ioaddr + TYPHOON_REG_STATUS) ==
			   TYPHOON_STATUS_WAITING_FOR_HOST)
				goto out;

			if(wait_type == WaitSleep)
				schedule_timeout_uninterruptible(1);
			else
				udelay(TYPHOON_UDELAY);
		}

		err = -ETIMEDOUT;
	}

out:
	/* mask and clear interrupts again -- reset may have raised some */
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);

	/* The 3XP seems to need a little extra time to complete the load
	 * of the sleep image before we can reliably boot it. Failure to
	 * do this occasionally results in a hung adapter after boot in
	 * typhoon_init_one() while trying to read the MAC address or
	 * putting the card to sleep. 3Com's driver waits 5ms, but
	 * that seems to be overkill. However, if we can sleep, we might
	 * as well give it that much time. Otherwise, we'll give it 500us,
	 * which should be enough (I've see it work well at 100us, but still
	 * saw occasional problems.)
	 */
	if(wait_type == WaitSleep)
		msleep(5);
	else
		udelay(500);
	return err;
}
447
448 static int
449 typhoon_wait_status(void __iomem *ioaddr, u32 wait_value)
450 {
451         int i, err = 0;
452
453         for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
454                 if(ioread32(ioaddr + TYPHOON_REG_STATUS) == wait_value)
455                         goto out;
456                 udelay(TYPHOON_UDELAY);
457         }
458
459         err = -ETIMEDOUT;
460
461 out:
462         return err;
463 }
464
465 static inline void
466 typhoon_media_status(struct net_device *dev, struct resp_desc *resp)
467 {
468         if(resp->parm1 & TYPHOON_MEDIA_STAT_NO_LINK)
469                 netif_carrier_off(dev);
470         else
471                 netif_carrier_on(dev);
472 }
473
474 static inline void
475 typhoon_hello(struct typhoon *tp)
476 {
477         struct basic_ring *ring = &tp->cmdRing;
478         struct cmd_desc *cmd;
479
480         /* We only get a hello request if we've not sent anything to the
481          * card in a long while. If the lock is held, then we're in the
482          * process of issuing a command, so we don't need to respond.
483          */
484         if(spin_trylock(&tp->command_lock)) {
485                 cmd = (struct cmd_desc *)(ring->ringBase + ring->lastWrite);
486                 typhoon_inc_cmd_index(&ring->lastWrite, 1);
487
488                 INIT_COMMAND_NO_RESPONSE(cmd, TYPHOON_CMD_HELLO_RESP);
489                 smp_wmb();
490                 iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
491                 spin_unlock(&tp->command_lock);
492         }
493 }
494
/* Drain the response ring. If @resp_save is non-NULL, the first
 * sequenced response (seqNo set) of up to @resp_size descriptors is
 * copied into it; unsequenced responses are handled inline (media
 * status, hello) or dumped to the log. Returns nonzero if the
 * requested response was captured (or none was requested).
 */
static int
typhoon_process_response(struct typhoon *tp, int resp_size,
				struct resp_desc *resp_save)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct resp_desc *resp;
	u8 *base = tp->respRing.ringBase;
	int count, len, wrap_len;
	u32 cleared;
	u32 ready;

	cleared = le32_to_cpu(indexes->respCleared);
	ready = le32_to_cpu(indexes->respReady);
	while(cleared != ready) {
		resp = (struct resp_desc *)(base + cleared);
		/* numDesc counts descriptors beyond the first one */
		count = resp->numDesc + 1;
		if(resp_save && resp->seqNo) {
			if(count > resp_size) {
				/* too big for the caller's buffer --
				 * flag an error and drop the response
				 */
				resp_save->flags = TYPHOON_RESP_ERROR;
				goto cleanup;
			}

			/* the response may wrap around the ring end;
			 * split the copy in that case
			 */
			wrap_len = 0;
			len = count * sizeof(*resp);
			if(unlikely(cleared + len > RESPONSE_RING_SIZE)) {
				wrap_len = cleared + len - RESPONSE_RING_SIZE;
				len = RESPONSE_RING_SIZE - cleared;
			}

			memcpy(resp_save, resp, len);
			if(unlikely(wrap_len)) {
				resp_save += len / sizeof(*resp);
				memcpy(resp_save, base, wrap_len);
			}

			/* mark the requested response as delivered */
			resp_save = NULL;
		} else if(resp->cmd == TYPHOON_CMD_READ_MEDIA_STATUS) {
			typhoon_media_status(tp->dev, resp);
		} else if(resp->cmd == TYPHOON_CMD_HELLO_RESP) {
			typhoon_hello(tp);
		} else {
			printk(KERN_ERR "%s: dumping unexpected response "
			       "0x%04x:%d:0x%02x:0x%04x:%08x:%08x\n",
			       tp->name, le16_to_cpu(resp->cmd),
			       resp->numDesc, resp->flags,
			       le16_to_cpu(resp->parm1),
			       le32_to_cpu(resp->parm2),
			       le32_to_cpu(resp->parm3));
		}

cleanup:
		typhoon_inc_resp_index(&cleared, count);
	}

	/* publish the new cleared index before returning */
	indexes->respCleared = cpu_to_le32(cleared);
	wmb();
	return (resp_save == NULL);
}
553
554 static inline int
555 typhoon_num_free(int lastWrite, int lastRead, int ringSize)
556 {
557         /* this works for all descriptors but rx_desc, as they are a
558          * different size than the cmd_desc -- everyone else is the same
559          */
560         lastWrite /= sizeof(struct cmd_desc);
561         lastRead /= sizeof(struct cmd_desc);
562         return (ringSize + lastRead - lastWrite - 1) % ringSize;
563 }
564
565 static inline int
566 typhoon_num_free_cmd(struct typhoon *tp)
567 {
568         int lastWrite = tp->cmdRing.lastWrite;
569         int cmdCleared = le32_to_cpu(tp->indexes->cmdCleared);
570
571         return typhoon_num_free(lastWrite, cmdCleared, COMMAND_ENTRIES);
572 }
573
574 static inline int
575 typhoon_num_free_resp(struct typhoon *tp)
576 {
577         int respReady = le32_to_cpu(tp->indexes->respReady);
578         int respCleared = le32_to_cpu(tp->indexes->respCleared);
579
580         return typhoon_num_free(respReady, respCleared, RESPONSE_ENTRIES);
581 }
582
/* Free descriptor slots in the low-priority Tx ring. */
static inline int
typhoon_num_free_tx(struct transmit_ring *ring)
{
	/* if we start using the Hi Tx ring, this needs updating */
	return typhoon_num_free(ring->lastWrite, ring->lastRead, TXLO_ENTRIES);
}
589
590 static int
591 typhoon_issue_command(struct typhoon *tp, int num_cmd, struct cmd_desc *cmd,
592                       int num_resp, struct resp_desc *resp)
593 {
594         struct typhoon_indexes *indexes = tp->indexes;
595         struct basic_ring *ring = &tp->cmdRing;
596         struct resp_desc local_resp;
597         int i, err = 0;
598         int got_resp;
599         int freeCmd, freeResp;
600         int len, wrap_len;
601
602         spin_lock(&tp->command_lock);
603
604         freeCmd = typhoon_num_free_cmd(tp);
605         freeResp = typhoon_num_free_resp(tp);
606
607         if(freeCmd < num_cmd || freeResp < num_resp) {
608                 printk("%s: no descs for cmd, had (needed) %d (%d) cmd, "
609                         "%d (%d) resp\n", tp->name, freeCmd, num_cmd,
610                         freeResp, num_resp);
611                 err = -ENOMEM;
612                 goto out;
613         }
614
615         if(cmd->flags & TYPHOON_CMD_RESPOND) {
616                 /* If we're expecting a response, but the caller hasn't given
617                  * us a place to put it, we'll provide one.
618                  */
619                 tp->awaiting_resp = 1;
620                 if(resp == NULL) {
621                         resp = &local_resp;
622                         num_resp = 1;
623                 }
624         }
625
626         wrap_len = 0;
627         len = num_cmd * sizeof(*cmd);
628         if(unlikely(ring->lastWrite + len > COMMAND_RING_SIZE)) {
629                 wrap_len = ring->lastWrite + len - COMMAND_RING_SIZE;
630                 len = COMMAND_RING_SIZE - ring->lastWrite;
631         }
632
633         memcpy(ring->ringBase + ring->lastWrite, cmd, len);
634         if(unlikely(wrap_len)) {
635                 struct cmd_desc *wrap_ptr = cmd;
636                 wrap_ptr += len / sizeof(*cmd);
637                 memcpy(ring->ringBase, wrap_ptr, wrap_len);
638         }
639
640         typhoon_inc_cmd_index(&ring->lastWrite, num_cmd);
641
642         /* "I feel a presence... another warrior is on the mesa."
643          */
644         wmb();
645         iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
646         typhoon_post_pci_writes(tp->ioaddr);
647
648         if((cmd->flags & TYPHOON_CMD_RESPOND) == 0)
649                 goto out;
650
651         /* Ugh. We'll be here about 8ms, spinning our thumbs, unable to
652          * preempt or do anything other than take interrupts. So, don't
653          * wait for a response unless you have to.
654          *
655          * I've thought about trying to sleep here, but we're called
656          * from many contexts that don't allow that. Also, given the way
657          * 3Com has implemented irq coalescing, we would likely timeout --
658          * this has been observed in real life!
659          *
660          * The big killer is we have to wait to get stats from the card,
661          * though we could go to a periodic refresh of those if we don't
662          * mind them getting somewhat stale. The rest of the waiting
663          * commands occur during open/close/suspend/resume, so they aren't
664          * time critical. Creating SAs in the future will also have to
665          * wait here.
666          */
667         got_resp = 0;
668         for(i = 0; i < TYPHOON_WAIT_TIMEOUT && !got_resp; i++) {
669                 if(indexes->respCleared != indexes->respReady)
670                         got_resp = typhoon_process_response(tp, num_resp,
671                                                                 resp);
672                 udelay(TYPHOON_UDELAY);
673         }
674
675         if(!got_resp) {
676                 err = -ETIMEDOUT;
677                 goto out;
678         }
679
680         /* Collect the error response even if we don't care about the
681          * rest of the response
682          */
683         if(resp->flags & TYPHOON_RESP_ERROR)
684                 err = -EIO;
685
686 out:
687         if(tp->awaiting_resp) {
688                 tp->awaiting_resp = 0;
689                 smp_wmb();
690
691                 /* Ugh. If a response was added to the ring between
692                  * the call to typhoon_process_response() and the clearing
693                  * of tp->awaiting_resp, we could have missed the interrupt
694                  * and it could hang in the ring an indeterminate amount of
695                  * time. So, check for it, and interrupt ourselves if this
696                  * is the case.
697                  */
698                 if(indexes->respCleared != indexes->respReady)
699                         iowrite32(1, tp->ioaddr + TYPHOON_REG_SELF_INTERRUPT);
700         }
701
702         spin_unlock(&tp->command_lock);
703         return err;
704 }
705
706 static void
707 typhoon_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
708 {
709         struct typhoon *tp = netdev_priv(dev);
710         struct cmd_desc xp_cmd;
711         int err;
712
713         spin_lock_bh(&tp->state_lock);
714         if(!tp->vlgrp != !grp) {
715                 /* We've either been turned on for the first time, or we've
716                  * been turned off. Update the 3XP.
717                  */
718                 if(grp)
719                         tp->offload |= TYPHOON_OFFLOAD_VLAN;
720                 else
721                         tp->offload &= ~TYPHOON_OFFLOAD_VLAN;
722
723                 /* If the interface is up, the runtime is running -- and we
724                  * must be up for the vlan core to call us.
725                  *
726                  * Do the command outside of the spin lock, as it is slow.
727                  */
728                 INIT_COMMAND_WITH_RESPONSE(&xp_cmd,
729                                         TYPHOON_CMD_SET_OFFLOAD_TASKS);
730                 xp_cmd.parm2 = tp->offload;
731                 xp_cmd.parm3 = tp->offload;
732                 spin_unlock_bh(&tp->state_lock);
733                 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
734                 if(err < 0)
735                         printk("%s: vlan offload error %d\n", tp->name, -err);
736                 spin_lock_bh(&tp->state_lock);
737         }
738
739         /* now make the change visible */
740         tp->vlgrp = grp;
741         spin_unlock_bh(&tp->state_lock);
742 }
743
/* Fill in a TCP segmentation option descriptor for @skb at the Tx
 * ring's current write position. @ring_dma is the bus address of the
 * ring base; it is used to point respAddrLo at this descriptor's own
 * bytesTx field in the ring.
 */
static inline void
typhoon_tso_fill(struct sk_buff *skb, struct transmit_ring *txRing,
			u32 ring_dma)
{
	struct tcpopt_desc *tcpd;
	u32 tcpd_offset = ring_dma;

	tcpd = (struct tcpopt_desc *) (txRing->ringBase + txRing->lastWrite);
	tcpd_offset += txRing->lastWrite;
	tcpd_offset += offsetof(struct tcpopt_desc, bytesTx);
	typhoon_inc_tx_index(&txRing->lastWrite, 1);

	/* one option descriptor covers the whole skb: it is both the
	 * first and last segment descriptor
	 */
	tcpd->flags = TYPHOON_OPT_DESC | TYPHOON_OPT_TCP_SEG;
	tcpd->numDesc = 1;
	tcpd->mss_flags = cpu_to_le16(skb_tso_size(skb));
	tcpd->mss_flags |= TYPHOON_TSO_FIRST | TYPHOON_TSO_LAST;
	tcpd->respAddrLo = cpu_to_le32(tcpd_offset);
	tcpd->bytesTx = cpu_to_le32(skb->len);
	tcpd->status = 0;
}
764
/* hard_start_xmit entry point: queue @skb on the low-priority Tx ring.
 *
 * Builds a packet descriptor, optional VLAN/checksum/TSO processing
 * flags, and one fragment descriptor per DMA segment, then rings the
 * doorbell register.  Always returns 0 (packet accepted); if the ring
 * is close to full afterwards, the queue is stopped until Tx-complete
 * processing frees space.
 */
static int
typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct typhoon *tp = netdev_priv(dev);
	struct transmit_ring *txRing;
	struct tx_desc *txd, *first_txd;
	dma_addr_t skb_dma;
	int numDesc;

	/* we have two rings to choose from, but we only use txLo for now
	 * If we start using the Hi ring as well, we'll need to update
	 * typhoon_stop_runtime(), typhoon_interrupt(), typhoon_num_free_tx(),
	 * and TXHI_ENTRIES to match, as well as update the TSO code below
	 * to get the right DMA address
	 */
	txRing = &tp->txLoRing;

	/* We need one descriptor for each fragment of the sk_buff, plus the
	 * one for the ->data area of it.
	 *
	 * The docs say a maximum of 16 fragment descriptors per TCP option
	 * descriptor, then make a new packet descriptor and option descriptor
	 * for the next 16 fragments. The engineers say just an option
	 * descriptor is needed. I've tested up to 26 fragments with a single
	 * packet descriptor/option descriptor combo, so I use that for now.
	 *
	 * If problems develop with TSO, check this first.
	 */
	numDesc = skb_shinfo(skb)->nr_frags + 1;
	if (skb_is_gso(skb))
		numDesc++;

	/* When checking for free space in the ring, we need to also
	 * account for the initial Tx descriptor, and we always must leave
	 * at least one descriptor unused in the ring so that it doesn't
	 * wrap and look empty.
	 *
	 * The only time we should loop here is when we hit the race
	 * between marking the queue awake and updating the cleared index.
	 * Just loop and it will appear. This comes from the acenic driver.
	 */
	while(unlikely(typhoon_num_free_tx(txRing) < (numDesc + 2)))
		smp_rmb();

	first_txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite);
	typhoon_inc_tx_index(&txRing->lastWrite, 1);

	first_txd->flags = TYPHOON_TX_DESC | TYPHOON_DESC_VALID;
	first_txd->numDesc = 0;
	first_txd->len = 0;
	/* Not a DMA address: the skb pointer itself is stashed here so
	 * Tx-complete processing can find and free the skb later.
	 */
	first_txd->tx_addr = (u64)((unsigned long) skb);
	first_txd->processFlags = 0;

	if(skb->ip_summed == CHECKSUM_PARTIAL) {
		/* The 3XP will figure out if this is UDP/TCP */
		first_txd->processFlags |= TYPHOON_TX_PF_TCP_CHKSUM;
		first_txd->processFlags |= TYPHOON_TX_PF_UDP_CHKSUM;
		first_txd->processFlags |= TYPHOON_TX_PF_IP_CHKSUM;
	}

	if(vlan_tx_tag_present(skb)) {
		/* Ask the firmware to insert the VLAN tag on the wire. */
		first_txd->processFlags |=
		    TYPHOON_TX_PF_INSERT_VLAN | TYPHOON_TX_PF_VLAN_PRIORITY;
		first_txd->processFlags |=
		    cpu_to_le32(ntohs(vlan_tx_tag_get(skb)) <<
				TYPHOON_TX_PF_VLAN_TAG_SHIFT);
	}

	if (skb_is_gso(skb)) {
		first_txd->processFlags |= TYPHOON_TX_PF_TCP_SEGMENT;
		first_txd->numDesc++;

		typhoon_tso_fill(skb, txRing, tp->txlo_dma_addr);
	}

	txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite);
	typhoon_inc_tx_index(&txRing->lastWrite, 1);

	/* No need to worry about padding packet -- the firmware pads
	 * it with zeros to ETH_ZLEN for us.
	 */
	if(skb_shinfo(skb)->nr_frags == 0) {
		/* Linear skb: a single fragment descriptor suffices. */
		skb_dma = pci_map_single(tp->tx_pdev, skb->data, skb->len,
				       PCI_DMA_TODEVICE);
		txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
		txd->len = cpu_to_le16(skb->len);
		txd->frag.addr = cpu_to_le32(skb_dma);
		txd->frag.addrHi = 0;
		first_txd->numDesc++;
	} else {
		int i, len;

		/* One descriptor for the linear head, then one per page
		 * fragment.
		 */
		len = skb_headlen(skb);
		skb_dma = pci_map_single(tp->tx_pdev, skb->data, len,
				         PCI_DMA_TODEVICE);
		txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
		txd->len = cpu_to_le16(len);
		txd->frag.addr = cpu_to_le32(skb_dma);
		txd->frag.addrHi = 0;
		first_txd->numDesc++;

		for(i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			void *frag_addr;

			txd = (struct tx_desc *) (txRing->ringBase +
						txRing->lastWrite);
			typhoon_inc_tx_index(&txRing->lastWrite, 1);

			len = frag->size;
			frag_addr = (void *) page_address(frag->page) +
						frag->page_offset;
			skb_dma = pci_map_single(tp->tx_pdev, frag_addr, len,
					 PCI_DMA_TODEVICE);
			txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
			txd->len = cpu_to_le16(len);
			txd->frag.addr = cpu_to_le32(skb_dma);
			txd->frag.addrHi = 0;
			first_txd->numDesc++;
		}
	}

	/* Kick the 3XP
	 */
	/* The wmb() orders all descriptor stores before the doorbell write
	 * so the NIC never sees a half-built descriptor chain.
	 */
	wmb();
	iowrite32(txRing->lastWrite, tp->tx_ioaddr + txRing->writeRegister);

	dev->trans_start = jiffies;

	/* If we don't have room to put the worst case packet on the
	 * queue, then we must stop the queue. We need 2 extra
	 * descriptors -- one to prevent ring wrap, and one for the
	 * Tx header.
	 */
	numDesc = MAX_SKB_FRAGS + TSO_NUM_DESCRIPTORS + 1;

	if(typhoon_num_free_tx(txRing) < (numDesc + 2)) {
		netif_stop_queue(dev);

		/* A Tx complete IRQ could have gotten inbetween, making
		 * the ring free again. Only need to recheck here, since
		 * Tx is serialized.
		 */
		if(typhoon_num_free_tx(txRing) >= (numDesc + 2))
			netif_wake_queue(dev);
	}

	return 0;
}
914
915 static void
916 typhoon_set_rx_mode(struct net_device *dev)
917 {
918         struct typhoon *tp = netdev_priv(dev);
919         struct cmd_desc xp_cmd;
920         u32 mc_filter[2];
921         __le16 filter;
922
923         filter = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
924         if(dev->flags & IFF_PROMISC) {
925                 filter |= TYPHOON_RX_FILTER_PROMISCOUS;
926         } else if((dev->mc_count > multicast_filter_limit) ||
927                   (dev->flags & IFF_ALLMULTI)) {
928                 /* Too many to match, or accept all multicasts. */
929                 filter |= TYPHOON_RX_FILTER_ALL_MCAST;
930         } else if(dev->mc_count) {
931                 struct dev_mc_list *mclist;
932                 int i;
933
934                 memset(mc_filter, 0, sizeof(mc_filter));
935                 for(i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
936                     i++, mclist = mclist->next) {
937                         int bit = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3f;
938                         mc_filter[bit >> 5] |= 1 << (bit & 0x1f);
939                 }
940
941                 INIT_COMMAND_NO_RESPONSE(&xp_cmd,
942                                          TYPHOON_CMD_SET_MULTICAST_HASH);
943                 xp_cmd.parm1 = TYPHOON_MCAST_HASH_SET;
944                 xp_cmd.parm2 = cpu_to_le32(mc_filter[0]);
945                 xp_cmd.parm3 = cpu_to_le32(mc_filter[1]);
946                 typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
947
948                 filter |= TYPHOON_RX_FILTER_MCAST_HASH;
949         }
950
951         INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
952         xp_cmd.parm1 = filter;
953         typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
954 }
955
/* Fetch the hardware statistics block from the 3XP and refresh
 * tp->stats from it.
 *
 * The counters returned by the NIC cover only the current runtime
 * session, so the totals saved across sleep transitions
 * (tp->stats_saved) are added back in at the end.  Link speed/duplex
 * are also snooped out of the stats response into tp->speed/tp->duplex.
 *
 * Returns 0 on success or the negative error from
 * typhoon_issue_command().  NOTE(review): per the file header, waiting
 * for a command response polls for ~8ms, so this is not a cheap call.
 */
static int
typhoon_do_get_stats(struct typhoon *tp)
{
	struct net_device_stats *stats = &tp->stats;
	struct net_device_stats *saved = &tp->stats_saved;
	struct cmd_desc xp_cmd;
	struct resp_desc xp_resp[7];
	struct stats_resp *s = (struct stats_resp *) xp_resp;
	int err;

	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_STATS);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 7, xp_resp);
	if(err < 0)
		return err;

	/* 3Com's Linux driver uses txMultipleCollisions as it's
	 * collisions value, but there is some other collision info as well...
	 *
	 * The extra status reported would be a good candidate for
	 * ethtool_ops->get_{strings,stats}()
	 */
	stats->tx_packets = le32_to_cpu(s->txPackets);
	stats->tx_bytes = le64_to_cpu(s->txBytes);
	stats->tx_errors = le32_to_cpu(s->txCarrierLost);
	stats->tx_carrier_errors = le32_to_cpu(s->txCarrierLost);
	stats->collisions = le32_to_cpu(s->txMultipleCollisions);
	stats->rx_packets = le32_to_cpu(s->rxPacketsGood);
	stats->rx_bytes = le64_to_cpu(s->rxBytesGood);
	stats->rx_fifo_errors = le32_to_cpu(s->rxFifoOverruns);
	stats->rx_errors = le32_to_cpu(s->rxFifoOverruns) +
			le32_to_cpu(s->BadSSD) + le32_to_cpu(s->rxCrcErrors);
	stats->rx_crc_errors = le32_to_cpu(s->rxCrcErrors);
	stats->rx_length_errors = le32_to_cpu(s->rxOversized);
	/* Keep the cached link state current for ethtool get_settings. */
	tp->speed = (s->linkStatus & TYPHOON_LINK_100MBPS) ?
			SPEED_100 : SPEED_10;
	tp->duplex = (s->linkStatus & TYPHOON_LINK_FULL_DUPLEX) ?
			DUPLEX_FULL : DUPLEX_HALF;

	/* add in the saved statistics
	 */
	stats->tx_packets += saved->tx_packets;
	stats->tx_bytes += saved->tx_bytes;
	stats->tx_errors += saved->tx_errors;
	stats->collisions += saved->collisions;
	stats->rx_packets += saved->rx_packets;
	stats->rx_bytes += saved->rx_bytes;
	stats->rx_fifo_errors += saved->rx_fifo_errors;
	stats->rx_errors += saved->rx_errors;
	stats->rx_crc_errors += saved->rx_crc_errors;
	stats->rx_length_errors += saved->rx_length_errors;

	return 0;
}
1009
1010 static struct net_device_stats *
1011 typhoon_get_stats(struct net_device *dev)
1012 {
1013         struct typhoon *tp = netdev_priv(dev);
1014         struct net_device_stats *stats = &tp->stats;
1015         struct net_device_stats *saved = &tp->stats_saved;
1016
1017         smp_rmb();
1018         if(tp->card_state == Sleeping)
1019                 return saved;
1020
1021         if(typhoon_do_get_stats(tp) < 0) {
1022                 printk(KERN_ERR "%s: error getting stats\n", dev->name);
1023                 return saved;
1024         }
1025
1026         return stats;
1027 }
1028
1029 static int
1030 typhoon_set_mac_address(struct net_device *dev, void *addr)
1031 {
1032         struct sockaddr *saddr = (struct sockaddr *) addr;
1033
1034         if(netif_running(dev))
1035                 return -EBUSY;
1036
1037         memcpy(dev->dev_addr, saddr->sa_data, dev->addr_len);
1038         return 0;
1039 }
1040
1041 static void
1042 typhoon_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1043 {
1044         struct typhoon *tp = netdev_priv(dev);
1045         struct pci_dev *pci_dev = tp->pdev;
1046         struct cmd_desc xp_cmd;
1047         struct resp_desc xp_resp[3];
1048
1049         smp_rmb();
1050         if(tp->card_state == Sleeping) {
1051                 strcpy(info->fw_version, "Sleep image");
1052         } else {
1053                 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
1054                 if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
1055                         strcpy(info->fw_version, "Unknown runtime");
1056                 } else {
1057                         u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2);
1058                         snprintf(info->fw_version, 32, "%02x.%03x.%03x",
1059                                  sleep_ver >> 24, (sleep_ver >> 12) & 0xfff,
1060                                  sleep_ver & 0xfff);
1061                 }
1062         }
1063
1064         strcpy(info->driver, DRV_MODULE_NAME);
1065         strcpy(info->version, DRV_MODULE_VERSION);
1066         strcpy(info->bus_info, pci_name(pci_dev));
1067 }
1068
1069 static int
1070 typhoon_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1071 {
1072         struct typhoon *tp = netdev_priv(dev);
1073
1074         cmd->supported = SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
1075                                 SUPPORTED_Autoneg;
1076
1077         switch (tp->xcvr_select) {
1078         case TYPHOON_XCVR_10HALF:
1079                 cmd->advertising = ADVERTISED_10baseT_Half;
1080                 break;
1081         case TYPHOON_XCVR_10FULL:
1082                 cmd->advertising = ADVERTISED_10baseT_Full;
1083                 break;
1084         case TYPHOON_XCVR_100HALF:
1085                 cmd->advertising = ADVERTISED_100baseT_Half;
1086                 break;
1087         case TYPHOON_XCVR_100FULL:
1088                 cmd->advertising = ADVERTISED_100baseT_Full;
1089                 break;
1090         case TYPHOON_XCVR_AUTONEG:
1091                 cmd->advertising = ADVERTISED_10baseT_Half |
1092                                             ADVERTISED_10baseT_Full |
1093                                             ADVERTISED_100baseT_Half |
1094                                             ADVERTISED_100baseT_Full |
1095                                             ADVERTISED_Autoneg;
1096                 break;
1097         }
1098
1099         if(tp->capabilities & TYPHOON_FIBER) {
1100                 cmd->supported |= SUPPORTED_FIBRE;
1101                 cmd->advertising |= ADVERTISED_FIBRE;
1102                 cmd->port = PORT_FIBRE;
1103         } else {
1104                 cmd->supported |= SUPPORTED_10baseT_Half |
1105                                         SUPPORTED_10baseT_Full |
1106                                         SUPPORTED_TP;
1107                 cmd->advertising |= ADVERTISED_TP;
1108                 cmd->port = PORT_TP;
1109         }
1110
1111         /* need to get stats to make these link speed/duplex valid */
1112         typhoon_do_get_stats(tp);
1113         cmd->speed = tp->speed;
1114         cmd->duplex = tp->duplex;
1115         cmd->phy_address = 0;
1116         cmd->transceiver = XCVR_INTERNAL;
1117         if(tp->xcvr_select == TYPHOON_XCVR_AUTONEG)
1118                 cmd->autoneg = AUTONEG_ENABLE;
1119         else
1120                 cmd->autoneg = AUTONEG_DISABLE;
1121         cmd->maxtxpkt = 1;
1122         cmd->maxrxpkt = 1;
1123
1124         return 0;
1125 }
1126
1127 static int
1128 typhoon_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1129 {
1130         struct typhoon *tp = netdev_priv(dev);
1131         struct cmd_desc xp_cmd;
1132         __le16 xcvr;
1133         int err;
1134
1135         err = -EINVAL;
1136         if(cmd->autoneg == AUTONEG_ENABLE) {
1137                 xcvr = TYPHOON_XCVR_AUTONEG;
1138         } else {
1139                 if(cmd->duplex == DUPLEX_HALF) {
1140                         if(cmd->speed == SPEED_10)
1141                                 xcvr = TYPHOON_XCVR_10HALF;
1142                         else if(cmd->speed == SPEED_100)
1143                                 xcvr = TYPHOON_XCVR_100HALF;
1144                         else
1145                                 goto out;
1146                 } else if(cmd->duplex == DUPLEX_FULL) {
1147                         if(cmd->speed == SPEED_10)
1148                                 xcvr = TYPHOON_XCVR_10FULL;
1149                         else if(cmd->speed == SPEED_100)
1150                                 xcvr = TYPHOON_XCVR_100FULL;
1151                         else
1152                                 goto out;
1153                 } else
1154                         goto out;
1155         }
1156
1157         INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT);
1158         xp_cmd.parm1 = xcvr;
1159         err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1160         if(err < 0)
1161                 goto out;
1162
1163         tp->xcvr_select = xcvr;
1164         if(cmd->autoneg == AUTONEG_ENABLE) {
1165                 tp->speed = 0xff;       /* invalid */
1166                 tp->duplex = 0xff;      /* invalid */
1167         } else {
1168                 tp->speed = cmd->speed;
1169                 tp->duplex = cmd->duplex;
1170         }
1171
1172 out:
1173         return err;
1174 }
1175
1176 static void
1177 typhoon_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1178 {
1179         struct typhoon *tp = netdev_priv(dev);
1180
1181         wol->supported = WAKE_PHY | WAKE_MAGIC;
1182         wol->wolopts = 0;
1183         if(tp->wol_events & TYPHOON_WAKE_LINK_EVENT)
1184                 wol->wolopts |= WAKE_PHY;
1185         if(tp->wol_events & TYPHOON_WAKE_MAGIC_PKT)
1186                 wol->wolopts |= WAKE_MAGIC;
1187         memset(&wol->sopass, 0, sizeof(wol->sopass));
1188 }
1189
1190 static int
1191 typhoon_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1192 {
1193         struct typhoon *tp = netdev_priv(dev);
1194
1195         if(wol->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
1196                 return -EINVAL;
1197
1198         tp->wol_events = 0;
1199         if(wol->wolopts & WAKE_PHY)
1200                 tp->wol_events |= TYPHOON_WAKE_LINK_EVENT;
1201         if(wol->wolopts & WAKE_MAGIC)
1202                 tp->wol_events |= TYPHOON_WAKE_MAGIC_PKT;
1203
1204         return 0;
1205 }
1206
1207 static u32
1208 typhoon_get_rx_csum(struct net_device *dev)
1209 {
1210         /* For now, we don't allow turning off RX checksums.
1211          */
1212         return 1;
1213 }
1214
1215 static void
1216 typhoon_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
1217 {
1218         ering->rx_max_pending = RXENT_ENTRIES;
1219         ering->rx_mini_max_pending = 0;
1220         ering->rx_jumbo_max_pending = 0;
1221         ering->tx_max_pending = TXLO_ENTRIES - 1;
1222
1223         ering->rx_pending = RXENT_ENTRIES;
1224         ering->rx_mini_pending = 0;
1225         ering->rx_jumbo_pending = 0;
1226         ering->tx_pending = TXLO_ENTRIES - 1;
1227 }
1228
/* ethtool entry points.  Tx-side offload toggles use the generic
 * ethtool_op_* helpers; Rx checksumming is fixed on by firmware (see
 * typhoon_get_rx_csum()), so no set_rx_csum is provided.
 */
static const struct ethtool_ops typhoon_ethtool_ops = {
	.get_settings		= typhoon_get_settings,
	.set_settings		= typhoon_set_settings,
	.get_drvinfo		= typhoon_get_drvinfo,
	.get_wol		= typhoon_get_wol,
	.set_wol		= typhoon_set_wol,
	.get_link		= ethtool_op_get_link,
	.get_rx_csum		= typhoon_get_rx_csum,
	.set_tx_csum		= ethtool_op_set_tx_csum,
	.set_sg			= ethtool_op_set_sg,
	.set_tso		= ethtool_op_set_tso,
	.get_ringparam		= typhoon_get_ringparam,
};
1242
/* Busy-wait for the 3XP to raise the boot-command interrupt status bit.
 *
 * Polls up to TYPHOON_WAIT_TIMEOUT iterations of TYPHOON_UDELAY
 * microseconds each.  The status bit is acknowledged (written back) on
 * exit whether or not it was seen; the ioread32 here also forces any
 * preceding posted PCI writes (callers rely on this -- see the comment
 * above the download loop in typhoon_download_firmware()).
 *
 * Returns 0 if the interrupt was observed, -ETIMEDOUT otherwise.
 */
static int
typhoon_wait_interrupt(void __iomem *ioaddr)
{
	int i, err = 0;

	for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
		if(ioread32(ioaddr + TYPHOON_REG_INTR_STATUS) &
		   TYPHOON_INTR_BOOTCMD)
			goto out;
		udelay(TYPHOON_UDELAY);
	}

	err = -ETIMEDOUT;

out:
	/* Ack the interrupt so the next wait starts clean. */
	iowrite32(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS);
	return err;
}
1261
1262 #define shared_offset(x)        offsetof(struct typhoon_shared, x)
1263
/* One-time setup of the host/NIC shared memory interface.
 *
 * Zeroes the shared area, then fills in the typhoon_interface block
 * with the bus addresses and sizes of every ring and the index page,
 * and wires up the driver-side ring bookkeeping (ringBase pointers,
 * doorbell registers, default offload mask, locks).  Called once at
 * probe time; the card is left marked Sleeping.
 *
 * NOTE(review): only the low 32 bits of each bus address are stored --
 * the *Hi members stay zero from the memset, so the shared area is
 * presumably required to live below 4GB; confirm against the allocation
 * in the probe path.
 */
static void
typhoon_init_interface(struct typhoon *tp)
{
	struct typhoon_interface *iface = &tp->shared->iface;
	dma_addr_t shared_dma;

	memset(tp->shared, 0, sizeof(struct typhoon_shared));

	/* The *Hi members of iface are all init'd to zero by the memset().
	 */
	shared_dma = tp->shared_dma + shared_offset(indexes);
	iface->ringIndex = cpu_to_le32(shared_dma);

	shared_dma = tp->shared_dma + shared_offset(txLo);
	iface->txLoAddr = cpu_to_le32(shared_dma);
	iface->txLoSize = cpu_to_le32(TXLO_ENTRIES * sizeof(struct tx_desc));

	shared_dma = tp->shared_dma + shared_offset(txHi);
	iface->txHiAddr = cpu_to_le32(shared_dma);
	iface->txHiSize = cpu_to_le32(TXHI_ENTRIES * sizeof(struct tx_desc));

	shared_dma = tp->shared_dma + shared_offset(rxBuff);
	iface->rxBuffAddr = cpu_to_le32(shared_dma);
	iface->rxBuffSize = cpu_to_le32(RXFREE_ENTRIES *
					sizeof(struct rx_free));

	shared_dma = tp->shared_dma + shared_offset(rxLo);
	iface->rxLoAddr = cpu_to_le32(shared_dma);
	iface->rxLoSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc));

	shared_dma = tp->shared_dma + shared_offset(rxHi);
	iface->rxHiAddr = cpu_to_le32(shared_dma);
	iface->rxHiSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc));

	shared_dma = tp->shared_dma + shared_offset(cmd);
	iface->cmdAddr = cpu_to_le32(shared_dma);
	iface->cmdSize = cpu_to_le32(COMMAND_RING_SIZE);

	shared_dma = tp->shared_dma + shared_offset(resp);
	iface->respAddr = cpu_to_le32(shared_dma);
	iface->respSize = cpu_to_le32(RESPONSE_RING_SIZE);

	shared_dma = tp->shared_dma + shared_offset(zeroWord);
	iface->zeroAddr = cpu_to_le32(shared_dma);

	/* Host-side views of the rings that live in the shared area. */
	tp->indexes = &tp->shared->indexes;
	tp->txLoRing.ringBase = (u8 *) tp->shared->txLo;
	tp->txHiRing.ringBase = (u8 *) tp->shared->txHi;
	tp->rxLoRing.ringBase = (u8 *) tp->shared->rxLo;
	tp->rxHiRing.ringBase = (u8 *) tp->shared->rxHi;
	tp->rxBuffRing.ringBase = (u8 *) tp->shared->rxBuff;
	tp->cmdRing.ringBase = (u8 *) tp->shared->cmd;
	tp->respRing.ringBase = (u8 *) tp->shared->resp;

	tp->txLoRing.writeRegister = TYPHOON_REG_TX_LO_READY;
	tp->txHiRing.writeRegister = TYPHOON_REG_TX_HI_READY;

	/* Cached for typhoon_tso_fill()'s response-address computation. */
	tp->txlo_dma_addr = le32_to_cpu(iface->txLoAddr);
	tp->card_state = Sleeping;
	/* Publish card_state before anyone does an smp_rmb()-guarded read. */
	smp_wmb();

	tp->offload = TYPHOON_OFFLOAD_IP_CHKSUM | TYPHOON_OFFLOAD_TCP_CHKSUM;
	tp->offload |= TYPHOON_OFFLOAD_UDP_CHKSUM | TSO_OFFLOAD_ON;

	spin_lock_init(&tp->command_lock);
	spin_lock_init(&tp->state_lock);
}
1331
1332 static void
1333 typhoon_init_rings(struct typhoon *tp)
1334 {
1335         memset(tp->indexes, 0, sizeof(struct typhoon_indexes));
1336
1337         tp->txLoRing.lastWrite = 0;
1338         tp->txHiRing.lastWrite = 0;
1339         tp->rxLoRing.lastWrite = 0;
1340         tp->rxHiRing.lastWrite = 0;
1341         tp->rxBuffRing.lastWrite = 0;
1342         tp->cmdRing.lastWrite = 0;
1343         tp->cmdRing.lastWrite = 0;
1344
1345         tp->txLoRing.lastRead = 0;
1346         tp->txHiRing.lastRead = 0;
1347 }
1348
1349 static const struct firmware *typhoon_fw;
1350
1351 static int
1352 typhoon_request_firmware(struct typhoon *tp)
1353 {
1354         const struct typhoon_file_header *fHdr;
1355         const struct typhoon_section_header *sHdr;
1356         const u8 *image_data;
1357         u32 numSections;
1358         u32 section_len;
1359         u32 remaining;
1360         int err;
1361
1362         if (typhoon_fw)
1363                 return 0;
1364
1365         err = request_firmware(&typhoon_fw, FIRMWARE_NAME, &tp->pdev->dev);
1366         if (err) {
1367                 printk(KERN_ERR "%s: Failed to load firmware \"%s\"\n",
1368                                 tp->name, FIRMWARE_NAME);
1369                 return err;
1370         }
1371
1372         image_data = (u8 *) typhoon_fw->data;
1373         remaining = typhoon_fw->size;
1374         if (remaining < sizeof(struct typhoon_file_header))
1375                 goto invalid_fw;
1376
1377         fHdr = (struct typhoon_file_header *) image_data;
1378         if (memcmp(fHdr->tag, "TYPHOON", 8))
1379                 goto invalid_fw;
1380
1381         numSections = le32_to_cpu(fHdr->numSections);
1382         image_data += sizeof(struct typhoon_file_header);
1383         remaining -= sizeof(struct typhoon_file_header);
1384
1385         while (numSections--) {
1386                 if (remaining < sizeof(struct typhoon_section_header))
1387                         goto invalid_fw;
1388
1389                 sHdr = (struct typhoon_section_header *) image_data;
1390                 image_data += sizeof(struct typhoon_section_header);
1391                 section_len = le32_to_cpu(sHdr->len);
1392
1393                 if (remaining < section_len)
1394                         goto invalid_fw;
1395
1396                 image_data += section_len;
1397                 remaining -= section_len;
1398         }
1399
1400         return 0;
1401
1402 invalid_fw:
1403         printk(KERN_ERR "%s: Invalid firmware image\n", tp->name);
1404         release_firmware(typhoon_fw);
1405         typhoon_fw = NULL;
1406         return -EINVAL;
1407 }
1408
/* Push the runtime firmware image down to the 3XP.
 *
 * Walks the section list validated by typhoon_request_firmware(),
 * copying each section into a DMA-able bounce page (PAGE_SIZE at a
 * time) and handing it to the card via the boot registers.  Each chunk
 * carries a folded 16-bit checksum that the card verifies.  The HMAC
 * digest from the file header is loaded first so the card can
 * authenticate the image.
 *
 * Interrupts: the boot-command status bit is temporarily enabled and
 * masked around the transfer and restored on exit; completion of each
 * chunk is detected by polling in typhoon_wait_interrupt().
 *
 * Returns 0 on success, -ENOMEM if the bounce page cannot be
 * allocated, or -ETIMEDOUT if the card stops responding.
 */
static int
typhoon_download_firmware(struct typhoon *tp)
{
	void __iomem *ioaddr = tp->ioaddr;
	struct pci_dev *pdev = tp->pdev;
	const struct typhoon_file_header *fHdr;
	const struct typhoon_section_header *sHdr;
	const u8 *image_data;
	void *dpage;
	dma_addr_t dpage_dma;
	__sum16 csum;
	u32 irqEnabled;
	u32 irqMasked;
	u32 numSections;
	u32 section_len;
	u32 len;
	u32 load_addr;
	u32 hmac;
	int i;
	int err;

	image_data = (u8 *) typhoon_fw->data;
	fHdr = (struct typhoon_file_header *) image_data;

	/* Cannot just map the firmware image using pci_map_single() as
	 * the firmware is vmalloc()'d and may not be physically contiguous,
	 * so we allocate some consistent memory to copy the sections into.
	 */
	err = -ENOMEM;
	dpage = pci_alloc_consistent(pdev, PAGE_SIZE, &dpage_dma);
	if(!dpage) {
		printk(KERN_ERR "%s: no DMA mem for firmware\n", tp->name);
		goto err_out;
	}

	/* Enable + unmask the boot-command interrupt bit for the duration
	 * of the download; both registers are restored at err_out_irq.
	 */
	irqEnabled = ioread32(ioaddr + TYPHOON_REG_INTR_ENABLE);
	iowrite32(irqEnabled | TYPHOON_INTR_BOOTCMD,
	       ioaddr + TYPHOON_REG_INTR_ENABLE);
	irqMasked = ioread32(ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(irqMasked | TYPHOON_INTR_BOOTCMD,
	       ioaddr + TYPHOON_REG_INTR_MASK);

	err = -ETIMEDOUT;
	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
		printk(KERN_ERR "%s: card ready timeout\n", tp->name);
		goto err_out_irq;
	}

	numSections = le32_to_cpu(fHdr->numSections);
	load_addr = le32_to_cpu(fHdr->startAddr);

	/* Load the image's HMAC digest so the card can authenticate it,
	 * then kick off the runtime-image download.
	 */
	iowrite32(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS);
	iowrite32(load_addr, ioaddr + TYPHOON_REG_DOWNLOAD_BOOT_ADDR);
	hmac = le32_to_cpu(fHdr->hmacDigest[0]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_0);
	hmac = le32_to_cpu(fHdr->hmacDigest[1]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_1);
	hmac = le32_to_cpu(fHdr->hmacDigest[2]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_2);
	hmac = le32_to_cpu(fHdr->hmacDigest[3]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_3);
	hmac = le32_to_cpu(fHdr->hmacDigest[4]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_4);
	typhoon_post_pci_writes(ioaddr);
	iowrite32(TYPHOON_BOOTCMD_RUNTIME_IMAGE, ioaddr + TYPHOON_REG_COMMAND);

	image_data += sizeof(struct typhoon_file_header);

	/* The ioread32() in typhoon_wait_interrupt() will force the
	 * last write to the command register to post, so
	 * we don't need a typhoon_post_pci_writes() after it.
	 */
	for(i = 0; i < numSections; i++) {
		sHdr = (struct typhoon_section_header *) image_data;
		image_data += sizeof(struct typhoon_section_header);
		load_addr = le32_to_cpu(sHdr->startAddr);
		section_len = le32_to_cpu(sHdr->len);

		/* Feed the section to the card one bounce page at a time. */
		while(section_len) {
			len = min_t(u32, section_len, PAGE_SIZE);

			if(typhoon_wait_interrupt(ioaddr) < 0 ||
			   ioread32(ioaddr + TYPHOON_REG_STATUS) !=
			   TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
				printk(KERN_ERR "%s: segment ready timeout\n",
				       tp->name);
				goto err_out_irq;
			}

			/* Do an pseudo IPv4 checksum on the data -- first
			 * need to convert each u16 to cpu order before
			 * summing. Fortunately, due to the properties of
			 * the checksum, we can do this once, at the end.
			 */
			csum = csum_fold(csum_partial_copy_nocheck(image_data,
								  dpage, len,
								  0));

			iowrite32(len, ioaddr + TYPHOON_REG_BOOT_LENGTH);
			iowrite32(le16_to_cpu((__force __le16)csum),
					ioaddr + TYPHOON_REG_BOOT_CHECKSUM);
			iowrite32(load_addr,
					ioaddr + TYPHOON_REG_BOOT_DEST_ADDR);
			iowrite32(0, ioaddr + TYPHOON_REG_BOOT_DATA_HI);
			iowrite32(dpage_dma, ioaddr + TYPHOON_REG_BOOT_DATA_LO);
			typhoon_post_pci_writes(ioaddr);
			iowrite32(TYPHOON_BOOTCMD_SEG_AVAILABLE,
			       ioaddr + TYPHOON_REG_COMMAND);

			image_data += len;
			load_addr += len;
			section_len -= len;
		}
	}

	/* Wait for the final segment to be consumed before declaring the
	 * download complete.
	 */
	if(typhoon_wait_interrupt(ioaddr) < 0 ||
	   ioread32(ioaddr + TYPHOON_REG_STATUS) !=
	   TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
		printk(KERN_ERR "%s: final segment ready timeout\n", tp->name);
		goto err_out_irq;
	}

	iowrite32(TYPHOON_BOOTCMD_DNLD_COMPLETE, ioaddr + TYPHOON_REG_COMMAND);

	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
		printk(KERN_ERR "%s: boot ready timeout, status 0x%0x\n",
		       tp->name, ioread32(ioaddr + TYPHOON_REG_STATUS));
		goto err_out_irq;
	}

	err = 0;

err_out_irq:
	/* Restore the caller's interrupt mask/enable state. */
	iowrite32(irqMasked, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(irqEnabled, ioaddr + TYPHOON_REG_INTR_ENABLE);

	pci_free_consistent(pdev, PAGE_SIZE, dpage, dpage_dma);

err_out:
	return err;
}
1550
/* Boot the 3XP processor.
 *
 * Waits for the card to report @initial_status, hands it the DMA address
 * of the shared boot record, then issues the boot command and waits for
 * the image to come up.
 *
 * Returns 0 on success, -ETIMEDOUT if the card never reaches the
 * expected status either before or after the boot command.
 */
static int
typhoon_boot_3XP(struct typhoon *tp, u32 initial_status)
{
        void __iomem *ioaddr = tp->ioaddr;

        if(typhoon_wait_status(ioaddr, initial_status) < 0) {
                printk(KERN_ERR "%s: boot ready timeout\n", tp->name);
                goto out_timeout;
        }

        /* Point the card at the shared boot record; the high address
         * word is written as zero (32 bit DMA address).
         */
        iowrite32(0, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_HI);
        iowrite32(tp->shared_dma, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_LO);
        typhoon_post_pci_writes(ioaddr);
        iowrite32(TYPHOON_BOOTCMD_REG_BOOT_RECORD,
                                ioaddr + TYPHOON_REG_COMMAND);

        if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_RUNNING) < 0) {
                printk(KERN_ERR "%s: boot finish timeout (status 0x%x)\n",
                       tp->name, ioread32(ioaddr + TYPHOON_REG_STATUS));
                goto out_timeout;
        }

        /* Clear the Transmit and Command ready registers
         */
        iowrite32(0, ioaddr + TYPHOON_REG_TX_HI_READY);
        iowrite32(0, ioaddr + TYPHOON_REG_CMD_READY);
        iowrite32(0, ioaddr + TYPHOON_REG_TX_LO_READY);
        typhoon_post_pci_writes(ioaddr);
        iowrite32(TYPHOON_BOOTCMD_BOOT, ioaddr + TYPHOON_REG_COMMAND);

        return 0;

out_timeout:
        return -ETIMEDOUT;
}
1586
/* Reclaim completed transmit descriptors.
 *
 * Walks the ring from txRing->lastRead up to the cleared index the card
 * maintains in the shared indexes page (@index), freeing the skb or DMA
 * mapping each descriptor describes.
 *
 * Returns the new read offset; the caller stores it back into
 * txRing->lastRead.
 */
static u32
typhoon_clean_tx(struct typhoon *tp, struct transmit_ring *txRing,
                        volatile __le32 * index)
{
        u32 lastRead = txRing->lastRead;
        struct tx_desc *tx;
        dma_addr_t skb_dma;
        int dma_len;
        int type;

        while(lastRead != le32_to_cpu(*index)) {
                tx = (struct tx_desc *) (txRing->ringBase + lastRead);
                type = tx->flags & TYPHOON_TYPE_MASK;

                if(type == TYPHOON_TX_DESC) {
                        /* This tx_desc describes a packet.
                         *
                         * NOTE(review): tx_addr round-trips the skb
                         * pointer stashed by the transmit path; confirm
                         * the field is wide enough for a pointer on
                         * 64-bit systems.
                         */
                        unsigned long ptr = tx->tx_addr;
                        struct sk_buff *skb = (struct sk_buff *) ptr;
                        dev_kfree_skb_irq(skb);
                } else if(type == TYPHOON_FRAG_DESC) {
                        /* This tx_desc describes a memory mapping. Free it.
                         */
                        skb_dma = (dma_addr_t) le32_to_cpu(tx->frag.addr);
                        dma_len = le16_to_cpu(tx->len);
                        pci_unmap_single(tp->pdev, skb_dma, dma_len,
                                       PCI_DMA_TODEVICE);
                }

                /* Mark the descriptor free and advance (with wrap). */
                tx->flags = 0;
                typhoon_inc_tx_index(&lastRead, 1);
        }

        return lastRead;
}
1622
1623 static void
1624 typhoon_tx_complete(struct typhoon *tp, struct transmit_ring *txRing,
1625                         volatile __le32 * index)
1626 {
1627         u32 lastRead;
1628         int numDesc = MAX_SKB_FRAGS + 1;
1629
1630         /* This will need changing if we start to use the Hi Tx ring. */
1631         lastRead = typhoon_clean_tx(tp, txRing, index);
1632         if(netif_queue_stopped(tp->dev) && typhoon_num_free(txRing->lastWrite,
1633                                 lastRead, TXLO_ENTRIES) > (numDesc + 2))
1634                 netif_wake_queue(tp->dev);
1635
1636         txRing->lastRead = lastRead;
1637         smp_wmb();
1638 }
1639
/* Return the rx buffer at @idx to the free ring without reallocating.
 *
 * If the free ring has no room, the skb is dropped instead of recycled;
 * typhoon_fill_free_ring() can replenish it later.
 */
static void
typhoon_recycle_rx_skb(struct typhoon *tp, u32 idx)
{
        struct typhoon_indexes *indexes = tp->indexes;
        struct rxbuff_ent *rxb = &tp->rxbuffers[idx];
        struct basic_ring *ring = &tp->rxBuffRing;
        struct rx_free *r;

        /* Ring is full when advancing the write offset by one entry
         * would collide with the card's cleared index.
         */
        if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
                                le32_to_cpu(indexes->rxBuffCleared)) {
                /* no room in ring, just drop the skb
                 */
                dev_kfree_skb_any(rxb->skb);
                rxb->skb = NULL;
                return;
        }

        r = (struct rx_free *) (ring->ringBase + ring->lastWrite);
        typhoon_inc_rxfree_index(&ring->lastWrite, 1);
        r->virtAddr = idx;
        r->physAddr = cpu_to_le32(rxb->dma_addr);

        /* Tell the card about it -- the barrier orders the descriptor
         * write before the index update the card polls.
         */
        wmb();
        indexes->rxBuffReady = cpu_to_le32(ring->lastWrite);
}
1666
/* Allocate a fresh receive skb for slot @idx, DMA-map it, and post it
 * on the rx free ring.
 *
 * Returns 0 on success, -ENOMEM if the free ring is full or the skb
 * allocation fails (rxb->skb is left NULL in that case).
 */
static int
typhoon_alloc_rx_skb(struct typhoon *tp, u32 idx)
{
        struct typhoon_indexes *indexes = tp->indexes;
        struct rxbuff_ent *rxb = &tp->rxbuffers[idx];
        struct basic_ring *ring = &tp->rxBuffRing;
        struct rx_free *r;
        struct sk_buff *skb;
        dma_addr_t dma_addr;

        rxb->skb = NULL;

        /* No room on the free ring: advancing the write offset would
         * collide with the card's cleared index.
         */
        if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
                                le32_to_cpu(indexes->rxBuffCleared))
                return -ENOMEM;

        skb = dev_alloc_skb(PKT_BUF_SZ);
        if(!skb)
                return -ENOMEM;

#if 0
        /* Please, 3com, fix the firmware to allow DMA to a unaligned
         * address! Pretty please?
         */
        skb_reserve(skb, 2);
#endif

        skb->dev = tp->dev;
        dma_addr = pci_map_single(tp->pdev, skb->data,
                                  PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

        /* Since no card does 64 bit DAC, the high bits will never
         * change from zero.
         */
        r = (struct rx_free *) (ring->ringBase + ring->lastWrite);
        typhoon_inc_rxfree_index(&ring->lastWrite, 1);
        r->virtAddr = idx;
        r->physAddr = cpu_to_le32(dma_addr);
        rxb->skb = skb;
        rxb->dma_addr = dma_addr;

        /* Tell the card about it */
        wmb();
        indexes->rxBuffReady = cpu_to_le32(ring->lastWrite);
        return 0;
}
1713
/* Process received packets on one rx ring, up to @budget packets.
 *
 * @ready/@cleared point at the card-maintained producer index and the
 * driver-maintained consumer index in the shared indexes page.
 *
 * Small packets (< rx_copybreak) are copied into a fresh skb and the
 * original buffer is recycled; larger ones are handed up directly and a
 * replacement buffer is allocated. Returns the number of packets passed
 * to the stack.
 */
static int
typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile __le32 * ready,
           volatile __le32 * cleared, int budget)
{
        struct rx_desc *rx;
        struct sk_buff *skb, *new_skb;
        struct rxbuff_ent *rxb;
        dma_addr_t dma_addr;
        u32 local_ready;
        u32 rxaddr;
        int pkt_len;
        u32 idx;
        __le32 csum_bits;
        int received;

        received = 0;
        local_ready = le32_to_cpu(*ready);
        rxaddr = le32_to_cpu(*cleared);
        while(rxaddr != local_ready && budget > 0) {
                rx = (struct rx_desc *) (rxRing->ringBase + rxaddr);
                idx = rx->addr;
                rxb = &tp->rxbuffers[idx];
                skb = rxb->skb;
                dma_addr = rxb->dma_addr;

                typhoon_inc_rx_index(&rxaddr, 1);

                /* Errored frames are just recycled, not counted. */
                if(rx->flags & TYPHOON_RX_ERROR) {
                        typhoon_recycle_rx_skb(tp, idx);
                        continue;
                }

                pkt_len = le16_to_cpu(rx->frameLen);

                if(pkt_len < rx_copybreak &&
                   (new_skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
                        /* Copybreak path: align the copy at a 2 byte
                         * offset and give the original buffer back.
                         */
                        skb_reserve(new_skb, 2);
                        pci_dma_sync_single_for_cpu(tp->pdev, dma_addr,
                                                    PKT_BUF_SZ,
                                                    PCI_DMA_FROMDEVICE);
                        skb_copy_to_linear_data(new_skb, skb->data, pkt_len);
                        pci_dma_sync_single_for_device(tp->pdev, dma_addr,
                                                       PKT_BUF_SZ,
                                                       PCI_DMA_FROMDEVICE);
                        skb_put(new_skb, pkt_len);
                        typhoon_recycle_rx_skb(tp, idx);
                } else {
                        /* Hand the mapped buffer straight up and try to
                         * post a replacement.
                         */
                        new_skb = skb;
                        skb_put(new_skb, pkt_len);
                        pci_unmap_single(tp->pdev, dma_addr, PKT_BUF_SZ,
                                       PCI_DMA_FROMDEVICE);
                        typhoon_alloc_rx_skb(tp, idx);
                }
                new_skb->protocol = eth_type_trans(new_skb, tp->dev);
                /* Checksum is good only if IP and exactly one of TCP/UDP
                 * checked out.
                 */
                csum_bits = rx->rxStatus & (TYPHOON_RX_IP_CHK_GOOD |
                        TYPHOON_RX_UDP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD);
                if(csum_bits ==
                   (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD)
                   || csum_bits ==
                   (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_UDP_CHK_GOOD)) {
                        new_skb->ip_summed = CHECKSUM_UNNECESSARY;
                } else
                        new_skb->ip_summed = CHECKSUM_NONE;

                /* state_lock guards tp->vlgrp against config changes. */
                spin_lock(&tp->state_lock);
                if(tp->vlgrp != NULL && rx->rxStatus & TYPHOON_RX_VLAN)
                        vlan_hwaccel_receive_skb(new_skb, tp->vlgrp,
                                                 ntohl(rx->vlanTag) & 0xffff);
                else
                        netif_receive_skb(new_skb);
                spin_unlock(&tp->state_lock);

                received++;
                budget--;
        }
        *cleared = cpu_to_le32(rxaddr);

        return received;
}
1793
1794 static void
1795 typhoon_fill_free_ring(struct typhoon *tp)
1796 {
1797         u32 i;
1798
1799         for(i = 0; i < RXENT_ENTRIES; i++) {
1800                 struct rxbuff_ent *rxb = &tp->rxbuffers[i];
1801                 if(rxb->skb)
1802                         continue;
1803                 if(typhoon_alloc_rx_skb(tp, i) < 0)
1804                         break;
1805         }
1806 }
1807
/* NAPI poll handler: drain command responses, completed transmits, and
 * both rx rings, then replenish the rx free ring if it drained.
 *
 * Returns the number of rx packets processed; re-enables interrupts via
 * the mask register when under budget.
 */
static int
typhoon_poll(struct napi_struct *napi, int budget)
{
        struct typhoon *tp = container_of(napi, struct typhoon, napi);
        struct typhoon_indexes *indexes = tp->indexes;
        int work_done;

        /* Order our reads of the shared indexes after the card's writes. */
        rmb();
        /* If a command issuer is waiting, it will consume the responses
         * itself; otherwise drain them here.
         */
        if(!tp->awaiting_resp && indexes->respReady != indexes->respCleared)
                        typhoon_process_response(tp, 0, NULL);

        if(le32_to_cpu(indexes->txLoCleared) != tp->txLoRing.lastRead)
                typhoon_tx_complete(tp, &tp->txLoRing, &indexes->txLoCleared);

        work_done = 0;

        if(indexes->rxHiCleared != indexes->rxHiReady) {
                work_done += typhoon_rx(tp, &tp->rxHiRing, &indexes->rxHiReady,
                                        &indexes->rxHiCleared, budget);
        }

        if(indexes->rxLoCleared != indexes->rxLoReady) {
                work_done += typhoon_rx(tp, &tp->rxLoRing, &indexes->rxLoReady,
                                        &indexes->rxLoCleared, budget - work_done);
        }

        if(le32_to_cpu(indexes->rxBuffCleared) == tp->rxBuffRing.lastWrite) {
                /* rxBuff ring is empty, try to fill it. */
                typhoon_fill_free_ring(tp);
        }

        if (work_done < budget) {
                /* All caught up: leave polling and unmask interrupts. */
                napi_complete(napi);
                iowrite32(TYPHOON_INTR_NONE,
                                tp->ioaddr + TYPHOON_REG_INTR_MASK);
                typhoon_post_pci_writes(tp->ioaddr);
        }

        return work_done;
}
1848
1849 static irqreturn_t
1850 typhoon_interrupt(int irq, void *dev_instance)
1851 {
1852         struct net_device *dev = dev_instance;
1853         struct typhoon *tp = netdev_priv(dev);
1854         void __iomem *ioaddr = tp->ioaddr;
1855         u32 intr_status;
1856
1857         intr_status = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
1858         if(!(intr_status & TYPHOON_INTR_HOST_INT))
1859                 return IRQ_NONE;
1860
1861         iowrite32(intr_status, ioaddr + TYPHOON_REG_INTR_STATUS);
1862
1863         if (napi_schedule_prep(&tp->napi)) {
1864                 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
1865                 typhoon_post_pci_writes(ioaddr);
1866                 __napi_schedule(&tp->napi);
1867         } else {
1868                 printk(KERN_ERR "%s: Error, poll already scheduled\n",
1869                        dev->name);
1870         }
1871         return IRQ_HANDLED;
1872 }
1873
1874 static void
1875 typhoon_free_rx_rings(struct typhoon *tp)
1876 {
1877         u32 i;
1878
1879         for(i = 0; i < RXENT_ENTRIES; i++) {
1880                 struct rxbuff_ent *rxb = &tp->rxbuffers[i];
1881                 if(rxb->skb) {
1882                         pci_unmap_single(tp->pdev, rxb->dma_addr, PKT_BUF_SZ,
1883                                        PCI_DMA_FROMDEVICE);
1884                         dev_kfree_skb(rxb->skb);
1885                         rxb->skb = NULL;
1886                 }
1887         }
1888 }
1889
/* Put the card into its sleep image and the PCI device into @state.
 *
 * @events: wake event mask to arm before sleeping (little endian, as
 * placed directly into the command parameter).
 *
 * Returns 0 on success, a negative errno if a command fails or the card
 * never reports TYPHOON_STATUS_SLEEPING.
 */
static int
typhoon_sleep(struct typhoon *tp, pci_power_t state, __le16 events)
{
        struct pci_dev *pdev = tp->pdev;
        void __iomem *ioaddr = tp->ioaddr;
        struct cmd_desc xp_cmd;
        int err;

        INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_ENABLE_WAKE_EVENTS);
        xp_cmd.parm1 = events;
        err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
        if(err < 0) {
                printk(KERN_ERR "%s: typhoon_sleep(): wake events cmd err %d\n",
                                tp->name, err);
                return err;
        }

        INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_GOTO_SLEEP);
        err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
        if(err < 0) {
                printk(KERN_ERR "%s: typhoon_sleep(): sleep cmd err %d\n",
                                tp->name, err);
                return err;
        }

        if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_SLEEPING) < 0)
                return -ETIMEDOUT;

        /* Since we cannot monitor the status of the link while sleeping,
         * tell the world it went away.
         */
        netif_carrier_off(tp->dev);

        pci_enable_wake(tp->pdev, state, 1);
        pci_disable_device(pdev);
        return pci_set_power_state(pdev, state);
}
1927
1928 static int
1929 typhoon_wakeup(struct typhoon *tp, int wait_type)
1930 {
1931         struct pci_dev *pdev = tp->pdev;
1932         void __iomem *ioaddr = tp->ioaddr;
1933
1934         pci_set_power_state(pdev, PCI_D0);
1935         pci_restore_state(pdev);
1936
1937         /* Post 2.x.x versions of the Sleep Image require a reset before
1938          * we can download the Runtime Image. But let's not make users of
1939          * the old firmware pay for the reset.
1940          */
1941         iowrite32(TYPHOON_BOOTCMD_WAKEUP, ioaddr + TYPHOON_REG_COMMAND);
1942         if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0 ||
1943                         (tp->capabilities & TYPHOON_WAKEUP_NEEDS_RESET))
1944                 return typhoon_reset(ioaddr, wait_type);
1945
1946         return 0;
1947 }
1948
1949 static int
1950 typhoon_start_runtime(struct typhoon *tp)
1951 {
1952         struct net_device *dev = tp->dev;
1953         void __iomem *ioaddr = tp->ioaddr;
1954         struct cmd_desc xp_cmd;
1955         int err;
1956
1957         typhoon_init_rings(tp);
1958         typhoon_fill_free_ring(tp);
1959
1960         err = typhoon_download_firmware(tp);
1961         if(err < 0) {
1962                 printk("%s: cannot load runtime on 3XP\n", tp->name);
1963                 goto error_out;
1964         }
1965
1966         if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
1967                 printk("%s: cannot boot 3XP\n", tp->name);
1968                 err = -EIO;
1969                 goto error_out;
1970         }
1971
1972         INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAX_PKT_SIZE);
1973         xp_cmd.parm1 = cpu_to_le16(PKT_BUF_SZ);
1974         err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1975         if(err < 0)
1976                 goto error_out;
1977
1978         INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS);
1979         xp_cmd.parm1 = cpu_to_le16(ntohs(*(__be16 *)&dev->dev_addr[0]));
1980         xp_cmd.parm2 = cpu_to_le32(ntohl(*(__be32 *)&dev->dev_addr[2]));
1981         err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1982         if(err < 0)
1983                 goto error_out;
1984
1985         /* Disable IRQ coalescing -- we can reenable it when 3Com gives
1986          * us some more information on how to control it.
1987          */
1988         INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_IRQ_COALESCE_CTRL);
1989         xp_cmd.parm1 = 0;
1990         err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1991         if(err < 0)
1992                 goto error_out;
1993
1994         INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT);
1995         xp_cmd.parm1 = tp->xcvr_select;
1996         err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1997         if(err < 0)
1998                 goto error_out;
1999
2000         INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_VLAN_TYPE_WRITE);
2001         xp_cmd.parm1 = cpu_to_le16(ETH_P_8021Q);
2002         err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
2003         if(err < 0)
2004                 goto error_out;
2005
2006         INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_OFFLOAD_TASKS);
2007         spin_lock_bh(&tp->state_lock);
2008         xp_cmd.parm2 = tp->offload;
2009         xp_cmd.parm3 = tp->offload;
2010         err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
2011         spin_unlock_bh(&tp->state_lock);
2012         if(err < 0)
2013                 goto error_out;
2014
2015         typhoon_set_rx_mode(dev);
2016
2017         INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_ENABLE);
2018         err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
2019         if(err < 0)
2020                 goto error_out;
2021
2022         INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_ENABLE);
2023         err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
2024         if(err < 0)
2025                 goto error_out;
2026
2027         tp->card_state = Running;
2028         smp_wmb();
2029
2030         iowrite32(TYPHOON_INTR_ENABLE_ALL, ioaddr + TYPHOON_REG_INTR_ENABLE);
2031         iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_MASK);
2032         typhoon_post_pci_writes(ioaddr);
2033
2034         return 0;
2035
2036 error_out:
2037         typhoon_reset(ioaddr, WaitNoSleep);
2038         typhoon_free_rx_rings(tp);
2039         typhoon_init_rings(tp);
2040         return err;
2041 }
2042
/* Take the running card down: disable Rx/Tx, save statistics, halt the
 * 3XP, and reset it.
 *
 * Returns 0 on success, -ETIMEDOUT if the final reset fails. Timeouts
 * waiting for Tx drain or halt are logged but tolerated, since the reset
 * cleans up after them.
 */
static int
typhoon_stop_runtime(struct typhoon *tp, int wait_type)
{
        struct typhoon_indexes *indexes = tp->indexes;
        struct transmit_ring *txLo = &tp->txLoRing;
        void __iomem *ioaddr = tp->ioaddr;
        struct cmd_desc xp_cmd;
        int i;

        /* Disable interrupts early, since we can't schedule a poll
         * when called with !netif_running(). This will be posted
         * when we force the posting of the command.
         */
        iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_ENABLE);

        INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_DISABLE);
        typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

        /* Wait 1/2 sec for any outstanding transmits to occur
         * We'll cleanup after the reset if this times out.
         */
        for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
                if(indexes->txLoCleared == cpu_to_le32(txLo->lastWrite))
                        break;
                udelay(TYPHOON_UDELAY);
        }

        if(i == TYPHOON_WAIT_TIMEOUT)
                printk(KERN_ERR
                       "%s: halt timed out waiting for Tx to complete\n",
                       tp->name);

        INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_DISABLE);
        typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

        /* save the statistics so when we bring the interface up again,
         * the values reported to userspace are correct.
         */
        tp->card_state = Sleeping;
        smp_wmb();
        typhoon_do_get_stats(tp);
        memcpy(&tp->stats_saved, &tp->stats, sizeof(struct net_device_stats));

        INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_HALT);
        typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

        if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_HALTED) < 0)
                printk(KERN_ERR "%s: timed out waiting for 3XP to halt\n",
                       tp->name);

        if(typhoon_reset(ioaddr, wait_type) < 0) {
                printk(KERN_ERR "%s: unable to reset 3XP\n", tp->name);
                return -ETIMEDOUT;
        }

        /* cleanup any outstanding Tx packets */
        if(indexes->txLoCleared != cpu_to_le32(txLo->lastWrite)) {
                indexes->txLoCleared = cpu_to_le32(txLo->lastWrite);
                typhoon_clean_tx(tp, &tp->txLoRing, &indexes->txLoCleared);
        }

        return 0;
}
2106
2107 static void
2108 typhoon_tx_timeout(struct net_device *dev)
2109 {
2110         struct typhoon *tp = netdev_priv(dev);
2111
2112         if(typhoon_reset(tp->ioaddr, WaitNoSleep) < 0) {
2113                 printk(KERN_WARNING "%s: could not reset in tx timeout\n",
2114                                         dev->name);
2115                 goto truely_dead;
2116         }
2117
2118         /* If we ever start using the Hi ring, it will need cleaning too */
2119         typhoon_clean_tx(tp, &tp->txLoRing, &tp->indexes->txLoCleared);
2120         typhoon_free_rx_rings(tp);
2121
2122         if(typhoon_start_runtime(tp) < 0) {
2123                 printk(KERN_ERR "%s: could not start runtime in tx timeout\n",
2124                                         dev->name);
2125                 goto truely_dead;
2126         }
2127
2128         netif_wake_queue(dev);
2129         return;
2130
2131 truely_dead:
2132         /* Reset the hardware, and turn off carrier to avoid more timeouts */
2133         typhoon_reset(tp->ioaddr, NoWait);
2134         netif_carrier_off(dev);
2135 }
2136
/* net_device open: request firmware, wake the card, grab the IRQ, and
 * start the runtime.
 *
 * On failure after wakeup, the error path tries to reboot the card into
 * its sleep image and put it back to sleep so it is left in a sane,
 * low-power state.
 */
static int
typhoon_open(struct net_device *dev)
{
        struct typhoon *tp = netdev_priv(dev);
        int err;

        err = typhoon_request_firmware(tp);
        if (err)
                goto out;

        err = typhoon_wakeup(tp, WaitSleep);
        if(err < 0) {
                printk(KERN_ERR "%s: unable to wakeup device\n", dev->name);
                goto out_sleep;
        }

        err = request_irq(dev->irq, &typhoon_interrupt, IRQF_SHARED,
                                dev->name, dev);
        if(err < 0)
                goto out_sleep;

        napi_enable(&tp->napi);

        err = typhoon_start_runtime(tp);
        if(err < 0) {
                napi_disable(&tp->napi);
                goto out_irq;
        }

        netif_start_queue(dev);
        return 0;

out_irq:
        free_irq(dev->irq, dev);

out_sleep:
        /* Try to leave the card parked in its sleep image; if even that
         * fails, fall back to a plain reset.
         */
        if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
                printk(KERN_ERR "%s: unable to reboot into sleep img\n",
                                dev->name);
                typhoon_reset(tp->ioaddr, NoWait);
                goto out;
        }

        if(typhoon_sleep(tp, PCI_D3hot, 0) < 0)
                printk(KERN_ERR "%s: unable to go back to sleep\n", dev->name);

out:
        return err;
}
2186
/* net_device stop: halt the runtime, release the IRQ and rx buffers,
 * then put the card back into its sleep image. Failures are logged but
 * do not prevent the interface from going down; always returns 0.
 */
static int
typhoon_close(struct net_device *dev)
{
        struct typhoon *tp = netdev_priv(dev);

        netif_stop_queue(dev);
        napi_disable(&tp->napi);

        if(typhoon_stop_runtime(tp, WaitSleep) < 0)
                printk(KERN_ERR "%s: unable to stop runtime\n", dev->name);

        /* Make sure there is no irq handler running on a different CPU. */
        free_irq(dev->irq, dev);

        typhoon_free_rx_rings(tp);
        typhoon_init_rings(tp);

        if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0)
                printk(KERN_ERR "%s: unable to boot sleep image\n", dev->name);

        if(typhoon_sleep(tp, PCI_D3hot, 0) < 0)
                printk(KERN_ERR "%s: unable to put card to sleep\n", dev->name);

        return 0;
}
2212
2213 #ifdef CONFIG_PM
/* PM resume: wake the card and restart the runtime if the interface was
 * running when we suspended. On failure, leave the card reset and return
 * -EBUSY.
 */
static int
typhoon_resume(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct typhoon *tp = netdev_priv(dev);

        /* If we're down, resume when we are upped.
         */
        if(!netif_running(dev))
                return 0;

        if(typhoon_wakeup(tp, WaitNoSleep) < 0) {
                printk(KERN_ERR "%s: critical: could not wake up in resume\n",
                                dev->name);
                goto reset;
        }

        if(typhoon_start_runtime(tp) < 0) {
                printk(KERN_ERR "%s: critical: could not start runtime in "
                                "resume\n", dev->name);
                goto reset;
        }

        netif_device_attach(dev);
        return 0;

reset:
        typhoon_reset(tp->ioaddr, NoWait);
        return -EBUSY;
}
2244
/* PM suspend: stop the runtime, boot the sleep image, program the MAC
 * address and a directed+broadcast rx filter for wake-on-LAN, and put
 * the card to sleep with the configured wake events.
 *
 * Refuses (-EBUSY) when magic-packet wake is requested with VLANs
 * active. Any failure after detaching attempts a resume and returns
 * -EBUSY.
 */
static int
typhoon_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct typhoon *tp = netdev_priv(dev);
        struct cmd_desc xp_cmd;

        /* If we're down, we're already suspended.
         */
        if(!netif_running(dev))
                return 0;

        /* state_lock guards tp->vlgrp. */
        spin_lock_bh(&tp->state_lock);
        if(tp->vlgrp && tp->wol_events & TYPHOON_WAKE_MAGIC_PKT) {
                spin_unlock_bh(&tp->state_lock);
                printk(KERN_ERR "%s: cannot do WAKE_MAGIC with VLANS\n",
                                dev->name);
                return -EBUSY;
        }
        spin_unlock_bh(&tp->state_lock);

        netif_device_detach(dev);

        if(typhoon_stop_runtime(tp, WaitNoSleep) < 0) {
                printk(KERN_ERR "%s: unable to stop runtime\n", dev->name);
                goto need_resume;
        }

        typhoon_free_rx_rings(tp);
        typhoon_init_rings(tp);

        if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
                printk(KERN_ERR "%s: unable to boot sleep image\n", dev->name);
                goto need_resume;
        }

        /* The sleep image needs the MAC address to recognize wake
         * packets addressed to us.
         */
        INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS);
        xp_cmd.parm1 = cpu_to_le16(ntohs(*(__be16 *)&dev->dev_addr[0]));
        xp_cmd.parm2 = cpu_to_le32(ntohl(*(__be32 *)&dev->dev_addr[2]));
        if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
                printk(KERN_ERR "%s: unable to set mac address in suspend\n",
                                dev->name);
                goto need_resume;
        }

        INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
        xp_cmd.parm1 = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
        if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
                printk(KERN_ERR "%s: unable to set rx filter in suspend\n",
                                dev->name);
                goto need_resume;
        }

        if(typhoon_sleep(tp, pci_choose_state(pdev, state), tp->wol_events) < 0) {
                printk(KERN_ERR "%s: unable to put card to sleep\n", dev->name);
                goto need_resume;
        }

        return 0;

need_resume:
        typhoon_resume(pdev);
        return -EBUSY;
}
2309 #endif
2310
/* Probe whether memory-mapped I/O to the card works by raising a self
 * interrupt through MMIO and checking that it shows up in the interrupt
 * status register.
 *
 * Returns 1 if MMIO works, 0 to fall back to port I/O.
 */
static int __devinit
typhoon_test_mmio(struct pci_dev *pdev)
{
        void __iomem *ioaddr = pci_iomap(pdev, 1, 128);
        int mode = 0;
        u32 val;

        if(!ioaddr)
                goto out;

        /* Only safe to poke the card while it is idle, waiting for us. */
        if(ioread32(ioaddr + TYPHOON_REG_STATUS) !=
                                TYPHOON_STATUS_WAITING_FOR_HOST)
                goto out_unmap;

        iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
        iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
        iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_ENABLE);

        /* Ok, see if we can change our interrupt status register by
         * sending ourselves an interrupt. If so, then MMIO works.
         * The 50usec delay is arbitrary -- it could probably be smaller.
         */
        val = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
        if((val & TYPHOON_INTR_SELF) == 0) {
                iowrite32(1, ioaddr + TYPHOON_REG_SELF_INTERRUPT);
                ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
                udelay(50);
                val = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
                if(val & TYPHOON_INTR_SELF)
                        mode = 1;
        }

        /* Restore the quiescent state: everything masked and acked,
         * interrupts disabled.
         */
        iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
        iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
        iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_ENABLE);
        ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);

out_unmap:
        pci_iounmap(pdev, ioaddr);

out:
        if(!mode)
                printk(KERN_INFO PFX "falling back to port IO\n");
        return mode;
}
2356
/* net_device_ops dispatch table wiring the driver's entry points into
 * the networking core.
 */
static const struct net_device_ops typhoon_netdev_ops = {
        .ndo_open               = typhoon_open,
        .ndo_stop               = typhoon_close,
        .ndo_start_xmit         = typhoon_start_tx,
        .ndo_set_multicast_list = typhoon_set_rx_mode,
        .ndo_tx_timeout         = typhoon_tx_timeout,
        .ndo_get_stats          = typhoon_get_stats,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_mac_address    = typhoon_set_mac_address,
        .ndo_change_mtu         = eth_change_mtu,
        .ndo_vlan_rx_register   = typhoon_vlan_rx_register,
};
2369
2370 static int __devinit
2371 typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2372 {
2373         static int did_version = 0;
2374         struct net_device *dev;
2375         struct typhoon *tp;
2376         int card_id = (int) ent->driver_data;
2377         void __iomem *ioaddr;
2378         void *shared;
2379         dma_addr_t shared_dma;
2380         struct cmd_desc xp_cmd;
2381         struct resp_desc xp_resp[3];
2382         int err = 0;
2383
2384         if(!did_version++)
2385                 printk(KERN_INFO "%s", version);
2386
2387         dev = alloc_etherdev(sizeof(*tp));
2388         if(dev == NULL) {
2389                 printk(ERR_PFX "%s: unable to alloc new net device\n",
2390                        pci_name(pdev));
2391                 err = -ENOMEM;
2392                 goto error_out;
2393         }
2394         SET_NETDEV_DEV(dev, &pdev->dev);
2395
2396         err = pci_enable_device(pdev);
2397         if(err < 0) {
2398                 printk(ERR_PFX "%s: unable to enable device\n",
2399                        pci_name(pdev));
2400                 goto error_out_dev;
2401         }
2402
2403         err = pci_set_mwi(pdev);
2404         if(err < 0) {
2405                 printk(ERR_PFX "%s: unable to set MWI\n", pci_name(pdev));
2406                 goto error_out_disable;
2407         }
2408
2409         err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
2410         if(err < 0) {
2411                 printk(ERR_PFX "%s: No usable DMA configuration\n",
2412                        pci_name(pdev));
2413                 goto error_out_mwi;
2414         }
2415
2416         /* sanity checks on IO and MMIO BARs
2417          */
2418         if(!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) {
2419                 printk(ERR_PFX
2420                        "%s: region #1 not a PCI IO resource, aborting\n",
2421                        pci_name(pdev));
2422                 err = -ENODEV;
2423                 goto error_out_mwi;
2424         }
2425         if(pci_resource_len(pdev, 0) < 128) {
2426                 printk(ERR_PFX "%s: Invalid PCI IO region size, aborting\n",
2427                        pci_name(pdev));
2428                 err = -ENODEV;
2429                 goto error_out_mwi;
2430         }
2431         if(!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
2432                 printk(ERR_PFX
2433                        "%s: region #1 not a PCI MMIO resource, aborting\n",
2434                        pci_name(pdev));
2435                 err = -ENODEV;
2436                 goto error_out_mwi;
2437         }
2438         if(pci_resource_len(pdev, 1) < 128) {
2439                 printk(ERR_PFX "%s: Invalid PCI MMIO region size, aborting\n",
2440                        pci_name(pdev));
2441                 err = -ENODEV;
2442                 goto error_out_mwi;
2443         }
2444
2445         err = pci_request_regions(pdev, "typhoon");
2446         if(err < 0) {
2447                 printk(ERR_PFX "%s: could not request regions\n",
2448                        pci_name(pdev));
2449                 goto error_out_mwi;
2450         }
2451
2452         /* map our registers
2453          */
2454         if(use_mmio != 0 && use_mmio != 1)
2455                 use_mmio = typhoon_test_mmio(pdev);
2456
2457         ioaddr = pci_iomap(pdev, use_mmio, 128);
2458         if (!ioaddr) {
2459                 printk(ERR_PFX "%s: cannot remap registers, aborting\n",
2460                        pci_name(pdev));
2461                 err = -EIO;
2462                 goto error_out_regions;
2463         }
2464
2465         /* allocate pci dma space for rx and tx descriptor rings
2466          */
2467         shared = pci_alloc_consistent(pdev, sizeof(struct typhoon_shared),
2468                                       &shared_dma);
2469         if(!shared) {
2470                 printk(ERR_PFX "%s: could not allocate DMA memory\n",
2471                        pci_name(pdev));
2472                 err = -ENOMEM;
2473                 goto error_out_remap;
2474         }
2475
2476         dev->irq = pdev->irq;
2477         tp = netdev_priv(dev);
2478         tp->shared = (struct typhoon_shared *) shared;
2479         tp->shared_dma = shared_dma;
2480         tp->pdev = pdev;
2481         tp->tx_pdev = pdev;
2482         tp->ioaddr = ioaddr;
2483         tp->tx_ioaddr = ioaddr;
2484         tp->dev = dev;
2485
2486         /* Init sequence:
2487          * 1) Reset the adapter to clear any bad juju
2488          * 2) Reload the sleep image
2489          * 3) Boot the sleep image
2490          * 4) Get the hardware address.
2491          * 5) Put the card to sleep.
2492          */
2493         if (typhoon_reset(ioaddr, WaitSleep) < 0) {
2494                 printk(ERR_PFX "%s: could not reset 3XP\n", pci_name(pdev));
2495                 err = -EIO;
2496                 goto error_out_dma;
2497         }
2498
2499         /* Now that we've reset the 3XP and are sure it's not going to
2500          * write all over memory, enable bus mastering, and save our
2501          * state for resuming after a suspend.
2502          */
2503         pci_set_master(pdev);
2504         pci_save_state(pdev);
2505
2506         /* dev->name is not valid until we register, but we need to
2507          * use some common routines to initialize the card. So that those
2508          * routines print the right name, we keep our oun pointer to the name
2509          */
2510         tp->name = pci_name(pdev);
2511
2512         typhoon_init_interface(tp);
2513         typhoon_init_rings(tp);
2514
2515         if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
2516                 printk(ERR_PFX "%s: cannot boot 3XP sleep image\n",
2517                        pci_name(pdev));
2518                 err = -EIO;
2519                 goto error_out_reset;
2520         }
2521
2522         INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_MAC_ADDRESS);
2523         if(typhoon_issue_command(tp, 1, &xp_cmd, 1, xp_resp) < 0) {
2524                 printk(ERR_PFX "%s: cannot read MAC address\n",
2525                        pci_name(pdev));
2526                 err = -EIO;
2527                 goto error_out_reset;
2528         }
2529
2530         *(__be16 *)&dev->dev_addr[0] = htons(le16_to_cpu(xp_resp[0].parm1));
2531         *(__be32 *)&dev->dev_addr[2] = htonl(le32_to_cpu(xp_resp[0].parm2));
2532
2533         if(!is_valid_ether_addr(dev->dev_addr)) {
2534                 printk(ERR_PFX "%s: Could not obtain valid ethernet address, "
2535                        "aborting\n", pci_name(pdev));
2536                 goto error_out_reset;
2537         }
2538
2539         /* Read the Sleep Image version last, so the response is valid
2540          * later when we print out the version reported.
2541          */
2542         INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
2543         if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
2544                 printk(ERR_PFX "%s: Could not get Sleep Image version\n",
2545                         pci_name(pdev));
2546                 goto error_out_reset;
2547         }
2548
2549         tp->capabilities = typhoon_card_info[card_id].capabilities;
2550         tp->xcvr_select = TYPHOON_XCVR_AUTONEG;
2551
2552         /* Typhoon 1.0 Sleep Images return one response descriptor to the
2553          * READ_VERSIONS command. Those versions are OK after waking up
2554          * from sleep without needing a reset. Typhoon 1.1+ Sleep Images
2555          * seem to need a little extra help to get started. Since we don't
2556          * know how to nudge it along, just kick it.
2557          */
2558         if(xp_resp[0].numDesc != 0)
2559                 tp->capabilities |= TYPHOON_WAKEUP_NEEDS_RESET;
2560
2561         if(typhoon_sleep(tp, PCI_D3hot, 0) < 0) {
2562                 printk(ERR_PFX "%s: cannot put adapter to sleep\n",
2563                        pci_name(pdev));
2564                 err = -EIO;
2565                 goto error_out_reset;
2566         }
2567
2568         /* The chip-specific entries in the device structure. */
2569         dev->netdev_ops         = &typhoon_netdev_ops;
2570         netif_napi_add(dev, &tp->napi, typhoon_poll, 16);
2571         dev->watchdog_timeo     = TX_TIMEOUT;
2572
2573         SET_ETHTOOL_OPS(dev, &typhoon_ethtool_ops);
2574
2575         /* We can handle scatter gather, up to 16 entries, and
2576          * we can do IP checksumming (only version 4, doh...)
2577          */
2578         dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
2579         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
2580         dev->features |= NETIF_F_TSO;
2581
2582         if(register_netdev(dev) < 0)
2583                 goto error_out_reset;
2584
2585         /* fixup our local name */
2586         tp->name = dev->name;
2587
2588         pci_set_drvdata(pdev, dev);
2589
2590         printk(KERN_INFO "%s: %s at %s 0x%llx, %pM\n",
2591                dev->name, typhoon_card_info[card_id].name,
2592                use_mmio ? "MMIO" : "IO",
2593                (unsigned long long)pci_resource_start(pdev, use_mmio),
2594                dev->dev_addr);
2595
2596         /* xp_resp still contains the response to the READ_VERSIONS command.
2597          * For debugging, let the user know what version he has.
2598          */
2599         if(xp_resp[0].numDesc == 0) {
2600                 /* This is the Typhoon 1.0 type Sleep Image, last 16 bits
2601                  * of version is Month/Day of build.
2602                  */
2603                 u16 monthday = le32_to_cpu(xp_resp[0].parm2) & 0xffff;
2604                 printk(KERN_INFO "%s: Typhoon 1.0 Sleep Image built "
2605                         "%02u/%02u/2000\n", dev->name, monthday >> 8,
2606                         monthday & 0xff);
2607         } else if(xp_resp[0].numDesc == 2) {
2608                 /* This is the Typhoon 1.1+ type Sleep Image
2609                  */
2610                 u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2);
2611                 u8 *ver_string = (u8 *) &xp_resp[1];
2612                 ver_string[25] = 0;
2613                 printk(KERN_INFO "%s: Typhoon 1.1+ Sleep Image version "
2614                         "%02x.%03x.%03x %s\n", dev->name, sleep_ver >> 24,
2615                         (sleep_ver >> 12) & 0xfff, sleep_ver & 0xfff,
2616                         ver_string);
2617         } else {
2618                 printk(KERN_WARNING "%s: Unknown Sleep Image version "
2619                         "(%u:%04x)\n", dev->name, xp_resp[0].numDesc,
2620                         le32_to_cpu(xp_resp[0].parm2));
2621         }
2622
2623         return 0;
2624
2625 error_out_reset:
2626         typhoon_reset(ioaddr, NoWait);
2627
2628 error_out_dma:
2629         pci_free_consistent(pdev, sizeof(struct typhoon_shared),
2630                             shared, shared_dma);
2631 error_out_remap:
2632         pci_iounmap(pdev, ioaddr);
2633 error_out_regions:
2634         pci_release_regions(pdev);
2635 error_out_mwi:
2636         pci_clear_mwi(pdev);
2637 error_out_disable:
2638         pci_disable_device(pdev);
2639 error_out_dev:
2640         free_netdev(dev);
2641 error_out:
2642         return err;
2643 }
2644
/*
 * Unwind typhoon_init_one() for one device, in reverse order of
 * acquisition.  The statement order matters: the card was left asleep
 * in D3hot by probe, so it is woken (D0 + config-space restore) before
 * the reset, and the registers stay mapped until after the reset.
 */
static void __devexit
typhoon_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct typhoon *tp = netdev_priv(dev);

	unregister_netdev(dev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	typhoon_reset(tp->ioaddr, NoWait);
	pci_iounmap(pdev, tp->ioaddr);
	pci_free_consistent(pdev, sizeof(struct typhoon_shared),
			    tp->shared, tp->shared_dma);
	pci_release_regions(pdev);
	pci_clear_mwi(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);
}
2664
/* PCI driver glue: probe/remove plus optional power-management hooks. */
static struct pci_driver typhoon_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= typhoon_pci_tbl,
	.probe		= typhoon_init_one,
	.remove		= __devexit_p(typhoon_remove_one),
#ifdef CONFIG_PM
	.suspend	= typhoon_suspend,
	.resume		= typhoon_resume,
#endif
};
2675
2676 static int __init
2677 typhoon_init(void)
2678 {
2679         return pci_register_driver(&typhoon_driver);
2680 }
2681
2682 static void __exit
2683 typhoon_cleanup(void)
2684 {
2685         if (typhoon_fw)
2686                 release_firmware(typhoon_fw);
2687         pci_unregister_driver(&typhoon_driver);
2688 }
2689
/* Register the module's load/unload entry points. */
module_init(typhoon_init);
module_exit(typhoon_cleanup);