drivers/net/skfp: Remove dead code referencing pci_find_device()
[linux-2.6] / drivers / net / typhoon.c
1 /* typhoon.c: A Linux Ethernet device driver for 3Com 3CR990 family of NICs */
2 /*
3         Written 2002-2004 by David Dillow <dave@thedillows.org>
4         Based on code written 1998-2000 by Donald Becker <becker@scyld.com> and
5         Linux 2.2.x driver by David P. McLean <davidpmclean@yahoo.com>.
6
7         This software may be used and distributed according to the terms of
8         the GNU General Public License (GPL), incorporated herein by reference.
9         Drivers based on or derived from this code fall under the GPL and must
10         retain the authorship, copyright and license notice.  This file is not
11         a complete program and may only be used when the entire operating
12         system is licensed under the GPL.
13
14         This software is available on a public web site. It may enable
15         cryptographic capabilities of the 3Com hardware, and may be
16         exported from the United States under License Exception "TSU"
17         pursuant to 15 C.F.R. Section 740.13(e).
18
19         This work was funded by the National Library of Medicine under
20         the Department of Energy project number 0274DD06D1 and NLM project
21         number Y1-LM-2015-01.
22
23         This driver is designed for the 3Com 3CR990 Family of cards with the
24         3XP Processor. It has been tested on x86 and sparc64.
25
26         KNOWN ISSUES:
27         *) The current firmware always strips the VLAN tag off, even if
28                 we tell it not to. You should filter VLANs at the switch
29                 as a workaround (good practice in any event) until we can
30                 get this fixed.
31         *) Cannot DMA Rx packets to a 2 byte aligned address. Also firmware
32                 issue. Hopefully 3Com will fix it.
33         *) Waiting for a command response takes 8ms due to non-preemptable
34                 polling. Only significant for getting stats and creating
35                 SAs, but an ugly wart never the less.
36
37         TODO:
38         *) Doesn't do IPSEC offloading. Yet. Keep yer pants on, it's coming.
39         *) Add more support for ethtool (especially for NIC stats)
40         *) Allow disabling of RX checksum offloading
41         *) Fix MAC changing to work while the interface is up
42                 (Need to put commands on the TX ring, which changes
43                 the locking)
44         *) Add in FCS to {rx,tx}_bytes, since the hardware doesn't. See
45                 http://oss.sgi.com/cgi-bin/mesg.cgi?a=netdev&i=20031215152211.7003fe8e.rddunlap%40osdl.org
46 */
47
/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
 * Setting to > 1518 effectively disables this feature.
 */
static int rx_copybreak = 200;

/* Should we use MMIO or Port IO?
 * 0: Port IO
 * 1: MMIO
 * 2: Try MMIO, fallback to Port IO
 *
 * NOTE(review): declared unsigned int, but registered further down with
 * module_param(use_mmio, int, 0) -- confirm the param type matches.
 */
static unsigned int use_mmio = 2;

/* end user-configurable values */

/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
 */
static const int multicast_filter_limit = 32;
65
66 /* Operational parameters that are set at compile time. */
67
68 /* Keep the ring sizes a power of two for compile efficiency.
69  * The compiler will convert <unsigned>'%'<2^N> into a bit mask.
70  * Making the Tx ring too large decreases the effectiveness of channel
71  * bonding and packet priority.
72  * There are no ill effects from too-large receive rings.
73  *
74  * We don't currently use the Hi Tx ring so, don't make it very big.
75  *
76  * Beware that if we start using the Hi Tx ring, we will need to change
77  * typhoon_num_free_tx() and typhoon_tx_complete() to account for that.
78  */
79 #define TXHI_ENTRIES            2
80 #define TXLO_ENTRIES            128
81 #define RX_ENTRIES              32
82 #define COMMAND_ENTRIES         16
83 #define RESPONSE_ENTRIES        32
84
85 #define COMMAND_RING_SIZE       (COMMAND_ENTRIES * sizeof(struct cmd_desc))
86 #define RESPONSE_RING_SIZE      (RESPONSE_ENTRIES * sizeof(struct resp_desc))
87
88 /* The 3XP will preload and remove 64 entries from the free buffer
89  * list, and we need one entry to keep the ring from wrapping, so
90  * to keep this a power of two, we use 128 entries.
91  */
92 #define RXFREE_ENTRIES          128
93 #define RXENT_ENTRIES           (RXFREE_ENTRIES - 1)
94
95 /* Operational parameters that usually are not changed. */
96
97 /* Time in jiffies before concluding the transmitter is hung. */
98 #define TX_TIMEOUT  (2*HZ)
99
100 #define PKT_BUF_SZ              1536
101
102 #define DRV_MODULE_NAME         "typhoon"
103 #define DRV_MODULE_VERSION      "1.5.8"
104 #define DRV_MODULE_RELDATE      "06/11/09"
105 #define PFX                     DRV_MODULE_NAME ": "
106 #define ERR_PFX                 KERN_ERR PFX
107
108 #include <linux/module.h>
109 #include <linux/kernel.h>
110 #include <linux/string.h>
111 #include <linux/timer.h>
112 #include <linux/errno.h>
113 #include <linux/ioport.h>
114 #include <linux/slab.h>
115 #include <linux/interrupt.h>
116 #include <linux/pci.h>
117 #include <linux/netdevice.h>
118 #include <linux/etherdevice.h>
119 #include <linux/skbuff.h>
120 #include <linux/mm.h>
121 #include <linux/init.h>
122 #include <linux/delay.h>
123 #include <linux/ethtool.h>
124 #include <linux/if_vlan.h>
125 #include <linux/crc32.h>
126 #include <linux/bitops.h>
127 #include <asm/processor.h>
128 #include <asm/io.h>
129 #include <asm/uaccess.h>
130 #include <linux/in6.h>
131 #include <linux/version.h>
132 #include <linux/dma-mapping.h>
133
134 #include "typhoon.h"
135 #include "typhoon-firmware.h"
136
/* Version banner; __devinitdata lets it be discarded with the init code
 * (its user is outside this chunk).
 */
static const char version[] __devinitdata =
    "typhoon.c: version " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David Dillow <dave@thedillows.org>");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("3Com Typhoon Family (3C990, 3CR990, and variants)");
MODULE_PARM_DESC(rx_copybreak, "Packets smaller than this are copied and "
                               "the buffer given back to the NIC. Default "
                               "is 200.");
MODULE_PARM_DESC(use_mmio, "Use MMIO (1) or PIO(0) to access the NIC. "
                           "Default is to try MMIO and fallback to PIO.");
/* perm 0: settable only at load time, not exposed in sysfs */
module_param(rx_copybreak, int, 0);
module_param(use_mmio, int, 0);
151
152 #if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32
153 #warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO
154 #undef NETIF_F_TSO
155 #endif
156
157 #if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS)
158 #error TX ring too small!
159 #endif
160
/* Static description of one card model; see typhoon_card_info[] below. */
struct typhoon_card_info {
        char *name;             /* human-readable model name */
        int capabilities;       /* TYPHOON_CRYPTO_* / TYPHOON_FIBER / ... bits */
};
165
166 #define TYPHOON_CRYPTO_NONE             0x00
167 #define TYPHOON_CRYPTO_DES              0x01
168 #define TYPHOON_CRYPTO_3DES             0x02
169 #define TYPHOON_CRYPTO_VARIABLE         0x04
170 #define TYPHOON_FIBER                   0x08
171 #define TYPHOON_WAKEUP_NEEDS_RESET      0x10
172
173 enum typhoon_cards {
174         TYPHOON_TX = 0, TYPHOON_TX95, TYPHOON_TX97, TYPHOON_SVR,
175         TYPHOON_SVR95, TYPHOON_SVR97, TYPHOON_TXM, TYPHOON_BSVR,
176         TYPHOON_FX95, TYPHOON_FX97, TYPHOON_FX95SVR, TYPHOON_FX97SVR,
177         TYPHOON_FXM,
178 };
179
/* directly indexed by enum typhoon_cards, above -- keep the two in sync
 * when adding a model
 */
static const struct typhoon_card_info typhoon_card_info[] __devinitdata = {
        { "3Com Typhoon (3C990-TX)",
                TYPHOON_CRYPTO_NONE},
        { "3Com Typhoon (3CR990-TX-95)",
                TYPHOON_CRYPTO_DES},
        { "3Com Typhoon (3CR990-TX-97)",
                TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES},
        { "3Com Typhoon (3C990SVR)",
                TYPHOON_CRYPTO_NONE},
        { "3Com Typhoon (3CR990SVR95)",
                TYPHOON_CRYPTO_DES},
        { "3Com Typhoon (3CR990SVR97)",
                TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES},
        { "3Com Typhoon2 (3C990B-TX-M)",
                TYPHOON_CRYPTO_VARIABLE},
        { "3Com Typhoon2 (3C990BSVR)",
                TYPHOON_CRYPTO_VARIABLE},
        { "3Com Typhoon (3CR990-FX-95)",
                TYPHOON_CRYPTO_DES | TYPHOON_FIBER},
        { "3Com Typhoon (3CR990-FX-97)",
                TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER},
        { "3Com Typhoon (3CR990-FX-95 Server)",
                TYPHOON_CRYPTO_DES | TYPHOON_FIBER},
        { "3Com Typhoon (3CR990-FX-97 Server)",
                TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER},
        { "3Com Typhoon2 (3C990B-FX-97)",
                TYPHOON_CRYPTO_VARIABLE | TYPHOON_FIBER},
};
209
210 /* Notes on the new subsystem numbering scheme:
211  * bits 0-1 indicate crypto capabilities: (0) variable, (1) DES, or (2) 3DES
212  * bit 4 indicates if this card has secured firmware (we don't support it)
213  * bit 8 indicates if this is a (0) copper or (1) fiber card
214  * bits 12-16 indicate card type: (0) client and (1) server
215  */
216 static struct pci_device_id typhoon_pci_tbl[] = {
217         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990,
218           PCI_ANY_ID, PCI_ANY_ID, 0, 0,TYPHOON_TX },
219         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_95,
220           PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX95 },
221         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_97,
222           PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX97 },
223         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
224           PCI_ANY_ID, 0x1000, 0, 0, TYPHOON_TXM },
225         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
226           PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FXM },
227         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
228           PCI_ANY_ID, 0x2000, 0, 0, TYPHOON_BSVR },
229         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
230           PCI_ANY_ID, 0x1101, 0, 0, TYPHOON_FX95 },
231         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
232           PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FX97 },
233         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
234           PCI_ANY_ID, 0x2101, 0, 0, TYPHOON_FX95SVR },
235         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
236           PCI_ANY_ID, 0x2102, 0, 0, TYPHOON_FX97SVR },
237         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR95,
238           PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR95 },
239         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR97,
240           PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR97 },
241         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR,
242           PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR },
243         { 0, }
244 };
245 MODULE_DEVICE_TABLE(pci, typhoon_pci_tbl);
246
247 /* Define the shared memory area
248  * Align everything the 3XP will normally be using.
249  * We'll need to move/align txHi if we start using that ring.
250  */
251 #define __3xp_aligned   ____cacheline_aligned
/* Shared memory area handed to the 3XP; layout must stay stable (packed,
 * with each ring on its own cache line via __3xp_aligned).
 */
struct typhoon_shared {
        struct typhoon_interface        iface;
        struct typhoon_indexes          indexes                 __3xp_aligned;
        struct tx_desc                  txLo[TXLO_ENTRIES]      __3xp_aligned;
        struct rx_desc                  rxLo[RX_ENTRIES]        __3xp_aligned;
        struct rx_desc                  rxHi[RX_ENTRIES]        __3xp_aligned;
        struct cmd_desc                 cmd[COMMAND_ENTRIES]    __3xp_aligned;
        struct resp_desc                resp[RESPONSE_ENTRIES]  __3xp_aligned;
        struct rx_free                  rxBuff[RXFREE_ENTRIES]  __3xp_aligned;
        u32                             zeroWord;
        /* must be moved/aligned if the Hi Tx ring is ever used (see above) */
        struct tx_desc                  txHi[TXHI_ENTRIES];
} __attribute__ ((packed));
264
/* Host-side bookkeeping for one receive buffer handed to the NIC. */
struct rxbuff_ent {
        struct sk_buff *skb;            /* buffer the packet lands in */
        dma_addr_t      dma_addr;       /* its mapped DMA address */
};
269
/* Per-adapter private state (netdev_priv). Fields are grouped by the
 * cache line they want to share: Tx path, Irq/Rx path, then the command
 * path and slow-path bookkeeping.
 */
struct typhoon {
        /* Tx cache line section */
        struct transmit_ring    txLoRing        ____cacheline_aligned;
        struct pci_dev *        tx_pdev;
        void __iomem            *tx_ioaddr;
        u32                     txlo_dma_addr;

        /* Irq/Rx cache line section */
        void __iomem            *ioaddr         ____cacheline_aligned;
        struct typhoon_indexes *indexes;        /* in the shared area */
        u8                      awaiting_resp;  /* command response pending */
        u8                      duplex;
        u8                      speed;
        u8                      card_state;     /* enum state_values */
        struct basic_ring       rxLoRing;
        struct pci_dev *        pdev;
        struct net_device *     dev;
        struct napi_struct      napi;
        spinlock_t              state_lock;
        struct vlan_group *     vlgrp;
        struct basic_ring       rxHiRing;
        struct basic_ring       rxBuffRing;
        struct rxbuff_ent       rxbuffers[RXENT_ENTRIES];

        /* general section */
        spinlock_t              command_lock    ____cacheline_aligned;
        struct basic_ring       cmdRing;
        struct basic_ring       respRing;
        struct net_device_stats stats;
        struct net_device_stats stats_saved;
        const char *            name;
        struct typhoon_shared * shared;
        dma_addr_t              shared_dma;
        u16                     xcvr_select;
        u16                     wol_events;
        u32                     offload;        /* TYPHOON_OFFLOAD_* bits */

        /* unused stuff (future use) */
        int                     capabilities;
        struct transmit_ring    txHiRing;
};
311
/* How command/reset code may wait for the card (see typhoon_reset()):
 * NoWait: fire and forget; WaitNoSleep: busy-poll with udelay();
 * WaitSleep: may schedule while polling.
 */
enum completion_wait_values {
        NoWait = 0, WaitNoSleep, WaitSleep,
};
315
/* These are the values for the typhoon.card_state variable.
 * These determine where the statistics will come from in get_stats().
 * The sleep image does not support the statistics we need, so while
 * Sleeping we fall back on saved stats (get_stats() is outside this chunk).
 */
enum state_values {
        Sleeping = 0, Running,
};
323
324 /* PCI writes are not guaranteed to be posted in order, but outstanding writes
325  * cannot pass a read, so this forces current writes to post.
326  */
327 #define typhoon_post_pci_writes(x) \
328         do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0)
329
330 /* We'll wait up to six seconds for a reset, and half a second normally.
331  */
332 #define TYPHOON_UDELAY                  50
333 #define TYPHOON_RESET_TIMEOUT_SLEEP     (6 * HZ)
334 #define TYPHOON_RESET_TIMEOUT_NOSLEEP   ((6 * 1000000) / TYPHOON_UDELAY)
335 #define TYPHOON_WAIT_TIMEOUT            ((1000000 / 2) / TYPHOON_UDELAY)
336
337 #define typhoon_synchronize_irq(x) synchronize_irq(x)
338
339 #if defined(NETIF_F_TSO)
340 #define skb_tso_size(x)         (skb_shinfo(x)->gso_size)
341 #define TSO_NUM_DESCRIPTORS     2
342 #define TSO_OFFLOAD_ON          TYPHOON_OFFLOAD_TCP_SEGMENT
343 #else
344 #define NETIF_F_TSO             0
345 #define skb_tso_size(x)         0
346 #define TSO_NUM_DESCRIPTORS     0
347 #define TSO_OFFLOAD_ON          0
348 #endif
349
350 static inline void
351 typhoon_inc_index(u32 *index, const int count, const int num_entries)
352 {
353         /* Increment a ring index -- we can use this for all rings execept
354          * the Rx rings, as they use different size descriptors
355          * otherwise, everything is the same size as a cmd_desc
356          */
357         *index += count * sizeof(struct cmd_desc);
358         *index %= num_entries * sizeof(struct cmd_desc);
359 }
360
static inline void
typhoon_inc_cmd_index(u32 *index, const int count)
{
        /* advance a command-ring byte offset by @count descriptors */
        typhoon_inc_index(index, count, COMMAND_ENTRIES);
}
366
static inline void
typhoon_inc_resp_index(u32 *index, const int count)
{
        /* advance a response-ring byte offset by @count descriptors */
        typhoon_inc_index(index, count, RESPONSE_ENTRIES);
}
372
static inline void
typhoon_inc_rxfree_index(u32 *index, const int count)
{
        /* advance a free-buffer-ring byte offset by @count descriptors */
        typhoon_inc_index(index, count, RXFREE_ENTRIES);
}
378
static inline void
typhoon_inc_tx_index(u32 *index, const int count)
{
        /* if we start using the Hi Tx ring, this needs updating */
        typhoon_inc_index(index, count, TXLO_ENTRIES);
}
385
static inline void
typhoon_inc_rx_index(u32 *index, const int count)
{
        /* sizeof(struct rx_desc) != sizeof(struct cmd_desc), so the Rx
         * rings cannot share typhoon_inc_index()
         */
        *index += count * sizeof(struct rx_desc);
        *index %= RX_ENTRIES * sizeof(struct rx_desc);
}
393
/* Soft-reset the 3XP and, per @wait_type (enum completion_wait_values),
 * wait for it to report TYPHOON_STATUS_WAITING_FOR_HOST. Interrupts are
 * masked and acked on both sides of the reset. Returns 0 on success or
 * -ETIMEDOUT.
 */
static int
typhoon_reset(void __iomem *ioaddr, int wait_type)
{
        int i, err = 0;
        int timeout;

        if(wait_type == WaitNoSleep)
                timeout = TYPHOON_RESET_TIMEOUT_NOSLEEP;
        else
                timeout = TYPHOON_RESET_TIMEOUT_SLEEP;

        /* mask and ack everything so the reset itself can't raise an IRQ */
        iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
        iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);

        /* pulse the reset: assert, flush posted writes, hold 1us, deassert */
        iowrite32(TYPHOON_RESET_ALL, ioaddr + TYPHOON_REG_SOFT_RESET);
        typhoon_post_pci_writes(ioaddr);
        udelay(1);
        iowrite32(TYPHOON_RESET_NONE, ioaddr + TYPHOON_REG_SOFT_RESET);

        if(wait_type != NoWait) {
                for(i = 0; i < timeout; i++) {
                        if(ioread32(ioaddr + TYPHOON_REG_STATUS) ==
                           TYPHOON_STATUS_WAITING_FOR_HOST)
                                goto out;

                        if(wait_type == WaitSleep)
                                schedule_timeout_uninterruptible(1);
                        else
                                udelay(TYPHOON_UDELAY);
                }

                err = -ETIMEDOUT;
        }

out:
        iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
        iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);

        /* The 3XP seems to need a little extra time to complete the load
         * of the sleep image before we can reliably boot it. Failure to
         * do this occasionally results in a hung adapter after boot in
         * typhoon_init_one() while trying to read the MAC address or
         * putting the card to sleep. 3Com's driver waits 5ms, but
         * that seems to be overkill. However, if we can sleep, we might
         * as well give it that much time. Otherwise, we'll give it 500us,
         * which should be enough (I've see it work well at 100us, but still
         * saw occasional problems.)
         */
        if(wait_type == WaitSleep)
                msleep(5);
        else
                udelay(500);
        return err;
}
448
449 static int
450 typhoon_wait_status(void __iomem *ioaddr, u32 wait_value)
451 {
452         int i, err = 0;
453
454         for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
455                 if(ioread32(ioaddr + TYPHOON_REG_STATUS) == wait_value)
456                         goto out;
457                 udelay(TYPHOON_UDELAY);
458         }
459
460         err = -ETIMEDOUT;
461
462 out:
463         return err;
464 }
465
466 static inline void
467 typhoon_media_status(struct net_device *dev, struct resp_desc *resp)
468 {
469         if(resp->parm1 & TYPHOON_MEDIA_STAT_NO_LINK)
470                 netif_carrier_off(dev);
471         else
472                 netif_carrier_on(dev);
473 }
474
/* Answer the 3XP's "hello" keepalive by posting a HELLO_RESP command
 * directly into the command ring (called from response processing).
 */
static inline void
typhoon_hello(struct typhoon *tp)
{
        struct basic_ring *ring = &tp->cmdRing;
        struct cmd_desc *cmd;

        /* We only get a hello request if we've not sent anything to the
         * card in a long while. If the lock is held, then we're in the
         * process of issuing a command, so we don't need to respond.
         */
        if(spin_trylock(&tp->command_lock)) {
                cmd = (struct cmd_desc *)(ring->ringBase + ring->lastWrite);
                typhoon_inc_cmd_index(&ring->lastWrite, 1);

                INIT_COMMAND_NO_RESPONSE(cmd, TYPHOON_CMD_HELLO_RESP);
                /* descriptor must be visible before ringing the doorbell */
                smp_wmb();
                iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
                spin_unlock(&tp->command_lock);
        }
}
495
/* Drain the response ring (respCleared up to respReady). A response
 * carrying a sequence number is the reply to an outstanding command:
 * copy up to @resp_size descriptors into @resp_save, handling wrap at
 * the end of the ring. Unsolicited media-status and hello responses are
 * handled inline; anything else is logged and dropped. Returns nonzero
 * when no caller-supplied @resp_save is left unfilled.
 */
static int
typhoon_process_response(struct typhoon *tp, int resp_size,
                                struct resp_desc *resp_save)
{
        struct typhoon_indexes *indexes = tp->indexes;
        struct resp_desc *resp;
        u8 *base = tp->respRing.ringBase;
        int count, len, wrap_len;
        u32 cleared;
        u32 ready;

        cleared = le32_to_cpu(indexes->respCleared);
        ready = le32_to_cpu(indexes->respReady);
        while(cleared != ready) {
                resp = (struct resp_desc *)(base + cleared);
                /* numDesc counts the extra descriptors beyond the first */
                count = resp->numDesc + 1;
                if(resp_save && resp->seqNo) {
                        if(count > resp_size) {
                                resp_save->flags = TYPHOON_RESP_ERROR;
                                goto cleanup;
                        }

                        /* copy in two pieces if the response wraps the ring */
                        wrap_len = 0;
                        len = count * sizeof(*resp);
                        if(unlikely(cleared + len > RESPONSE_RING_SIZE)) {
                                wrap_len = cleared + len - RESPONSE_RING_SIZE;
                                len = RESPONSE_RING_SIZE - cleared;
                        }

                        memcpy(resp_save, resp, len);
                        if(unlikely(wrap_len)) {
                                resp_save += len / sizeof(*resp);
                                memcpy(resp_save, base, wrap_len);
                        }

                        /* mark the awaited response as delivered */
                        resp_save = NULL;
                } else if(resp->cmd == TYPHOON_CMD_READ_MEDIA_STATUS) {
                        typhoon_media_status(tp->dev, resp);
                } else if(resp->cmd == TYPHOON_CMD_HELLO_RESP) {
                        typhoon_hello(tp);
                } else {
                        printk(KERN_ERR "%s: dumping unexpected response "
                               "0x%04x:%d:0x%02x:0x%04x:%08x:%08x\n",
                               tp->name, le16_to_cpu(resp->cmd),
                               resp->numDesc, resp->flags,
                               le16_to_cpu(resp->parm1),
                               le32_to_cpu(resp->parm2),
                               le32_to_cpu(resp->parm3));
                }

cleanup:
                typhoon_inc_resp_index(&cleared, count);
        }

        indexes->respCleared = cpu_to_le32(cleared);
        /* index update must be globally visible before we return */
        wmb();
        return (resp_save == NULL);
}
554
555 static inline int
556 typhoon_num_free(int lastWrite, int lastRead, int ringSize)
557 {
558         /* this works for all descriptors but rx_desc, as they are a
559          * different size than the cmd_desc -- everyone else is the same
560          */
561         lastWrite /= sizeof(struct cmd_desc);
562         lastRead /= sizeof(struct cmd_desc);
563         return (ringSize + lastRead - lastWrite - 1) % ringSize;
564 }
565
566 static inline int
567 typhoon_num_free_cmd(struct typhoon *tp)
568 {
569         int lastWrite = tp->cmdRing.lastWrite;
570         int cmdCleared = le32_to_cpu(tp->indexes->cmdCleared);
571
572         return typhoon_num_free(lastWrite, cmdCleared, COMMAND_ENTRIES);
573 }
574
575 static inline int
576 typhoon_num_free_resp(struct typhoon *tp)
577 {
578         int respReady = le32_to_cpu(tp->indexes->respReady);
579         int respCleared = le32_to_cpu(tp->indexes->respCleared);
580
581         return typhoon_num_free(respReady, respCleared, RESPONSE_ENTRIES);
582 }
583
584 static inline int
585 typhoon_num_free_tx(struct transmit_ring *ring)
586 {
587         /* if we start using the Hi Tx ring, this needs updating */
588         return typhoon_num_free(ring->lastWrite, ring->lastRead, TXLO_ENTRIES);
589 }
590
591 static int
592 typhoon_issue_command(struct typhoon *tp, int num_cmd, struct cmd_desc *cmd,
593                       int num_resp, struct resp_desc *resp)
594 {
595         struct typhoon_indexes *indexes = tp->indexes;
596         struct basic_ring *ring = &tp->cmdRing;
597         struct resp_desc local_resp;
598         int i, err = 0;
599         int got_resp;
600         int freeCmd, freeResp;
601         int len, wrap_len;
602
603         spin_lock(&tp->command_lock);
604
605         freeCmd = typhoon_num_free_cmd(tp);
606         freeResp = typhoon_num_free_resp(tp);
607
608         if(freeCmd < num_cmd || freeResp < num_resp) {
609                 printk("%s: no descs for cmd, had (needed) %d (%d) cmd, "
610                         "%d (%d) resp\n", tp->name, freeCmd, num_cmd,
611                         freeResp, num_resp);
612                 err = -ENOMEM;
613                 goto out;
614         }
615
616         if(cmd->flags & TYPHOON_CMD_RESPOND) {
617                 /* If we're expecting a response, but the caller hasn't given
618                  * us a place to put it, we'll provide one.
619                  */
620                 tp->awaiting_resp = 1;
621                 if(resp == NULL) {
622                         resp = &local_resp;
623                         num_resp = 1;
624                 }
625         }
626
627         wrap_len = 0;
628         len = num_cmd * sizeof(*cmd);
629         if(unlikely(ring->lastWrite + len > COMMAND_RING_SIZE)) {
630                 wrap_len = ring->lastWrite + len - COMMAND_RING_SIZE;
631                 len = COMMAND_RING_SIZE - ring->lastWrite;
632         }
633
634         memcpy(ring->ringBase + ring->lastWrite, cmd, len);
635         if(unlikely(wrap_len)) {
636                 struct cmd_desc *wrap_ptr = cmd;
637                 wrap_ptr += len / sizeof(*cmd);
638                 memcpy(ring->ringBase, wrap_ptr, wrap_len);
639         }
640
641         typhoon_inc_cmd_index(&ring->lastWrite, num_cmd);
642
643         /* "I feel a presence... another warrior is on the mesa."
644          */
645         wmb();
646         iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
647         typhoon_post_pci_writes(tp->ioaddr);
648
649         if((cmd->flags & TYPHOON_CMD_RESPOND) == 0)
650                 goto out;
651
652         /* Ugh. We'll be here about 8ms, spinning our thumbs, unable to
653          * preempt or do anything other than take interrupts. So, don't
654          * wait for a response unless you have to.
655          *
656          * I've thought about trying to sleep here, but we're called
657          * from many contexts that don't allow that. Also, given the way
658          * 3Com has implemented irq coalescing, we would likely timeout --
659          * this has been observed in real life!
660          *
661          * The big killer is we have to wait to get stats from the card,
662          * though we could go to a periodic refresh of those if we don't
663          * mind them getting somewhat stale. The rest of the waiting
664          * commands occur during open/close/suspend/resume, so they aren't
665          * time critical. Creating SAs in the future will also have to
666          * wait here.
667          */
668         got_resp = 0;
669         for(i = 0; i < TYPHOON_WAIT_TIMEOUT && !got_resp; i++) {
670                 if(indexes->respCleared != indexes->respReady)
671                         got_resp = typhoon_process_response(tp, num_resp,
672                                                                 resp);
673                 udelay(TYPHOON_UDELAY);
674         }
675
676         if(!got_resp) {
677                 err = -ETIMEDOUT;
678                 goto out;
679         }
680
681         /* Collect the error response even if we don't care about the
682          * rest of the response
683          */
684         if(resp->flags & TYPHOON_RESP_ERROR)
685                 err = -EIO;
686
687 out:
688         if(tp->awaiting_resp) {
689                 tp->awaiting_resp = 0;
690                 smp_wmb();
691
692                 /* Ugh. If a response was added to the ring between
693                  * the call to typhoon_process_response() and the clearing
694                  * of tp->awaiting_resp, we could have missed the interrupt
695                  * and it could hang in the ring an indeterminate amount of
696                  * time. So, check for it, and interrupt ourselves if this
697                  * is the case.
698                  */
699                 if(indexes->respCleared != indexes->respReady)
700                         iowrite32(1, tp->ioaddr + TYPHOON_REG_SELF_INTERRUPT);
701         }
702
703         spin_unlock(&tp->command_lock);
704         return err;
705 }
706
707 static void
708 typhoon_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
709 {
710         struct typhoon *tp = netdev_priv(dev);
711         struct cmd_desc xp_cmd;
712         int err;
713
714         spin_lock_bh(&tp->state_lock);
715         if(!tp->vlgrp != !grp) {
716                 /* We've either been turned on for the first time, or we've
717                  * been turned off. Update the 3XP.
718                  */
719                 if(grp)
720                         tp->offload |= TYPHOON_OFFLOAD_VLAN;
721                 else
722                         tp->offload &= ~TYPHOON_OFFLOAD_VLAN;
723
724                 /* If the interface is up, the runtime is running -- and we
725                  * must be up for the vlan core to call us.
726                  *
727                  * Do the command outside of the spin lock, as it is slow.
728                  */
729                 INIT_COMMAND_WITH_RESPONSE(&xp_cmd,
730                                         TYPHOON_CMD_SET_OFFLOAD_TASKS);
731                 xp_cmd.parm2 = tp->offload;
732                 xp_cmd.parm3 = tp->offload;
733                 spin_unlock_bh(&tp->state_lock);
734                 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
735                 if(err < 0)
736                         printk("%s: vlan offload error %d\n", tp->name, -err);
737                 spin_lock_bh(&tp->state_lock);
738         }
739
740         /* now make the change visible */
741         tp->vlgrp = grp;
742         spin_unlock_bh(&tp->state_lock);
743 }
744
/* Write a TCP segmentation (TSO) option descriptor for @skb into the Tx
 * ring at lastWrite. respAddrLo is set to the DMA address of this
 * descriptor's own bytesTx field (ring_dma + offset within the ring) --
 * presumably where the NIC writes back status; confirm against the 3XP
 * firmware interface docs.
 */
static inline void
typhoon_tso_fill(struct sk_buff *skb, struct transmit_ring *txRing,
                        u32 ring_dma)
{
        struct tcpopt_desc *tcpd;
        u32 tcpd_offset = ring_dma;

        tcpd = (struct tcpopt_desc *) (txRing->ringBase + txRing->lastWrite);
        tcpd_offset += txRing->lastWrite;
        tcpd_offset += offsetof(struct tcpopt_desc, bytesTx);
        typhoon_inc_tx_index(&txRing->lastWrite, 1);

        tcpd->flags = TYPHOON_OPT_DESC | TYPHOON_OPT_TCP_SEG;
        tcpd->numDesc = 1;
        tcpd->mss_flags = cpu_to_le16(skb_tso_size(skb));
        tcpd->mss_flags |= TYPHOON_TSO_FIRST | TYPHOON_TSO_LAST;
        tcpd->respAddrLo = cpu_to_le32(tcpd_offset);
        tcpd->bytesTx = cpu_to_le32(skb->len);
        tcpd->status = 0;
}
765
/* hard_start_xmit entry point: build the descriptor chain for @skb on the
 * low-priority Tx ring and kick the 3XP. Returns 0 (the queue is stopped
 * pre-emptively below when the worst-case next packet would not fit).
 */
static int
typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct typhoon *tp = netdev_priv(dev);
	struct transmit_ring *txRing;
	struct tx_desc *txd, *first_txd;
	dma_addr_t skb_dma;
	int numDesc;

	/* we have two rings to choose from, but we only use txLo for now
	 * If we start using the Hi ring as well, we'll need to update
	 * typhoon_stop_runtime(), typhoon_interrupt(), typhoon_num_free_tx(),
	 * and TXHI_ENTRIES to match, as well as update the TSO code below
	 * to get the right DMA address
	 */
	txRing = &tp->txLoRing;

	/* We need one descriptor for each fragment of the sk_buff, plus the
	 * one for the ->data area of it.
	 *
	 * The docs say a maximum of 16 fragment descriptors per TCP option
	 * descriptor, then make a new packet descriptor and option descriptor
	 * for the next 16 fragments. The engineers say just an option
	 * descriptor is needed. I've tested up to 26 fragments with a single
	 * packet descriptor/option descriptor combo, so I use that for now.
	 *
	 * If problems develop with TSO, check this first.
	 */
	numDesc = skb_shinfo(skb)->nr_frags + 1;
	if (skb_is_gso(skb))
		numDesc++;

	/* When checking for free space in the ring, we need to also
	 * account for the initial Tx descriptor, and we always must leave
	 * at least one descriptor unused in the ring so that it doesn't
	 * wrap and look empty.
	 *
	 * The only time we should loop here is when we hit the race
	 * between marking the queue awake and updating the cleared index.
	 * Just loop and it will appear. This comes from the acenic driver.
	 */
	while(unlikely(typhoon_num_free_tx(txRing) < (numDesc + 2)))
		smp_rmb();

	first_txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite);
	typhoon_inc_tx_index(&txRing->lastWrite, 1);

	/* The header descriptor stashes the skb pointer (split across
	 * addr/addrHi) so typhoon_clean_tx() can free it on completion.
	 */
	first_txd->flags = TYPHOON_TX_DESC | TYPHOON_DESC_VALID;
	first_txd->numDesc = 0;
	first_txd->len = 0;
	first_txd->addr = (u64)((unsigned long) skb) & 0xffffffff;
	first_txd->addrHi = (u64)((unsigned long) skb) >> 32;
	first_txd->processFlags = 0;

	if(skb->ip_summed == CHECKSUM_PARTIAL) {
		/* The 3XP will figure out if this is UDP/TCP */
		first_txd->processFlags |= TYPHOON_TX_PF_TCP_CHKSUM;
		first_txd->processFlags |= TYPHOON_TX_PF_UDP_CHKSUM;
		first_txd->processFlags |= TYPHOON_TX_PF_IP_CHKSUM;
	}

	if(vlan_tx_tag_present(skb)) {
		first_txd->processFlags |=
		    TYPHOON_TX_PF_INSERT_VLAN | TYPHOON_TX_PF_VLAN_PRIORITY;
		first_txd->processFlags |=
		    cpu_to_le32(htons(vlan_tx_tag_get(skb)) <<
				TYPHOON_TX_PF_VLAN_TAG_SHIFT);
	}

	if (skb_is_gso(skb)) {
		first_txd->processFlags |= TYPHOON_TX_PF_TCP_SEGMENT;
		first_txd->numDesc++;

		typhoon_tso_fill(skb, txRing, tp->txlo_dma_addr);
	}

	txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite);
	typhoon_inc_tx_index(&txRing->lastWrite, 1);

	/* No need to worry about padding packet -- the firmware pads
	 * it with zeros to ETH_ZLEN for us.
	 */
	if(skb_shinfo(skb)->nr_frags == 0) {
		/* Linear skb: one fragment descriptor covers everything */
		skb_dma = pci_map_single(tp->tx_pdev, skb->data, skb->len,
				       PCI_DMA_TODEVICE);
		txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
		txd->len = cpu_to_le16(skb->len);
		txd->addr = cpu_to_le32(skb_dma);
		txd->addrHi = 0;
		first_txd->numDesc++;
	} else {
		int i, len;

		/* Non-linear skb: one descriptor for the head area, then
		 * one per page fragment.
		 */
		len = skb_headlen(skb);
		skb_dma = pci_map_single(tp->tx_pdev, skb->data, len,
					 PCI_DMA_TODEVICE);
		txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
		txd->len = cpu_to_le16(len);
		txd->addr = cpu_to_le32(skb_dma);
		txd->addrHi = 0;
		first_txd->numDesc++;

		for(i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			void *frag_addr;

			txd = (struct tx_desc *) (txRing->ringBase +
						txRing->lastWrite);
			typhoon_inc_tx_index(&txRing->lastWrite, 1);

			len = frag->size;
			frag_addr = (void *) page_address(frag->page) +
						frag->page_offset;
			skb_dma = pci_map_single(tp->tx_pdev, frag_addr, len,
					 PCI_DMA_TODEVICE);
			txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
			txd->len = cpu_to_le16(len);
			txd->addr = cpu_to_le32(skb_dma);
			txd->addrHi = 0;
			first_txd->numDesc++;
		}
	}

	/* Kick the 3XP
	 */
	wmb();		/* descriptors must be visible before the doorbell */
	iowrite32(txRing->lastWrite, tp->tx_ioaddr + txRing->writeRegister);

	dev->trans_start = jiffies;

	/* If we don't have room to put the worst case packet on the
	 * queue, then we must stop the queue. We need 2 extra
	 * descriptors -- one to prevent ring wrap, and one for the
	 * Tx header.
	 */
	numDesc = MAX_SKB_FRAGS + TSO_NUM_DESCRIPTORS + 1;

	if(typhoon_num_free_tx(txRing) < (numDesc + 2)) {
		netif_stop_queue(dev);

		/* A Tx complete IRQ could have gotten inbetween, making
		 * the ring free again. Only need to recheck here, since
		 * Tx is serialized.
		 */
		if(typhoon_num_free_tx(txRing) >= (numDesc + 2))
			netif_wake_queue(dev);
	}

	return 0;
}
916
/* Program the 3XP's Rx filter from dev->flags and the multicast list:
 * promiscuous, all-multicast, or a 64-bit CRC hash of up to
 * multicast_filter_limit addresses. Always ends by issuing
 * TYPHOON_CMD_SET_RX_FILTER with the accumulated filter bits.
 */
static void
typhoon_set_rx_mode(struct net_device *dev)
{
	struct typhoon *tp = netdev_priv(dev);
	struct cmd_desc xp_cmd;
	u32 mc_filter[2];
	u16 filter;

	filter = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
	if(dev->flags & IFF_PROMISC) {
		filter |= TYPHOON_RX_FILTER_PROMISCOUS;
	} else if((dev->mc_count > multicast_filter_limit) ||
		  (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		filter |= TYPHOON_RX_FILTER_ALL_MCAST;
	} else if(dev->mc_count) {
		struct dev_mc_list *mclist;
		int i;

		/* Build a 64-bit hash table: low 6 bits of the Ethernet
		 * CRC of each address select a bit in mc_filter[].
		 */
		memset(mc_filter, 0, sizeof(mc_filter));
		for(i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		    i++, mclist = mclist->next) {
			int bit = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3f;
			mc_filter[bit >> 5] |= 1 << (bit & 0x1f);
		}

		INIT_COMMAND_NO_RESPONSE(&xp_cmd,
					 TYPHOON_CMD_SET_MULTICAST_HASH);
		xp_cmd.parm1 = TYPHOON_MCAST_HASH_SET;
		xp_cmd.parm2 = cpu_to_le32(mc_filter[0]);
		xp_cmd.parm3 = cpu_to_le32(mc_filter[1]);
		typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

		filter |= TYPHOON_RX_FILTER_MCAST_HASH;
	}

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
	xp_cmd.parm1 = filter;
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
}
957
/* Read the hardware statistics from the 3XP (a READ_STATS command with a
 * 7-descriptor response), unpack them into tp->stats, update the cached
 * link speed/duplex, and fold in tp->stats_saved (counters preserved
 * across sleep cycles). Returns 0 or a negative error from the command.
 * Note: issuing the command polls for up to ~8ms (see file header).
 */
static int
typhoon_do_get_stats(struct typhoon *tp)
{
	struct net_device_stats *stats = &tp->stats;
	struct net_device_stats *saved = &tp->stats_saved;
	struct cmd_desc xp_cmd;
	struct resp_desc xp_resp[7];
	struct stats_resp *s = (struct stats_resp *) xp_resp;
	int err;

	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_STATS);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 7, xp_resp);
	if(err < 0)
		return err;

	/* 3Com's Linux driver uses txMultipleCollisions as it's
	 * collisions value, but there is some other collision info as well...
	 *
	 * The extra status reported would be a good candidate for
	 * ethtool_ops->get_{strings,stats}()
	 */
	stats->tx_packets = le32_to_cpu(s->txPackets);
	stats->tx_bytes = le32_to_cpu(s->txBytes);
	stats->tx_errors = le32_to_cpu(s->txCarrierLost);
	stats->tx_carrier_errors = le32_to_cpu(s->txCarrierLost);
	stats->collisions = le32_to_cpu(s->txMultipleCollisions);
	stats->rx_packets = le32_to_cpu(s->rxPacketsGood);
	stats->rx_bytes = le32_to_cpu(s->rxBytesGood);
	stats->rx_fifo_errors = le32_to_cpu(s->rxFifoOverruns);
	stats->rx_errors = le32_to_cpu(s->rxFifoOverruns) +
			le32_to_cpu(s->BadSSD) + le32_to_cpu(s->rxCrcErrors);
	stats->rx_crc_errors = le32_to_cpu(s->rxCrcErrors);
	stats->rx_length_errors = le32_to_cpu(s->rxOversized);
	tp->speed = (s->linkStatus & TYPHOON_LINK_100MBPS) ?
			SPEED_100 : SPEED_10;
	tp->duplex = (s->linkStatus & TYPHOON_LINK_FULL_DUPLEX) ?
			DUPLEX_FULL : DUPLEX_HALF;

	/* add in the saved statistics
	 */
	stats->tx_packets += saved->tx_packets;
	stats->tx_bytes += saved->tx_bytes;
	stats->tx_errors += saved->tx_errors;
	stats->collisions += saved->collisions;
	stats->rx_packets += saved->rx_packets;
	stats->rx_bytes += saved->rx_bytes;
	stats->rx_fifo_errors += saved->rx_fifo_errors;
	stats->rx_errors += saved->rx_errors;
	stats->rx_crc_errors += saved->rx_crc_errors;
	stats->rx_length_errors += saved->rx_length_errors;

	return 0;
}
1011
1012 static struct net_device_stats *
1013 typhoon_get_stats(struct net_device *dev)
1014 {
1015         struct typhoon *tp = netdev_priv(dev);
1016         struct net_device_stats *stats = &tp->stats;
1017         struct net_device_stats *saved = &tp->stats_saved;
1018
1019         smp_rmb();
1020         if(tp->card_state == Sleeping)
1021                 return saved;
1022
1023         if(typhoon_do_get_stats(tp) < 0) {
1024                 printk(KERN_ERR "%s: error getting stats\n", dev->name);
1025                 return saved;
1026         }
1027
1028         return stats;
1029 }
1030
1031 static int
1032 typhoon_set_mac_address(struct net_device *dev, void *addr)
1033 {
1034         struct sockaddr *saddr = (struct sockaddr *) addr;
1035
1036         if(netif_running(dev))
1037                 return -EBUSY;
1038
1039         memcpy(dev->dev_addr, saddr->sa_data, dev->addr_len);
1040         return 0;
1041 }
1042
1043 static void
1044 typhoon_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1045 {
1046         struct typhoon *tp = netdev_priv(dev);
1047         struct pci_dev *pci_dev = tp->pdev;
1048         struct cmd_desc xp_cmd;
1049         struct resp_desc xp_resp[3];
1050
1051         smp_rmb();
1052         if(tp->card_state == Sleeping) {
1053                 strcpy(info->fw_version, "Sleep image");
1054         } else {
1055                 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
1056                 if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
1057                         strcpy(info->fw_version, "Unknown runtime");
1058                 } else {
1059                         u32 sleep_ver = xp_resp[0].parm2;
1060                         snprintf(info->fw_version, 32, "%02x.%03x.%03x",
1061                                  sleep_ver >> 24, (sleep_ver >> 12) & 0xfff,
1062                                  sleep_ver & 0xfff);
1063                 }
1064         }
1065
1066         strcpy(info->driver, DRV_MODULE_NAME);
1067         strcpy(info->version, DRV_MODULE_VERSION);
1068         strcpy(info->bus_info, pci_name(pci_dev));
1069 }
1070
/* ethtool get_settings hook: report supported/advertised modes from the
 * configured transceiver selection, port type from the FIBER capability
 * bit, and current speed/duplex (refreshed via a stats read, since the
 * 3XP reports link state in its stats response). Always returns 0.
 */
static int
typhoon_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct typhoon *tp = netdev_priv(dev);

	cmd->supported = SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
				SUPPORTED_Autoneg;

	switch (tp->xcvr_select) {
	case TYPHOON_XCVR_10HALF:
		cmd->advertising = ADVERTISED_10baseT_Half;
		break;
	case TYPHOON_XCVR_10FULL:
		cmd->advertising = ADVERTISED_10baseT_Full;
		break;
	case TYPHOON_XCVR_100HALF:
		cmd->advertising = ADVERTISED_100baseT_Half;
		break;
	case TYPHOON_XCVR_100FULL:
		cmd->advertising = ADVERTISED_100baseT_Full;
		break;
	case TYPHOON_XCVR_AUTONEG:
		cmd->advertising = ADVERTISED_10baseT_Half |
					    ADVERTISED_10baseT_Full |
					    ADVERTISED_100baseT_Half |
					    ADVERTISED_100baseT_Full |
					    ADVERTISED_Autoneg;
		break;
	}

	if(tp->capabilities & TYPHOON_FIBER) {
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->advertising |= ADVERTISED_FIBRE;
		cmd->port = PORT_FIBRE;
	} else {
		cmd->supported |= SUPPORTED_10baseT_Half |
					SUPPORTED_10baseT_Full |
					SUPPORTED_TP;
		cmd->advertising |= ADVERTISED_TP;
		cmd->port = PORT_TP;
	}

	/* need to get stats to make these link speed/duplex valid */
	typhoon_do_get_stats(tp);
	cmd->speed = tp->speed;
	cmd->duplex = tp->duplex;
	cmd->phy_address = 0;
	cmd->transceiver = XCVR_INTERNAL;
	if(tp->xcvr_select == TYPHOON_XCVR_AUTONEG)
		cmd->autoneg = AUTONEG_ENABLE;
	else
		cmd->autoneg = AUTONEG_DISABLE;
	cmd->maxtxpkt = 1;
	cmd->maxrxpkt = 1;

	return 0;
}
1128
1129 static int
1130 typhoon_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1131 {
1132         struct typhoon *tp = netdev_priv(dev);
1133         struct cmd_desc xp_cmd;
1134         int xcvr;
1135         int err;
1136
1137         err = -EINVAL;
1138         if(cmd->autoneg == AUTONEG_ENABLE) {
1139                 xcvr = TYPHOON_XCVR_AUTONEG;
1140         } else {
1141                 if(cmd->duplex == DUPLEX_HALF) {
1142                         if(cmd->speed == SPEED_10)
1143                                 xcvr = TYPHOON_XCVR_10HALF;
1144                         else if(cmd->speed == SPEED_100)
1145                                 xcvr = TYPHOON_XCVR_100HALF;
1146                         else
1147                                 goto out;
1148                 } else if(cmd->duplex == DUPLEX_FULL) {
1149                         if(cmd->speed == SPEED_10)
1150                                 xcvr = TYPHOON_XCVR_10FULL;
1151                         else if(cmd->speed == SPEED_100)
1152                                 xcvr = TYPHOON_XCVR_100FULL;
1153                         else
1154                                 goto out;
1155                 } else
1156                         goto out;
1157         }
1158
1159         INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT);
1160         xp_cmd.parm1 = cpu_to_le16(xcvr);
1161         err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1162         if(err < 0)
1163                 goto out;
1164
1165         tp->xcvr_select = xcvr;
1166         if(cmd->autoneg == AUTONEG_ENABLE) {
1167                 tp->speed = 0xff;       /* invalid */
1168                 tp->duplex = 0xff;      /* invalid */
1169         } else {
1170                 tp->speed = cmd->speed;
1171                 tp->duplex = cmd->duplex;
1172         }
1173
1174 out:
1175         return err;
1176 }
1177
1178 static void
1179 typhoon_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1180 {
1181         struct typhoon *tp = netdev_priv(dev);
1182
1183         wol->supported = WAKE_PHY | WAKE_MAGIC;
1184         wol->wolopts = 0;
1185         if(tp->wol_events & TYPHOON_WAKE_LINK_EVENT)
1186                 wol->wolopts |= WAKE_PHY;
1187         if(tp->wol_events & TYPHOON_WAKE_MAGIC_PKT)
1188                 wol->wolopts |= WAKE_MAGIC;
1189         memset(&wol->sopass, 0, sizeof(wol->sopass));
1190 }
1191
1192 static int
1193 typhoon_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1194 {
1195         struct typhoon *tp = netdev_priv(dev);
1196
1197         if(wol->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
1198                 return -EINVAL;
1199
1200         tp->wol_events = 0;
1201         if(wol->wolopts & WAKE_PHY)
1202                 tp->wol_events |= TYPHOON_WAKE_LINK_EVENT;
1203         if(wol->wolopts & WAKE_MAGIC)
1204                 tp->wol_events |= TYPHOON_WAKE_MAGIC_PKT;
1205
1206         return 0;
1207 }
1208
1209 static u32
1210 typhoon_get_rx_csum(struct net_device *dev)
1211 {
1212         /* For now, we don't allow turning off RX checksums.
1213          */
1214         return 1;
1215 }
1216
1217 static void
1218 typhoon_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
1219 {
1220         ering->rx_max_pending = RXENT_ENTRIES;
1221         ering->rx_mini_max_pending = 0;
1222         ering->rx_jumbo_max_pending = 0;
1223         ering->tx_max_pending = TXLO_ENTRIES - 1;
1224
1225         ering->rx_pending = RXENT_ENTRIES;
1226         ering->rx_mini_pending = 0;
1227         ering->rx_jumbo_pending = 0;
1228         ering->tx_pending = TXLO_ENTRIES - 1;
1229 }
1230
/* ethtool operations table. Tx checksum/SG/TSO get/set are handled by
 * the generic ethtool_op_* helpers; ring sizes are read-only (no
 * set_ringparam) and Rx checksumming cannot be disabled.
 */
static const struct ethtool_ops typhoon_ethtool_ops = {
	.get_settings		= typhoon_get_settings,
	.set_settings		= typhoon_set_settings,
	.get_drvinfo		= typhoon_get_drvinfo,
	.get_wol		= typhoon_get_wol,
	.set_wol		= typhoon_set_wol,
	.get_link		= ethtool_op_get_link,
	.get_rx_csum		= typhoon_get_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_csum,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= ethtool_op_set_tso,
	.get_ringparam		= typhoon_get_ringparam,
};
1247
1248 static int
1249 typhoon_wait_interrupt(void __iomem *ioaddr)
1250 {
1251         int i, err = 0;
1252
1253         for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
1254                 if(ioread32(ioaddr + TYPHOON_REG_INTR_STATUS) &
1255                    TYPHOON_INTR_BOOTCMD)
1256                         goto out;
1257                 udelay(TYPHOON_UDELAY);
1258         }
1259
1260         err = -ETIMEDOUT;
1261
1262 out:
1263         iowrite32(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS);
1264         return err;
1265 }
1266
/* Byte offset of member @x within the shared host/NIC memory block */
#define shared_offset(x)	offsetof(struct typhoon_shared, x)

/* One-time setup of the shared host/NIC interface block: publish the bus
 * address and size of every ring to the 3XP via the iface structure, and
 * point the host-side ring bookkeeping at the corresponding regions of
 * tp->shared. Also initializes the command/state locks.
 */
static void
typhoon_init_interface(struct typhoon *tp)
{
	struct typhoon_interface *iface = &tp->shared->iface;
	dma_addr_t shared_dma;

	memset(tp->shared, 0, sizeof(struct typhoon_shared));

	/* The *Hi members of iface are all init'd to zero by the memset().
	 */
	shared_dma = tp->shared_dma + shared_offset(indexes);
	iface->ringIndex = cpu_to_le32(shared_dma);

	shared_dma = tp->shared_dma + shared_offset(txLo);
	iface->txLoAddr = cpu_to_le32(shared_dma);
	iface->txLoSize = cpu_to_le32(TXLO_ENTRIES * sizeof(struct tx_desc));

	shared_dma = tp->shared_dma + shared_offset(txHi);
	iface->txHiAddr = cpu_to_le32(shared_dma);
	iface->txHiSize = cpu_to_le32(TXHI_ENTRIES * sizeof(struct tx_desc));

	shared_dma = tp->shared_dma + shared_offset(rxBuff);
	iface->rxBuffAddr = cpu_to_le32(shared_dma);
	iface->rxBuffSize = cpu_to_le32(RXFREE_ENTRIES *
					sizeof(struct rx_free));

	shared_dma = tp->shared_dma + shared_offset(rxLo);
	iface->rxLoAddr = cpu_to_le32(shared_dma);
	iface->rxLoSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc));

	shared_dma = tp->shared_dma + shared_offset(rxHi);
	iface->rxHiAddr = cpu_to_le32(shared_dma);
	iface->rxHiSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc));

	shared_dma = tp->shared_dma + shared_offset(cmd);
	iface->cmdAddr = cpu_to_le32(shared_dma);
	iface->cmdSize = cpu_to_le32(COMMAND_RING_SIZE);

	shared_dma = tp->shared_dma + shared_offset(resp);
	iface->respAddr = cpu_to_le32(shared_dma);
	iface->respSize = cpu_to_le32(RESPONSE_RING_SIZE);

	shared_dma = tp->shared_dma + shared_offset(zeroWord);
	iface->zeroAddr = cpu_to_le32(shared_dma);

	/* Host-side (virtual address) views of the same rings */
	tp->indexes = &tp->shared->indexes;
	tp->txLoRing.ringBase = (u8 *) tp->shared->txLo;
	tp->txHiRing.ringBase = (u8 *) tp->shared->txHi;
	tp->rxLoRing.ringBase = (u8 *) tp->shared->rxLo;
	tp->rxHiRing.ringBase = (u8 *) tp->shared->rxHi;
	tp->rxBuffRing.ringBase = (u8 *) tp->shared->rxBuff;
	tp->cmdRing.ringBase = (u8 *) tp->shared->cmd;
	tp->respRing.ringBase = (u8 *) tp->shared->resp;

	tp->txLoRing.writeRegister = TYPHOON_REG_TX_LO_READY;
	tp->txHiRing.writeRegister = TYPHOON_REG_TX_HI_READY;

	/* cached for the TSO respAddrLo calculation in typhoon_tso_fill() */
	tp->txlo_dma_addr = iface->txLoAddr;
	tp->card_state = Sleeping;
	smp_wmb();

	tp->offload = TYPHOON_OFFLOAD_IP_CHKSUM | TYPHOON_OFFLOAD_TCP_CHKSUM;
	tp->offload |= TYPHOON_OFFLOAD_UDP_CHKSUM | TSO_OFFLOAD_ON;

	spin_lock_init(&tp->command_lock);
	spin_lock_init(&tp->state_lock);
}
1336
1337 static void
1338 typhoon_init_rings(struct typhoon *tp)
1339 {
1340         memset(tp->indexes, 0, sizeof(struct typhoon_indexes));
1341
1342         tp->txLoRing.lastWrite = 0;
1343         tp->txHiRing.lastWrite = 0;
1344         tp->rxLoRing.lastWrite = 0;
1345         tp->rxHiRing.lastWrite = 0;
1346         tp->rxBuffRing.lastWrite = 0;
1347         tp->cmdRing.lastWrite = 0;
1348         tp->cmdRing.lastWrite = 0;
1349
1350         tp->txLoRing.lastRead = 0;
1351         tp->txHiRing.lastRead = 0;
1352 }
1353
/* Download the built-in runtime firmware image to the 3XP. The image is
 * a typhoon_file_header followed by sections; each section is streamed
 * through a single DMA-coherent bounce page, with a checksum and a
 * boot-register handshake per chunk. Returns 0 or a negative errno.
 */
static int
typhoon_download_firmware(struct typhoon *tp)
{
	void __iomem *ioaddr = tp->ioaddr;
	struct pci_dev *pdev = tp->pdev;
	struct typhoon_file_header *fHdr;
	struct typhoon_section_header *sHdr;
	u8 *image_data;
	void *dpage;
	dma_addr_t dpage_dma;
	unsigned int csum;
	u32 irqEnabled;
	u32 irqMasked;
	u32 numSections;
	u32 section_len;
	u32 len;
	u32 load_addr;
	u32 hmac;
	int i;
	int err;

	err = -EINVAL;
	fHdr = (struct typhoon_file_header *) typhoon_firmware_image;
	image_data = (u8 *) fHdr;

	/* sanity-check the magic tag before trusting any header fields */
	if(memcmp(fHdr->tag, "TYPHOON", 8)) {
		printk(KERN_ERR "%s: Invalid firmware image!\n", tp->name);
		goto err_out;
	}

	/* Cannot just map the firmware image using pci_map_single() as
	 * the firmware is part of the kernel/module image, so we allocate
	 * some consistent memory to copy the sections into, as it is simpler,
	 * and short-lived. If we ever split out and require a userland
	 * firmware loader, then we can revisit this.
	 */
	err = -ENOMEM;
	dpage = pci_alloc_consistent(pdev, PAGE_SIZE, &dpage_dma);
	if(!dpage) {
		printk(KERN_ERR "%s: no DMA mem for firmware\n", tp->name);
		goto err_out;
	}

	/* Temporarily enable + unmask the BOOTCMD interrupt for the
	 * download handshake; both are restored on the way out.
	 */
	irqEnabled = ioread32(ioaddr + TYPHOON_REG_INTR_ENABLE);
	iowrite32(irqEnabled | TYPHOON_INTR_BOOTCMD,
	       ioaddr + TYPHOON_REG_INTR_ENABLE);
	irqMasked = ioread32(ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(irqMasked | TYPHOON_INTR_BOOTCMD,
	       ioaddr + TYPHOON_REG_INTR_MASK);

	err = -ETIMEDOUT;
	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
		printk(KERN_ERR "%s: card ready timeout\n", tp->name);
		goto err_out_irq;
	}

	numSections = le32_to_cpu(fHdr->numSections);
	load_addr = le32_to_cpu(fHdr->startAddr);

	/* ack any stale BOOTCMD, then hand the card the image start
	 * address and the five-word HMAC digest of the image
	 */
	iowrite32(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS);
	iowrite32(load_addr, ioaddr + TYPHOON_REG_DOWNLOAD_BOOT_ADDR);
	hmac = le32_to_cpu(fHdr->hmacDigest[0]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_0);
	hmac = le32_to_cpu(fHdr->hmacDigest[1]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_1);
	hmac = le32_to_cpu(fHdr->hmacDigest[2]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_2);
	hmac = le32_to_cpu(fHdr->hmacDigest[3]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_3);
	hmac = le32_to_cpu(fHdr->hmacDigest[4]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_4);
	typhoon_post_pci_writes(ioaddr);
	iowrite32(TYPHOON_BOOTCMD_RUNTIME_IMAGE, ioaddr + TYPHOON_REG_COMMAND);

	image_data += sizeof(struct typhoon_file_header);

	/* The ioread32() in typhoon_wait_interrupt() will force the
	 * last write to the command register to post, so
	 * we don't need a typhoon_post_pci_writes() after it.
	 */
	for(i = 0; i < numSections; i++) {
		sHdr = (struct typhoon_section_header *) image_data;
		image_data += sizeof(struct typhoon_section_header);
		load_addr = le32_to_cpu(sHdr->startAddr);
		section_len = le32_to_cpu(sHdr->len);

		/* stream the section through the bounce page, at most
		 * PAGE_SIZE per handshake round
		 */
		while(section_len) {
			len = min_t(u32, section_len, PAGE_SIZE);

			if(typhoon_wait_interrupt(ioaddr) < 0 ||
			   ioread32(ioaddr + TYPHOON_REG_STATUS) !=
			   TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
				printk(KERN_ERR "%s: segment ready timeout\n",
				       tp->name);
				goto err_out_irq;
			}

			/* Do an pseudo IPv4 checksum on the data -- first
			 * need to convert each u16 to cpu order before
			 * summing. Fortunately, due to the properties of
			 * the checksum, we can do this once, at the end.
			 */
			csum = csum_partial_copy_nocheck(image_data, dpage,
							 len, 0);
			csum = csum_fold(csum);
			csum = le16_to_cpu(csum);

			iowrite32(len, ioaddr + TYPHOON_REG_BOOT_LENGTH);
			iowrite32(csum, ioaddr + TYPHOON_REG_BOOT_CHECKSUM);
			iowrite32(load_addr,
					ioaddr + TYPHOON_REG_BOOT_DEST_ADDR);
			iowrite32(0, ioaddr + TYPHOON_REG_BOOT_DATA_HI);
			iowrite32(dpage_dma, ioaddr + TYPHOON_REG_BOOT_DATA_LO);
			typhoon_post_pci_writes(ioaddr);
			iowrite32(TYPHOON_BOOTCMD_SEG_AVAILABLE,
			       ioaddr + TYPHOON_REG_COMMAND);

			image_data += len;
			load_addr += len;
			section_len -= len;
		}
	}

	/* wait for the card to consume the final segment */
	if(typhoon_wait_interrupt(ioaddr) < 0 ||
	   ioread32(ioaddr + TYPHOON_REG_STATUS) !=
	   TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
		printk(KERN_ERR "%s: final segment ready timeout\n", tp->name);
		goto err_out_irq;
	}

	iowrite32(TYPHOON_BOOTCMD_DNLD_COMPLETE, ioaddr + TYPHOON_REG_COMMAND);

	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
		printk(KERN_ERR "%s: boot ready timeout, status 0x%0x\n",
		       tp->name, ioread32(ioaddr + TYPHOON_REG_STATUS));
		goto err_out_irq;
	}

	err = 0;

err_out_irq:
	/* restore the original interrupt mask/enable state */
	iowrite32(irqMasked, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(irqEnabled, ioaddr + TYPHOON_REG_INTR_ENABLE);

	pci_free_consistent(pdev, PAGE_SIZE, dpage, dpage_dma);

err_out:
	return err;
}
1503
/* Boot the 3XP: wait for @initial_status, hand the card the bus address
 * of the shared boot record, wait for the RUNNING status, then clear the
 * doorbell registers and issue the final BOOT command. Returns 0 or
 * -ETIMEDOUT if either status wait expires.
 */
static int
typhoon_boot_3XP(struct typhoon *tp, u32 initial_status)
{
	void __iomem *ioaddr = tp->ioaddr;

	if(typhoon_wait_status(ioaddr, initial_status) < 0) {
		printk(KERN_ERR "%s: boot ready timeout\n", tp->name);
		goto out_timeout;
	}

	/* point the card at the shared memory block (32-bit address;
	 * the high half is always zero)
	 */
	iowrite32(0, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_HI);
	iowrite32(tp->shared_dma, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_LO);
	typhoon_post_pci_writes(ioaddr);
	iowrite32(TYPHOON_BOOTCMD_REG_BOOT_RECORD,
				ioaddr + TYPHOON_REG_COMMAND);

	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_RUNNING) < 0) {
		printk(KERN_ERR "%s: boot finish timeout (status 0x%x)\n",
		       tp->name, ioread32(ioaddr + TYPHOON_REG_STATUS));
		goto out_timeout;
	}

	/* Clear the Transmit and Command ready registers
	 */
	iowrite32(0, ioaddr + TYPHOON_REG_TX_HI_READY);
	iowrite32(0, ioaddr + TYPHOON_REG_CMD_READY);
	iowrite32(0, ioaddr + TYPHOON_REG_TX_LO_READY);
	typhoon_post_pci_writes(ioaddr);
	iowrite32(TYPHOON_BOOTCMD_BOOT, ioaddr + TYPHOON_REG_COMMAND);

	return 0;

out_timeout:
	return -ETIMEDOUT;
}
1539
/* Walk the Tx ring from lastRead up to *index (the index the card has
 * reported as cleared), releasing completed entries, and return the new
 * lastRead position. The caller stores it back into the ring.
 *
 * A TYPHOON_TX_DESC entry holds the skb pointer itself in addr/addrHi,
 * so the skb is freed directly; a TYPHOON_FRAG_DESC entry holds a DMA
 * mapping that must be unmapped. May run in irq context, hence
 * dev_kfree_skb_irq().
 */
static u32
typhoon_clean_tx(struct typhoon *tp, struct transmit_ring *txRing,
			volatile u32 * index)
{
	u32 lastRead = txRing->lastRead;
	struct tx_desc *tx;
	dma_addr_t skb_dma;
	int dma_len;
	int type;

	while(lastRead != le32_to_cpu(*index)) {
		tx = (struct tx_desc *) (txRing->ringBase + lastRead);
		type = tx->flags & TYPHOON_TYPE_MASK;

		if(type == TYPHOON_TX_DESC) {
			/* This tx_desc describes a packet.
			 */
			unsigned long ptr = tx->addr | ((u64)tx->addrHi << 32);
			struct sk_buff *skb = (struct sk_buff *) ptr;
			dev_kfree_skb_irq(skb);
		} else if(type == TYPHOON_FRAG_DESC) {
			/* This tx_desc describes a memory mapping. Free it.
			 */
			skb_dma = (dma_addr_t) le32_to_cpu(tx->addr);
			dma_len = le16_to_cpu(tx->len);
			pci_unmap_single(tp->pdev, skb_dma, dma_len,
				       PCI_DMA_TODEVICE);
		}

		/* mark the descriptor free for reuse by the Tx path */
		tx->flags = 0;
		typhoon_inc_tx_index(&lastRead, 1);
	}

	return lastRead;
}
1575
1576 static void
1577 typhoon_tx_complete(struct typhoon *tp, struct transmit_ring *txRing,
1578                         volatile u32 * index)
1579 {
1580         u32 lastRead;
1581         int numDesc = MAX_SKB_FRAGS + 1;
1582
1583         /* This will need changing if we start to use the Hi Tx ring. */
1584         lastRead = typhoon_clean_tx(tp, txRing, index);
1585         if(netif_queue_stopped(tp->dev) && typhoon_num_free(txRing->lastWrite,
1586                                 lastRead, TXLO_ENTRIES) > (numDesc + 2))
1587                 netif_wake_queue(tp->dev);
1588
1589         txRing->lastRead = lastRead;
1590         smp_wmb();
1591 }
1592
/* Return the still-mapped Rx buffer in slot @idx to the free-buffer
 * ring so the card can reuse it. If the free ring is full, the skb is
 * dropped instead and the slot is emptied.
 */
static void
typhoon_recycle_rx_skb(struct typhoon *tp, u32 idx)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct rxbuff_ent *rxb = &tp->rxbuffers[idx];
	struct basic_ring *ring = &tp->rxBuffRing;
	struct rx_free *r;

	/* the ring is full when advancing lastWrite by one entry would
	 * land on the card's cleared index
	 */
	if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
				indexes->rxBuffCleared) {
		/* no room in ring, just drop the skb
		 */
		dev_kfree_skb_any(rxb->skb);
		rxb->skb = NULL;
		return;
	}

	r = (struct rx_free *) (ring->ringBase + ring->lastWrite);
	typhoon_inc_rxfree_index(&ring->lastWrite, 1);
	r->virtAddr = idx;
	r->physAddr = cpu_to_le32(rxb->dma_addr);

	/* Tell the card about it -- the descriptor must be completely
	 * written before the new ready index is published
	 */
	wmb();
	indexes->rxBuffReady = cpu_to_le32(ring->lastWrite);
}
1619
/* Allocate a fresh skb for Rx slot @idx, DMA-map it, and post it on the
 * free-buffer ring for the card to fill. Returns 0 on success, -ENOMEM
 * if the free ring is full or the skb allocation fails (the slot is
 * left empty in that case).
 */
static int
typhoon_alloc_rx_skb(struct typhoon *tp, u32 idx)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct rxbuff_ent *rxb = &tp->rxbuffers[idx];
	struct basic_ring *ring = &tp->rxBuffRing;
	struct rx_free *r;
	struct sk_buff *skb;
	dma_addr_t dma_addr;

	rxb->skb = NULL;

	/* no room if one more entry would reach the card's cleared index */
	if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
				indexes->rxBuffCleared)
		return -ENOMEM;

	skb = dev_alloc_skb(PKT_BUF_SZ);
	if(!skb)
		return -ENOMEM;

#if 0
	/* Please, 3com, fix the firmware to allow DMA to a unaligned
	 * address! Pretty please?
	 */
	skb_reserve(skb, 2);
#endif

	skb->dev = tp->dev;
	dma_addr = pci_map_single(tp->pdev, skb->data,
				  PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

	/* Since no card does 64 bit DAC, the high bits will never
	 * change from zero.
	 */
	r = (struct rx_free *) (ring->ringBase + ring->lastWrite);
	typhoon_inc_rxfree_index(&ring->lastWrite, 1);
	r->virtAddr = idx;
	r->physAddr = cpu_to_le32(dma_addr);
	rxb->skb = skb;
	rxb->dma_addr = dma_addr;

	/* Tell the card about it -- the descriptor must be completely
	 * written before the new ready index is published
	 */
	wmb();
	indexes->rxBuffReady = cpu_to_le32(ring->lastWrite);
	return 0;
}
1666
/* Deliver up to @budget received packets from @rxRing (the span between
 * *cleared and *ready) to the network stack, then write the new cleared
 * index back through *cleared. Packets shorter than rx_copybreak are
 * copied into a small fresh skb so the original mapped buffer can be
 * recycled; larger packets are passed up directly and a replacement
 * buffer is allocated. Returns the number of packets delivered.
 */
static int
typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile u32 * ready,
	   volatile u32 * cleared, int budget)
{
	struct rx_desc *rx;
	struct sk_buff *skb, *new_skb;
	struct rxbuff_ent *rxb;
	dma_addr_t dma_addr;
	u32 local_ready;
	u32 rxaddr;
	int pkt_len;
	u32 idx;
	u32 csum_bits;
	int received;

	received = 0;
	local_ready = le32_to_cpu(*ready);
	rxaddr = le32_to_cpu(*cleared);
	while(rxaddr != local_ready && budget > 0) {
		rx = (struct rx_desc *) (rxRing->ringBase + rxaddr);
		/* rx->addr carries the rxbuffers[] slot index back to us */
		idx = rx->addr;
		rxb = &tp->rxbuffers[idx];
		skb = rxb->skb;
		dma_addr = rxb->dma_addr;

		typhoon_inc_rx_index(&rxaddr, 1);

		/* errored frames are silently recycled, not counted */
		if(rx->flags & TYPHOON_RX_ERROR) {
			typhoon_recycle_rx_skb(tp, idx);
			continue;
		}

		pkt_len = le16_to_cpu(rx->frameLen);

		if(pkt_len < rx_copybreak &&
		   (new_skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
			/* copy path: align the copy at a 2-byte offset so
			 * the IP header lands on a 4-byte boundary
			 */
			skb_reserve(new_skb, 2);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr,
						    PKT_BUF_SZ,
						    PCI_DMA_FROMDEVICE);
			skb_copy_to_linear_data(new_skb, skb->data, pkt_len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr,
						       PKT_BUF_SZ,
						       PCI_DMA_FROMDEVICE);
			skb_put(new_skb, pkt_len);
			typhoon_recycle_rx_skb(tp, idx);
		} else {
			/* pass the mapped buffer up and replace it */
			new_skb = skb;
			skb_put(new_skb, pkt_len);
			pci_unmap_single(tp->pdev, dma_addr, PKT_BUF_SZ,
				       PCI_DMA_FROMDEVICE);
			typhoon_alloc_rx_skb(tp, idx);
		}
		new_skb->protocol = eth_type_trans(new_skb, tp->dev);
		/* trust the hardware checksum only when the IP check and
		 * exactly one of the TCP/UDP checks passed
		 */
		csum_bits = rx->rxStatus & (TYPHOON_RX_IP_CHK_GOOD |
			TYPHOON_RX_UDP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD);
		if(csum_bits ==
		   (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD)
		   || csum_bits ==
		   (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_UDP_CHK_GOOD)) {
			new_skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else
			new_skb->ip_summed = CHECKSUM_NONE;

		/* state_lock guards tp->vlgrp against unregister */
		spin_lock(&tp->state_lock);
		if(tp->vlgrp != NULL && rx->rxStatus & TYPHOON_RX_VLAN)
			vlan_hwaccel_receive_skb(new_skb, tp->vlgrp,
						 ntohl(rx->vlanTag) & 0xffff);
		else
			netif_receive_skb(new_skb);
		spin_unlock(&tp->state_lock);

		tp->dev->last_rx = jiffies;
		received++;
		budget--;
	}
	*cleared = cpu_to_le32(rxaddr);

	return received;
}
1747
1748 static void
1749 typhoon_fill_free_ring(struct typhoon *tp)
1750 {
1751         u32 i;
1752
1753         for(i = 0; i < RXENT_ENTRIES; i++) {
1754                 struct rxbuff_ent *rxb = &tp->rxbuffers[i];
1755                 if(rxb->skb)
1756                         continue;
1757                 if(typhoon_alloc_rx_skb(tp, i) < 0)
1758                         break;
1759         }
1760 }
1761
/* NAPI poll handler: drain pending command responses and Tx
 * completions, receive up to @budget packets from both Rx rings, refill
 * the free-buffer ring if it drained, and re-enable interrupts once all
 * work is done (the irq handler masked them before scheduling us).
 */
static int
typhoon_poll(struct napi_struct *napi, int budget)
{
	struct typhoon *tp = container_of(napi, struct typhoon, napi);
	struct net_device *dev = tp->dev;
	struct typhoon_indexes *indexes = tp->indexes;
	int work_done;

	/* order reads of the index block after the interrupt */
	rmb();
	if(!tp->awaiting_resp && indexes->respReady != indexes->respCleared)
			typhoon_process_response(tp, 0, NULL);

	if(le32_to_cpu(indexes->txLoCleared) != tp->txLoRing.lastRead)
		typhoon_tx_complete(tp, &tp->txLoRing, &indexes->txLoCleared);

	work_done = 0;

	/* service the Hi ring first, then the Lo ring with what budget
	 * remains
	 */
	if(indexes->rxHiCleared != indexes->rxHiReady) {
		work_done += typhoon_rx(tp, &tp->rxHiRing, &indexes->rxHiReady,
					&indexes->rxHiCleared, budget);
	}

	if(indexes->rxLoCleared != indexes->rxLoReady) {
		work_done += typhoon_rx(tp, &tp->rxLoRing, &indexes->rxLoReady,
					&indexes->rxLoCleared, budget - work_done);
	}

	if(le32_to_cpu(indexes->rxBuffCleared) == tp->rxBuffRing.lastWrite) {
		/* rxBuff ring is empty, try to fill it. */
		typhoon_fill_free_ring(tp);
	}

	if (work_done < budget) {
		/* all caught up: leave polling mode and unmask interrupts */
		netif_rx_complete(dev, napi);
		iowrite32(TYPHOON_INTR_NONE,
				tp->ioaddr + TYPHOON_REG_INTR_MASK);
		typhoon_post_pci_writes(tp->ioaddr);
	}

	return work_done;
}
1803
1804 static irqreturn_t
1805 typhoon_interrupt(int irq, void *dev_instance)
1806 {
1807         struct net_device *dev = (struct net_device *) dev_instance;
1808         struct typhoon *tp = dev->priv;
1809         void __iomem *ioaddr = tp->ioaddr;
1810         u32 intr_status;
1811
1812         intr_status = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
1813         if(!(intr_status & TYPHOON_INTR_HOST_INT))
1814                 return IRQ_NONE;
1815
1816         iowrite32(intr_status, ioaddr + TYPHOON_REG_INTR_STATUS);
1817
1818         if (netif_rx_schedule_prep(dev, &tp->napi)) {
1819                 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
1820                 typhoon_post_pci_writes(ioaddr);
1821                 __netif_rx_schedule(dev, &tp->napi);
1822         } else {
1823                 printk(KERN_ERR "%s: Error, poll already scheduled\n",
1824                        dev->name);
1825         }
1826         return IRQ_HANDLED;
1827 }
1828
1829 static void
1830 typhoon_free_rx_rings(struct typhoon *tp)
1831 {
1832         u32 i;
1833
1834         for(i = 0; i < RXENT_ENTRIES; i++) {
1835                 struct rxbuff_ent *rxb = &tp->rxbuffers[i];
1836                 if(rxb->skb) {
1837                         pci_unmap_single(tp->pdev, rxb->dma_addr, PKT_BUF_SZ,
1838                                        PCI_DMA_FROMDEVICE);
1839                         dev_kfree_skb(rxb->skb);
1840                         rxb->skb = NULL;
1841                 }
1842         }
1843 }
1844
/* Arm the requested wake @events, command the 3XP into its sleep state,
 * and put the PCI device into power state @state. Returns 0 on success
 * or a negative errno; if a command fails the card is left as-is.
 */
static int
typhoon_sleep(struct typhoon *tp, pci_power_t state, u16 events)
{
	struct pci_dev *pdev = tp->pdev;
	void __iomem *ioaddr = tp->ioaddr;
	struct cmd_desc xp_cmd;
	int err;

	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_ENABLE_WAKE_EVENTS);
	xp_cmd.parm1 = events;
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0) {
		printk(KERN_ERR "%s: typhoon_sleep(): wake events cmd err %d\n",
				tp->name, err);
		return err;
	}

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_GOTO_SLEEP);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0) {
		printk(KERN_ERR "%s: typhoon_sleep(): sleep cmd err %d\n",
				tp->name, err);
		return err;
	}

	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_SLEEPING) < 0)
		return -ETIMEDOUT;

	/* Since we cannot monitor the status of the link while sleeping,
	 * tell the world it went away.
	 */
	netif_carrier_off(tp->dev);

	/* only after the card reports SLEEPING do we touch PCI PM state */
	pci_enable_wake(tp->pdev, state, 1);
	pci_disable_device(pdev);
	return pci_set_power_state(pdev, state);
}
1882
/* Bring the device back to D0 and wake the 3XP out of its sleep image.
 * @wait_type is passed through to typhoon_reset() when a reset is
 * needed. Returns 0 on success or the typhoon_reset() result.
 */
static int
typhoon_wakeup(struct typhoon *tp, int wait_type)
{
	struct pci_dev *pdev = tp->pdev;
	void __iomem *ioaddr = tp->ioaddr;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Post 2.x.x versions of the Sleep Image require a reset before
	 * we can download the Runtime Image. But let's not make users of
	 * the old firmware pay for the reset.
	 */
	iowrite32(TYPHOON_BOOTCMD_WAKEUP, ioaddr + TYPHOON_REG_COMMAND);
	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0 ||
			(tp->capabilities & TYPHOON_WAKEUP_NEEDS_RESET))
		return typhoon_reset(ioaddr, wait_type);

	return 0;
}
1903
1904 static int
1905 typhoon_start_runtime(struct typhoon *tp)
1906 {
1907         struct net_device *dev = tp->dev;
1908         void __iomem *ioaddr = tp->ioaddr;
1909         struct cmd_desc xp_cmd;
1910         int err;
1911
1912         typhoon_init_rings(tp);
1913         typhoon_fill_free_ring(tp);
1914
1915         err = typhoon_download_firmware(tp);
1916         if(err < 0) {
1917                 printk("%s: cannot load runtime on 3XP\n", tp->name);
1918                 goto error_out;
1919         }
1920
1921         if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
1922                 printk("%s: cannot boot 3XP\n", tp->name);
1923                 err = -EIO;
1924                 goto error_out;
1925         }
1926
1927         INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAX_PKT_SIZE);
1928         xp_cmd.parm1 = cpu_to_le16(PKT_BUF_SZ);
1929         err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1930         if(err < 0)
1931                 goto error_out;
1932
1933         INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS);
1934         xp_cmd.parm1 = cpu_to_le16(ntohs(*(u16 *)&dev->dev_addr[0]));
1935         xp_cmd.parm2 = cpu_to_le32(ntohl(*(u32 *)&dev->dev_addr[2]));
1936         err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1937         if(err < 0)
1938                 goto error_out;
1939
1940         /* Disable IRQ coalescing -- we can reenable it when 3Com gives
1941          * us some more information on how to control it.
1942          */
1943         INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_IRQ_COALESCE_CTRL);
1944         xp_cmd.parm1 = 0;
1945         err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1946         if(err < 0)
1947                 goto error_out;
1948
1949         INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT);
1950         xp_cmd.parm1 = tp->xcvr_select;
1951         err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1952         if(err < 0)
1953                 goto error_out;
1954
1955         INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_VLAN_TYPE_WRITE);
1956         xp_cmd.parm1 = __constant_cpu_to_le16(ETH_P_8021Q);
1957         err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1958         if(err < 0)
1959                 goto error_out;
1960
1961         INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_OFFLOAD_TASKS);
1962         spin_lock_bh(&tp->state_lock);
1963         xp_cmd.parm2 = tp->offload;
1964         xp_cmd.parm3 = tp->offload;
1965         err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1966         spin_unlock_bh(&tp->state_lock);
1967         if(err < 0)
1968                 goto error_out;
1969
1970         typhoon_set_rx_mode(dev);
1971
1972         INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_ENABLE);
1973         err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1974         if(err < 0)
1975                 goto error_out;
1976
1977         INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_ENABLE);
1978         err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1979         if(err < 0)
1980                 goto error_out;
1981
1982         tp->card_state = Running;
1983         smp_wmb();
1984
1985         iowrite32(TYPHOON_INTR_ENABLE_ALL, ioaddr + TYPHOON_REG_INTR_ENABLE);
1986         iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_MASK);
1987         typhoon_post_pci_writes(ioaddr);
1988
1989         return 0;
1990
1991 error_out:
1992         typhoon_reset(ioaddr, WaitNoSleep);
1993         typhoon_free_rx_rings(tp);
1994         typhoon_init_rings(tp);
1995         return err;
1996 }
1997
/* Stop the 3XP runtime: disable Rx, drain outstanding Tx (bounded
 * wait), snapshot statistics, halt the image, and reset the card via
 * typhoon_reset(@wait_type). Any Tx entries the card never cleared are
 * reaped after the reset. Returns 0, or -ETIMEDOUT if the reset fails.
 */
static int
typhoon_stop_runtime(struct typhoon *tp, int wait_type)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct transmit_ring *txLo = &tp->txLoRing;
	void __iomem *ioaddr = tp->ioaddr;
	struct cmd_desc xp_cmd;
	int i;

	/* Disable interrupts early, since we can't schedule a poll
	 * when called with !netif_running(). This will be posted
	 * when we force the posting of the command.
	 */
	iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_ENABLE);

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_DISABLE);
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

	/* Wait 1/2 sec for any outstanding transmits to occur
	 * We'll cleanup after the reset if this times out.
	 */
	for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
		if(indexes->txLoCleared == cpu_to_le32(txLo->lastWrite))
			break;
		udelay(TYPHOON_UDELAY);
	}

	if(i == TYPHOON_WAIT_TIMEOUT)
		printk(KERN_ERR
		       "%s: halt timed out waiting for Tx to complete\n",
		       tp->name);

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_DISABLE);
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

	/* save the statistics so when we bring the interface up again,
	 * the values reported to userspace are correct.
	 */
	tp->card_state = Sleeping;
	smp_wmb();
	typhoon_do_get_stats(tp);
	memcpy(&tp->stats_saved, &tp->stats, sizeof(struct net_device_stats));

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_HALT);
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_HALTED) < 0)
		printk(KERN_ERR "%s: timed out waiting for 3XP to halt\n",
		       tp->name);

	if(typhoon_reset(ioaddr, wait_type) < 0) {
		printk(KERN_ERR "%s: unable to reset 3XP\n", tp->name);
		return -ETIMEDOUT;
	}

	/* cleanup any outstanding Tx packets */
	if(indexes->txLoCleared != cpu_to_le32(txLo->lastWrite)) {
		indexes->txLoCleared = cpu_to_le32(txLo->lastWrite);
		typhoon_clean_tx(tp, &tp->txLoRing, &indexes->txLoCleared);
	}

	return 0;
}
2061
2062 static void
2063 typhoon_tx_timeout(struct net_device *dev)
2064 {
2065         struct typhoon *tp = netdev_priv(dev);
2066
2067         if(typhoon_reset(tp->ioaddr, WaitNoSleep) < 0) {
2068                 printk(KERN_WARNING "%s: could not reset in tx timeout\n",
2069                                         dev->name);
2070                 goto truely_dead;
2071         }
2072
2073         /* If we ever start using the Hi ring, it will need cleaning too */
2074         typhoon_clean_tx(tp, &tp->txLoRing, &tp->indexes->txLoCleared);
2075         typhoon_free_rx_rings(tp);
2076
2077         if(typhoon_start_runtime(tp) < 0) {
2078                 printk(KERN_ERR "%s: could not start runtime in tx timeout\n",
2079                                         dev->name);
2080                 goto truely_dead;
2081         }
2082
2083         netif_wake_queue(dev);
2084         return;
2085
2086 truely_dead:
2087         /* Reset the hardware, and turn off carrier to avoid more timeouts */
2088         typhoon_reset(tp->ioaddr, NoWait);
2089         netif_carrier_off(dev);
2090 }
2091
/* net_device open(): wake the 3XP out of its sleep image, grab the
 * (shared) irq, enable NAPI, and start the runtime. On failure the
 * card is rebooted into the sleep image and put back to sleep so it is
 * left in the same state open() found it in.
 */
static int
typhoon_open(struct net_device *dev)
{
	struct typhoon *tp = netdev_priv(dev);
	int err;

	err = typhoon_wakeup(tp, WaitSleep);
	if(err < 0) {
		printk(KERN_ERR "%s: unable to wakeup device\n", dev->name);
		goto out_sleep;
	}

	err = request_irq(dev->irq, &typhoon_interrupt, IRQF_SHARED,
				dev->name, dev);
	if(err < 0)
		goto out_sleep;

	napi_enable(&tp->napi);

	err = typhoon_start_runtime(tp);
	if(err < 0) {
		napi_disable(&tp->napi);
		goto out_irq;
	}

	netif_start_queue(dev);
	return 0;

out_irq:
	free_irq(dev->irq, dev);

out_sleep:
	/* best-effort: put the card back into its sleep image */
	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
		printk(KERN_ERR "%s: unable to reboot into sleep img\n",
				dev->name);
		typhoon_reset(tp->ioaddr, NoWait);
		goto out;
	}

	if(typhoon_sleep(tp, PCI_D3hot, 0) < 0)
		printk(KERN_ERR "%s: unable to go back to sleep\n", dev->name);

out:
	return err;
}
2137
/* net_device stop(): halt the runtime, release the irq and Rx buffers,
 * then boot the sleep image and put the card into D3hot. Always
 * returns 0; failures along the way are only logged.
 */
static int
typhoon_close(struct net_device *dev)
{
	struct typhoon *tp = netdev_priv(dev);

	netif_stop_queue(dev);
	napi_disable(&tp->napi);

	if(typhoon_stop_runtime(tp, WaitSleep) < 0)
		printk(KERN_ERR "%s: unable to stop runtime\n", dev->name);

	/* Make sure there is no irq handler running on a different CPU. */
	typhoon_synchronize_irq(dev->irq);
	free_irq(dev->irq, dev);

	typhoon_free_rx_rings(tp);
	typhoon_init_rings(tp);

	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0)
		printk(KERN_ERR "%s: unable to boot sleep image\n", dev->name);

	if(typhoon_sleep(tp, PCI_D3hot, 0) < 0)
		printk(KERN_ERR "%s: unable to put card to sleep\n", dev->name);

	return 0;
}
2164
2165 #ifdef CONFIG_PM
/* PM resume: wake the card and restart the runtime image. If the
 * interface was down when we suspended, there is nothing to do until it
 * is opened. On failure the card is reset and -EBUSY is returned.
 */
static int
typhoon_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct typhoon *tp = netdev_priv(dev);

	/* If we're down, resume when we are upped.
	 */
	if(!netif_running(dev))
		return 0;

	if(typhoon_wakeup(tp, WaitNoSleep) < 0) {
		printk(KERN_ERR "%s: critical: could not wake up in resume\n",
				dev->name);
		goto reset;
	}

	if(typhoon_start_runtime(tp) < 0) {
		printk(KERN_ERR "%s: critical: could not start runtime in "
				"resume\n", dev->name);
		goto reset;
	}

	netif_device_attach(dev);
	netif_start_queue(dev);
	return 0;

reset:
	typhoon_reset(tp->ioaddr, NoWait);
	return -EBUSY;
}
2197
/* PM suspend: stop the runtime, boot the sleep image, program the MAC
 * address and a directed+broadcast Rx filter (presumably so wake
 * packets can be matched while asleep -- confirm against firmware
 * docs), then sleep the card with the configured wake events. Refuses
 * (-EBUSY) when VLANs are active together with WAKE_MAGIC; on any other
 * failure the device is resumed and -EBUSY returned.
 */
static int
typhoon_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct typhoon *tp = netdev_priv(dev);
	struct cmd_desc xp_cmd;

	/* If we're down, we're already suspended.
	 */
	if(!netif_running(dev))
		return 0;

	/* state_lock guards tp->vlgrp */
	spin_lock_bh(&tp->state_lock);
	if(tp->vlgrp && tp->wol_events & TYPHOON_WAKE_MAGIC_PKT) {
		spin_unlock_bh(&tp->state_lock);
		printk(KERN_ERR "%s: cannot do WAKE_MAGIC with VLANS\n",
				dev->name);
		return -EBUSY;
	}
	spin_unlock_bh(&tp->state_lock);

	netif_device_detach(dev);

	if(typhoon_stop_runtime(tp, WaitNoSleep) < 0) {
		printk(KERN_ERR "%s: unable to stop runtime\n", dev->name);
		goto need_resume;
	}

	typhoon_free_rx_rings(tp);
	typhoon_init_rings(tp);

	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
		printk(KERN_ERR "%s: unable to boot sleep image\n", dev->name);
		goto need_resume;
	}

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS);
	xp_cmd.parm1 = cpu_to_le16(ntohs(*(u16 *)&dev->dev_addr[0]));
	xp_cmd.parm2 = cpu_to_le32(ntohl(*(u32 *)&dev->dev_addr[2]));
	if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
		printk(KERN_ERR "%s: unable to set mac address in suspend\n",
				dev->name);
		goto need_resume;
	}

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
	xp_cmd.parm1 = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
	if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
		printk(KERN_ERR "%s: unable to set rx filter in suspend\n",
				dev->name);
		goto need_resume;
	}

	if(typhoon_sleep(tp, pci_choose_state(pdev, state), tp->wol_events) < 0) {
		printk(KERN_ERR "%s: unable to put card to sleep\n", dev->name);
		goto need_resume;
	}

	return 0;

need_resume:
	typhoon_resume(pdev);
	return -EBUSY;
}
2262 #endif
2263
/* Probe whether MMIO works on this card by raising a self-interrupt
 * through the memory-mapped registers and checking that the status bit
 * latches. Returns 1 if MMIO is usable, 0 to fall back to port IO
 * (also on map failure or unexpected initial status).
 */
static int __devinit
typhoon_test_mmio(struct pci_dev *pdev)
{
	void __iomem *ioaddr = pci_iomap(pdev, 1, 128);
	int mode = 0;
	u32 val;

	if(!ioaddr)
		goto out;

	if(ioread32(ioaddr + TYPHOON_REG_STATUS) !=
				TYPHOON_STATUS_WAITING_FOR_HOST)
		goto out_unmap;

	/* mask and ack everything, but enable all sources for the test */
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_ENABLE);

	/* Ok, see if we can change our interrupt status register by
	 * sending ourselves an interrupt. If so, then MMIO works.
	 * The 50usec delay is arbitrary -- it could probably be smaller.
	 */
	val = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
	if((val & TYPHOON_INTR_SELF) == 0) {
		iowrite32(1, ioaddr + TYPHOON_REG_SELF_INTERRUPT);
		ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
		udelay(50);
		val = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
		if(val & TYPHOON_INTR_SELF)
			mode = 1;
	}

	/* restore the quiescent state: everything masked and disabled */
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
	iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_ENABLE);
	ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);

out_unmap:
	pci_iounmap(pdev, ioaddr);

out:
	if(!mode)
		printk(KERN_INFO PFX "falling back to port IO\n");
	return mode;
}
2309
2310 static int __devinit
2311 typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2312 {
2313         static int did_version = 0;
2314         struct net_device *dev;
2315         struct typhoon *tp;
2316         int card_id = (int) ent->driver_data;
2317         void __iomem *ioaddr;
2318         void *shared;
2319         dma_addr_t shared_dma;
2320         struct cmd_desc xp_cmd;
2321         struct resp_desc xp_resp[3];
2322         int i;
2323         int err = 0;
2324
2325         if(!did_version++)
2326                 printk(KERN_INFO "%s", version);
2327
2328         dev = alloc_etherdev(sizeof(*tp));
2329         if(dev == NULL) {
2330                 printk(ERR_PFX "%s: unable to alloc new net device\n",
2331                        pci_name(pdev));
2332                 err = -ENOMEM;
2333                 goto error_out;
2334         }
2335         SET_MODULE_OWNER(dev);
2336         SET_NETDEV_DEV(dev, &pdev->dev);
2337
2338         err = pci_enable_device(pdev);
2339         if(err < 0) {
2340                 printk(ERR_PFX "%s: unable to enable device\n",
2341                        pci_name(pdev));
2342                 goto error_out_dev;
2343         }
2344
2345         err = pci_set_mwi(pdev);
2346         if(err < 0) {
2347                 printk(ERR_PFX "%s: unable to set MWI\n", pci_name(pdev));
2348                 goto error_out_disable;
2349         }
2350
2351         err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
2352         if(err < 0) {
2353                 printk(ERR_PFX "%s: No usable DMA configuration\n",
2354                        pci_name(pdev));
2355                 goto error_out_mwi;
2356         }
2357
2358         /* sanity checks on IO and MMIO BARs
2359          */
2360         if(!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) {
2361                 printk(ERR_PFX
2362                        "%s: region #1 not a PCI IO resource, aborting\n",
2363                        pci_name(pdev));
2364                 err = -ENODEV;
2365                 goto error_out_mwi;
2366         }
2367         if(pci_resource_len(pdev, 0) < 128) {
2368                 printk(ERR_PFX "%s: Invalid PCI IO region size, aborting\n",
2369                        pci_name(pdev));
2370                 err = -ENODEV;
2371                 goto error_out_mwi;
2372         }
2373         if(!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
2374                 printk(ERR_PFX
2375                        "%s: region #1 not a PCI MMIO resource, aborting\n",
2376                        pci_name(pdev));
2377                 err = -ENODEV;
2378                 goto error_out_mwi;
2379         }
2380         if(pci_resource_len(pdev, 1) < 128) {
2381                 printk(ERR_PFX "%s: Invalid PCI MMIO region size, aborting\n",
2382                        pci_name(pdev));
2383                 err = -ENODEV;
2384                 goto error_out_mwi;
2385         }
2386
2387         err = pci_request_regions(pdev, "typhoon");
2388         if(err < 0) {
2389                 printk(ERR_PFX "%s: could not request regions\n",
2390                        pci_name(pdev));
2391                 goto error_out_mwi;
2392         }
2393
2394         /* map our registers
2395          */
2396         if(use_mmio != 0 && use_mmio != 1)
2397                 use_mmio = typhoon_test_mmio(pdev);
2398
2399         ioaddr = pci_iomap(pdev, use_mmio, 128);
2400         if (!ioaddr) {
2401                 printk(ERR_PFX "%s: cannot remap registers, aborting\n",
2402                        pci_name(pdev));
2403                 err = -EIO;
2404                 goto error_out_regions;
2405         }
2406
2407         /* allocate pci dma space for rx and tx descriptor rings
2408          */
2409         shared = pci_alloc_consistent(pdev, sizeof(struct typhoon_shared),
2410                                       &shared_dma);
2411         if(!shared) {
2412                 printk(ERR_PFX "%s: could not allocate DMA memory\n",
2413                        pci_name(pdev));
2414                 err = -ENOMEM;
2415                 goto error_out_remap;
2416         }
2417
2418         dev->irq = pdev->irq;
2419         tp = netdev_priv(dev);
2420         tp->shared = (struct typhoon_shared *) shared;
2421         tp->shared_dma = shared_dma;
2422         tp->pdev = pdev;
2423         tp->tx_pdev = pdev;
2424         tp->ioaddr = ioaddr;
2425         tp->tx_ioaddr = ioaddr;
2426         tp->dev = dev;
2427
2428         /* Init sequence:
2429          * 1) Reset the adapter to clear any bad juju
2430          * 2) Reload the sleep image
2431          * 3) Boot the sleep image
2432          * 4) Get the hardware address.
2433          * 5) Put the card to sleep.
2434          */
2435         if (typhoon_reset(ioaddr, WaitSleep) < 0) {
2436                 printk(ERR_PFX "%s: could not reset 3XP\n", pci_name(pdev));
2437                 err = -EIO;
2438                 goto error_out_dma;
2439         }
2440
2441         /* Now that we've reset the 3XP and are sure it's not going to
2442          * write all over memory, enable bus mastering, and save our
2443          * state for resuming after a suspend.
2444          */
2445         pci_set_master(pdev);
2446         pci_save_state(pdev);
2447
2448         /* dev->name is not valid until we register, but we need to
2449          * use some common routines to initialize the card. So that those
2450          * routines print the right name, we keep our oun pointer to the name
2451          */
2452         tp->name = pci_name(pdev);
2453
2454         typhoon_init_interface(tp);
2455         typhoon_init_rings(tp);
2456
2457         if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
2458                 printk(ERR_PFX "%s: cannot boot 3XP sleep image\n",
2459                        pci_name(pdev));
2460                 err = -EIO;
2461                 goto error_out_reset;
2462         }
2463
2464         INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_MAC_ADDRESS);
2465         if(typhoon_issue_command(tp, 1, &xp_cmd, 1, xp_resp) < 0) {
2466                 printk(ERR_PFX "%s: cannot read MAC address\n",
2467                        pci_name(pdev));
2468                 err = -EIO;
2469                 goto error_out_reset;
2470         }
2471
2472         *(u16 *)&dev->dev_addr[0] = htons(le16_to_cpu(xp_resp[0].parm1));
2473         *(u32 *)&dev->dev_addr[2] = htonl(le32_to_cpu(xp_resp[0].parm2));
2474
2475         if(!is_valid_ether_addr(dev->dev_addr)) {
2476                 printk(ERR_PFX "%s: Could not obtain valid ethernet address, "
2477                        "aborting\n", pci_name(pdev));
2478                 goto error_out_reset;
2479         }
2480
2481         /* Read the Sleep Image version last, so the response is valid
2482          * later when we print out the version reported.
2483          */
2484         INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
2485         if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
2486                 printk(ERR_PFX "%s: Could not get Sleep Image version\n",
2487                         pci_name(pdev));
2488                 goto error_out_reset;
2489         }
2490
2491         tp->capabilities = typhoon_card_info[card_id].capabilities;
2492         tp->xcvr_select = TYPHOON_XCVR_AUTONEG;
2493
2494         /* Typhoon 1.0 Sleep Images return one response descriptor to the
2495          * READ_VERSIONS command. Those versions are OK after waking up
2496          * from sleep without needing a reset. Typhoon 1.1+ Sleep Images
2497          * seem to need a little extra help to get started. Since we don't
2498          * know how to nudge it along, just kick it.
2499          */
2500         if(xp_resp[0].numDesc != 0)
2501                 tp->capabilities |= TYPHOON_WAKEUP_NEEDS_RESET;
2502
2503         if(typhoon_sleep(tp, PCI_D3hot, 0) < 0) {
2504                 printk(ERR_PFX "%s: cannot put adapter to sleep\n",
2505                        pci_name(pdev));
2506                 err = -EIO;
2507                 goto error_out_reset;
2508         }
2509
2510         /* The chip-specific entries in the device structure. */
2511         dev->open               = typhoon_open;
2512         dev->hard_start_xmit    = typhoon_start_tx;
2513         dev->stop               = typhoon_close;
2514         dev->set_multicast_list = typhoon_set_rx_mode;
2515         dev->tx_timeout         = typhoon_tx_timeout;
2516         netif_napi_add(dev, &tp->napi, typhoon_poll, 16);
2517         dev->watchdog_timeo     = TX_TIMEOUT;
2518         dev->get_stats          = typhoon_get_stats;
2519         dev->set_mac_address    = typhoon_set_mac_address;
2520         dev->vlan_rx_register   = typhoon_vlan_rx_register;
2521
2522         SET_ETHTOOL_OPS(dev, &typhoon_ethtool_ops);
2523
2524         /* We can handle scatter gather, up to 16 entries, and
2525          * we can do IP checksumming (only version 4, doh...)
2526          */
2527         dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
2528         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
2529         dev->features |= NETIF_F_TSO;
2530
2531         if(register_netdev(dev) < 0)
2532                 goto error_out_reset;
2533
2534         /* fixup our local name */
2535         tp->name = dev->name;
2536
2537         pci_set_drvdata(pdev, dev);
2538
2539         printk(KERN_INFO "%s: %s at %s 0x%llx, ",
2540                dev->name, typhoon_card_info[card_id].name,
2541                use_mmio ? "MMIO" : "IO",
2542                (unsigned long long)pci_resource_start(pdev, use_mmio));
2543         for(i = 0; i < 5; i++)
2544                 printk("%2.2x:", dev->dev_addr[i]);
2545         printk("%2.2x\n", dev->dev_addr[i]);
2546
2547         /* xp_resp still contains the response to the READ_VERSIONS command.
2548          * For debugging, let the user know what version he has.
2549          */
2550         if(xp_resp[0].numDesc == 0) {
2551                 /* This is the Typhoon 1.0 type Sleep Image, last 16 bits
2552                  * of version is Month/Day of build.
2553                  */
2554                 u16 monthday = le32_to_cpu(xp_resp[0].parm2) & 0xffff;
2555                 printk(KERN_INFO "%s: Typhoon 1.0 Sleep Image built "
2556                         "%02u/%02u/2000\n", dev->name, monthday >> 8,
2557                         monthday & 0xff);
2558         } else if(xp_resp[0].numDesc == 2) {
2559                 /* This is the Typhoon 1.1+ type Sleep Image
2560                  */
2561                 u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2);
2562                 u8 *ver_string = (u8 *) &xp_resp[1];
2563                 ver_string[25] = 0;
2564                 printk(KERN_INFO "%s: Typhoon 1.1+ Sleep Image version "
2565                         "%02x.%03x.%03x %s\n", dev->name, sleep_ver >> 24,
2566                         (sleep_ver >> 12) & 0xfff, sleep_ver & 0xfff,
2567                         ver_string);
2568         } else {
2569                 printk(KERN_WARNING "%s: Unknown Sleep Image version "
2570                         "(%u:%04x)\n", dev->name, xp_resp[0].numDesc,
2571                         le32_to_cpu(xp_resp[0].parm2));
2572         }
2573
2574         return 0;
2575
2576 error_out_reset:
2577         typhoon_reset(ioaddr, NoWait);
2578
2579 error_out_dma:
2580         pci_free_consistent(pdev, sizeof(struct typhoon_shared),
2581                             shared, shared_dma);
2582 error_out_remap:
2583         pci_iounmap(pdev, ioaddr);
2584 error_out_regions:
2585         pci_release_regions(pdev);
2586 error_out_mwi:
2587         pci_clear_mwi(pdev);
2588 error_out_disable:
2589         pci_disable_device(pdev);
2590 error_out_dev:
2591         free_netdev(dev);
2592 error_out:
2593         return err;
2594 }
2595
2596 static void __devexit
2597 typhoon_remove_one(struct pci_dev *pdev)
2598 {
2599         struct net_device *dev = pci_get_drvdata(pdev);
2600         struct typhoon *tp = netdev_priv(dev);
2601
2602         unregister_netdev(dev);
2603         pci_set_power_state(pdev, PCI_D0);
2604         pci_restore_state(pdev);
2605         typhoon_reset(tp->ioaddr, NoWait);
2606         pci_iounmap(pdev, tp->ioaddr);
2607         pci_free_consistent(pdev, sizeof(struct typhoon_shared),
2608                             tp->shared, tp->shared_dma);
2609         pci_release_regions(pdev);
2610         pci_clear_mwi(pdev);
2611         pci_disable_device(pdev);
2612         pci_set_drvdata(pdev, NULL);
2613         free_netdev(dev);
2614 }
2615
/* PCI driver glue: binds typhoon_pci_tbl IDs to the probe/remove
 * (and, with CONFIG_PM, suspend/resume) callbacks above.
 */
static struct pci_driver typhoon_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= typhoon_pci_tbl,
	.probe		= typhoon_init_one,
	.remove		= __devexit_p(typhoon_remove_one),
#ifdef CONFIG_PM
	.suspend	= typhoon_suspend,
	.resume		= typhoon_resume,
#endif
};
2626
2627 static int __init
2628 typhoon_init(void)
2629 {
2630         return pci_register_driver(&typhoon_driver);
2631 }
2632
static void __exit
typhoon_cleanup(void)
{
	/* Unregister from the PCI core; this invokes typhoon_remove_one()
	 * for every bound device before the module text goes away.
	 */
	pci_unregister_driver(&typhoon_driver);
}
2638
/* Module entry/exit points. */
module_init(typhoon_init);
module_exit(typhoon_cleanup);