1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18 #include <linux/config.h>
19
20 #include <linux/module.h>
21 #include <linux/moduleparam.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/in.h>
28 #include <linux/init.h>
29 #include <linux/ioport.h>
30 #include <linux/pci.h>
31 #include <linux/netdevice.h>
32 #include <linux/etherdevice.h>
33 #include <linux/skbuff.h>
34 #include <linux/ethtool.h>
35 #include <linux/mii.h>
36 #include <linux/if_vlan.h>
37 #include <linux/ip.h>
38 #include <linux/tcp.h>
39 #include <linux/workqueue.h>
40 #include <linux/prefetch.h>
41 #include <linux/dma-mapping.h>
42
43 #include <net/checksum.h>
44
45 #include <asm/system.h>
46 #include <asm/io.h>
47 #include <asm/byteorder.h>
48 #include <asm/uaccess.h>
49
50 #ifdef CONFIG_SPARC64
51 #include <asm/idprom.h>
52 #include <asm/oplib.h>
53 #include <asm/pbm.h>
54 #endif
55
56 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
57 #define TG3_VLAN_TAG_USED 1
58 #else
59 #define TG3_VLAN_TAG_USED 0
60 #endif
61
62 #ifdef NETIF_F_TSO
63 #define TG3_TSO_SUPPORT 1
64 #else
65 #define TG3_TSO_SUPPORT 0
66 #endif
67
68 #include "tg3.h"
69
70 #define DRV_MODULE_NAME         "tg3"
71 #define PFX DRV_MODULE_NAME     ": "
72 #define DRV_MODULE_VERSION      "3.47"
73 #define DRV_MODULE_RELDATE      "Dec 28, 2005"
74
75 #define TG3_DEF_MAC_MODE        0
76 #define TG3_DEF_RX_MODE         0
77 #define TG3_DEF_TX_MODE         0
78 #define TG3_DEF_MSG_ENABLE        \
79         (NETIF_MSG_DRV          | \
80          NETIF_MSG_PROBE        | \
81          NETIF_MSG_LINK         | \
82          NETIF_MSG_TIMER        | \
83          NETIF_MSG_IFDOWN       | \
84          NETIF_MSG_IFUP         | \
85          NETIF_MSG_RX_ERR       | \
86          NETIF_MSG_TX_ERR)
87
88 /* length of time before we decide the hardware is borked,
89  * and dev->tx_timeout() should be called to fix the problem
90  */
91 #define TG3_TX_TIMEOUT                  (5 * HZ)
92
93 /* hardware minimum and maximum for a single frame's data payload */
94 #define TG3_MIN_MTU                     60
95 #define TG3_MAX_MTU(tp) \
96         ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
97
98 /* These numbers seem to be hard coded in the NIC firmware somehow.
99  * You can't change the ring sizes, but you can change where you place
100  * them in the NIC onboard memory.
101  */
102 #define TG3_RX_RING_SIZE                512
103 #define TG3_DEF_RX_RING_PENDING         200
104 #define TG3_RX_JUMBO_RING_SIZE          256
105 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
106
107 /* Do not place this n-ring-entries value into the tp struct itself;
108  * we really want to expose these constants to GCC so that modulo et
109  * al. operations are done with shifts and masks instead of with
110  * hw multiply/modulo instructions.  Another solution would be to
111  * replace things like '% foo' with '& (foo - 1)'.
112  */
113 #define TG3_RX_RCB_RING_SIZE(tp)        \
114         ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)
115
116 #define TG3_TX_RING_SIZE                512
117 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
118
119 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
120                                  TG3_RX_RING_SIZE)
121 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
122                                  TG3_RX_JUMBO_RING_SIZE)
123 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
124                                    TG3_RX_RCB_RING_SIZE(tp))
125 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
126                                  TG3_TX_RING_SIZE)
127 #define TX_BUFFS_AVAIL(TP)                                              \
128         ((TP)->tx_pending -                                             \
129          (((TP)->tx_prod - (TP)->tx_cons) & (TG3_TX_RING_SIZE - 1)))
130 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
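
/* For illustration: because TG3_TX_RING_SIZE is a power of two, the
 * '& (TG3_TX_RING_SIZE - 1)' mask used by NEXT_TX() and TX_BUFFS_AVAIL()
 * gives the same result as '% TG3_TX_RING_SIZE':
 *
 *      next = (entry + 1) % TG3_TX_RING_SIZE;   (modulo form)
 *      next = NEXT_TX(entry);                   (same result via masking)
 *
 * i.e. the '% foo' -> '& (foo - 1)' rewrite mentioned in the comment above.
 */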
131
132 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
133 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
134
135 /* minimum number of free TX descriptors required to wake up TX process */
136 #define TG3_TX_WAKEUP_THRESH            (TG3_TX_RING_SIZE / 4)
137
138 /* number of ETHTOOL_GSTATS u64's */
139 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
140
141 #define TG3_NUM_TEST            6
142
143 static char version[] __devinitdata =
144         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
145
146 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
147 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
148 MODULE_LICENSE("GPL");
149 MODULE_VERSION(DRV_MODULE_VERSION);
150
151 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
152 module_param(tg3_debug, int, 0);
153 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
154
155 static struct pci_device_id tg3_pci_tbl[] = {
156         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
157           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
158         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
159           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
160         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
161           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
162         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
163           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
164         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
165           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
166         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
167           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
168         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
169           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
170         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
171           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
172         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
173           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
174         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
175           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
176         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
177           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
178         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
179           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
180         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
181           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
182         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
183           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
184         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
185           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
186         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
187           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
188         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
189           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
190         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
191           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
192         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
193           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
194         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
195           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
196         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
197           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
198         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
199           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
200         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
201           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
202         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
203           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
204         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
205           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
206         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
207           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
208         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
209           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
210         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
211           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
212         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
213           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
214         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752,
215           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
216         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M,
217           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
218         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
219           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
220         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
221           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
222         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
223           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
224         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714,
225           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
226         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715,
227           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
228         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780,
229           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
230         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S,
231           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
232         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781,
233           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
234         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
235           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
236         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
237           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
238         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
239           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
240         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
241           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
242         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
243           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
244         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
245           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
246         { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
247           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
248         { 0, }
249 };
250
251 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
252
253 static struct {
254         const char string[ETH_GSTRING_LEN];
255 } ethtool_stats_keys[TG3_NUM_STATS] = {
256         { "rx_octets" },
257         { "rx_fragments" },
258         { "rx_ucast_packets" },
259         { "rx_mcast_packets" },
260         { "rx_bcast_packets" },
261         { "rx_fcs_errors" },
262         { "rx_align_errors" },
263         { "rx_xon_pause_rcvd" },
264         { "rx_xoff_pause_rcvd" },
265         { "rx_mac_ctrl_rcvd" },
266         { "rx_xoff_entered" },
267         { "rx_frame_too_long_errors" },
268         { "rx_jabbers" },
269         { "rx_undersize_packets" },
270         { "rx_in_length_errors" },
271         { "rx_out_length_errors" },
272         { "rx_64_or_less_octet_packets" },
273         { "rx_65_to_127_octet_packets" },
274         { "rx_128_to_255_octet_packets" },
275         { "rx_256_to_511_octet_packets" },
276         { "rx_512_to_1023_octet_packets" },
277         { "rx_1024_to_1522_octet_packets" },
278         { "rx_1523_to_2047_octet_packets" },
279         { "rx_2048_to_4095_octet_packets" },
280         { "rx_4096_to_8191_octet_packets" },
281         { "rx_8192_to_9022_octet_packets" },
282
283         { "tx_octets" },
284         { "tx_collisions" },
285
286         { "tx_xon_sent" },
287         { "tx_xoff_sent" },
288         { "tx_flow_control" },
289         { "tx_mac_errors" },
290         { "tx_single_collisions" },
291         { "tx_mult_collisions" },
292         { "tx_deferred" },
293         { "tx_excessive_collisions" },
294         { "tx_late_collisions" },
295         { "tx_collide_2times" },
296         { "tx_collide_3times" },
297         { "tx_collide_4times" },
298         { "tx_collide_5times" },
299         { "tx_collide_6times" },
300         { "tx_collide_7times" },
301         { "tx_collide_8times" },
302         { "tx_collide_9times" },
303         { "tx_collide_10times" },
304         { "tx_collide_11times" },
305         { "tx_collide_12times" },
306         { "tx_collide_13times" },
307         { "tx_collide_14times" },
308         { "tx_collide_15times" },
309         { "tx_ucast_packets" },
310         { "tx_mcast_packets" },
311         { "tx_bcast_packets" },
312         { "tx_carrier_sense_errors" },
313         { "tx_discards" },
314         { "tx_errors" },
315
316         { "dma_writeq_full" },
317         { "dma_write_prioq_full" },
318         { "rxbds_empty" },
319         { "rx_discards" },
320         { "rx_errors" },
321         { "rx_threshold_hit" },
322
323         { "dma_readq_full" },
324         { "dma_read_prioq_full" },
325         { "tx_comp_queue_full" },
326
327         { "ring_set_send_prod_index" },
328         { "ring_status_update" },
329         { "nic_irqs" },
330         { "nic_avoided_irqs" },
331         { "nic_tx_threshold_hit" }
332 };
333
334 static struct {
335         const char string[ETH_GSTRING_LEN];
336 } ethtool_test_keys[TG3_NUM_TEST] = {
337         { "nvram test     (online) " },
338         { "link test      (online) " },
339         { "register test  (offline)" },
340         { "memory test    (offline)" },
341         { "loopback test  (offline)" },
342         { "interrupt test (offline)" },
343 };
344
345 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
346 {
347         writel(val, tp->regs + off);
348 }
349
350 static u32 tg3_read32(struct tg3 *tp, u32 off)
351 {
352         return readl(tp->regs + off);
353 }
354
355 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
356 {
357         unsigned long flags;
358
359         spin_lock_irqsave(&tp->indirect_lock, flags);
360         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
361         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
362         spin_unlock_irqrestore(&tp->indirect_lock, flags);
363 }
364
365 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
366 {
367         writel(val, tp->regs + off);
368         readl(tp->regs + off);
369 }
370
371 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
372 {
373         unsigned long flags;
374         u32 val;
375
376         spin_lock_irqsave(&tp->indirect_lock, flags);
377         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
378         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
379         spin_unlock_irqrestore(&tp->indirect_lock, flags);
380         return val;
381 }
382
383 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
384 {
385         unsigned long flags;
386
387         if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
388                 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
389                                        TG3_64BIT_REG_LOW, val);
390                 return;
391         }
392         if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
393                 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
394                                        TG3_64BIT_REG_LOW, val);
395                 return;
396         }
397
398         spin_lock_irqsave(&tp->indirect_lock, flags);
399         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
400         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
401         spin_unlock_irqrestore(&tp->indirect_lock, flags);
402
403         /* In indirect mode when disabling interrupts, we also need
404          * to clear the interrupt bit in the GRC local ctrl register.
405          */
406         if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
407             (val == 0x1)) {
408                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
409                                        tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
410         }
411 }
412
413 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
414 {
415         unsigned long flags;
416         u32 val;
417
418         spin_lock_irqsave(&tp->indirect_lock, flags);
419         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
420         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
421         spin_unlock_irqrestore(&tp->indirect_lock, flags);
422         return val;
423 }
424
425 /* usec_wait specifies the wait time in usec when writing to certain registers
426  * where it is unsafe to read back the register without some delay.
427  * GRC_LOCAL_CTRL is one example, when the GPIOs are toggled to switch power.
428  * TG3PCI_CLOCK_CTRL is another, when the clock frequencies are changed.
429  */
430 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
431 {
432         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
433             (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
434                 /* Non-posted methods */
435                 tp->write32(tp, off, val);
436         else {
437                 /* Posted method */
438                 tg3_write32(tp, off, val);
439                 if (usec_wait)
440                         udelay(usec_wait);
441                 tp->read32(tp, off);
442         }
443         /* Wait again after the read for the posted method to guarantee that
444          * the wait time is met.
445          */
446         if (usec_wait)
447                 udelay(usec_wait);
448 }
449
450 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
451 {
452         tp->write32_mbox(tp, off, val);
453         if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
454             !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
455                 tp->read32_mbox(tp, off);
456 }
457
458 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
459 {
460         void __iomem *mbox = tp->regs + off;
461         writel(val, mbox);
462         if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
463                 writel(val, mbox);
464         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
465                 readl(mbox);
466 }
467
468 #define tw32_mailbox(reg, val)  tp->write32_mbox(tp, reg, val)
469 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
470 #define tw32_rx_mbox(reg, val)  tp->write32_rx_mbox(tp, reg, val)
471 #define tw32_tx_mbox(reg, val)  tp->write32_tx_mbox(tp, reg, val)
472 #define tr32_mailbox(reg)       tp->read32_mbox(tp, reg)
473
474 #define tw32(reg,val)           tp->write32(tp, reg, val)
475 #define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val), 0)
476 #define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
477 #define tr32(reg)               tp->read32(tp, reg)
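
/* Typical flushed writes, as used elsewhere in this file: the _f variant
 * reads the register back to push the posted write out, and the _wait_f
 * variant adds a delay for registers that are unsafe to read back
 * immediately (see the usec_wait comment above), e.g.:
 *
 *      tw32_f(MAC_MI_COM, frame_val);
 *      tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
 */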
478
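/* NIC SRAM access through the PCI memory window: the target offset is
 * written to TG3PCI_MEM_WIN_BASE_ADDR, the word moves through
 * TG3PCI_MEM_WIN_DATA, and the window is restored to zero afterwards.
 * Example use, as seen later in this file:
 *
 *      tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
 */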
479 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
480 {
481         unsigned long flags;
482
483         spin_lock_irqsave(&tp->indirect_lock, flags);
484         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
485         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
486
487         /* Always leave this as zero. */
488         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
489         spin_unlock_irqrestore(&tp->indirect_lock, flags);
490 }
491
492 static void tg3_write_mem_fast(struct tg3 *tp, u32 off, u32 val)
493 {
494         /* If no workaround is needed, write to mem space directly */
495         if (tp->write32 != tg3_write_indirect_reg32)
496                 tw32(NIC_SRAM_WIN_BASE + off, val);
497         else
498                 tg3_write_mem(tp, off, val);
499 }
500
501 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
502 {
503         unsigned long flags;
504
505         spin_lock_irqsave(&tp->indirect_lock, flags);
506         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
507         pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
508
509         /* Always leave this as zero. */
510         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
511         spin_unlock_irqrestore(&tp->indirect_lock, flags);
512 }
513
514 static void tg3_disable_ints(struct tg3 *tp)
515 {
516         tw32(TG3PCI_MISC_HOST_CTRL,
517              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
518         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
519 }
520
521 static inline void tg3_cond_int(struct tg3 *tp)
522 {
523         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
524             (tp->hw_status->status & SD_STATUS_UPDATED))
525                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
526 }
527
528 static void tg3_enable_ints(struct tg3 *tp)
529 {
530         tp->irq_sync = 0;
531         wmb();
532
533         tw32(TG3PCI_MISC_HOST_CTRL,
534              (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
535         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
536                        (tp->last_tag << 24));
537         tg3_cond_int(tp);
538 }
539
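/* Work detection: the chip keeps tp->hw_status (the status block)
 * up to date; this helper reports pending work by checking the
 * link-change bit (unless link changes are detected via the link-change
 * register or by polling the SERDES) and by comparing the TX consumer
 * and RX return-ring producer indices against the driver's cached copies.
 */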
540 static inline unsigned int tg3_has_work(struct tg3 *tp)
541 {
542         struct tg3_hw_status *sblk = tp->hw_status;
543         unsigned int work_exists = 0;
544
545         /* check for phy events */
546         if (!(tp->tg3_flags &
547               (TG3_FLAG_USE_LINKCHG_REG |
548                TG3_FLAG_POLL_SERDES))) {
549                 if (sblk->status & SD_STATUS_LINK_CHG)
550                         work_exists = 1;
551         }
552         /* check for RX/TX work to do */
553         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
554             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
555                 work_exists = 1;
556
557         return work_exists;
558 }
559
560 /* tg3_restart_ints
561  *  similar to tg3_enable_ints, but it accurately determines whether there
562  *  is new work pending and can return without flushing the PIO write
563  *  which re-enables interrupts.
564  */
565 static void tg3_restart_ints(struct tg3 *tp)
566 {
567         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
568                      tp->last_tag << 24);
569         mmiowb();
570
571         /* When doing tagged status, this work check is unnecessary.
572          * The last_tag we write above tells the chip which piece of
573          * work we've completed.
574          */
575         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
576             tg3_has_work(tp))
577                 tw32(HOSTCC_MODE, tp->coalesce_mode |
578                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
579 }
580
581 static inline void tg3_netif_stop(struct tg3 *tp)
582 {
583         tp->dev->trans_start = jiffies; /* prevent tx timeout */
584         netif_poll_disable(tp->dev);
585         netif_tx_disable(tp->dev);
586 }
587
588 static inline void tg3_netif_start(struct tg3 *tp)
589 {
590         netif_wake_queue(tp->dev);
591         /* NOTE: unconditional netif_wake_queue is only appropriate
592          * so long as all callers are assured to have free tx slots
593          * (such as after tg3_init_hw)
594          */
595         netif_poll_enable(tp->dev);
596         tp->hw_status->status |= SD_STATUS_UPDATED;
597         tg3_enable_ints(tp);
598 }
599
600 static void tg3_switch_clocks(struct tg3 *tp)
601 {
602         u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
603         u32 orig_clock_ctrl;
604
605         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
606                 return;
607
608         orig_clock_ctrl = clock_ctrl;
609         clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
610                        CLOCK_CTRL_CLKRUN_OENABLE |
611                        0x1f);
612         tp->pci_clock_ctrl = clock_ctrl;
613
614         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
615                 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
616                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
617                                     clock_ctrl | CLOCK_CTRL_625_CORE, 40);
618                 }
619         } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
620                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
621                             clock_ctrl |
622                             (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
623                             40);
624                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
625                             clock_ctrl | (CLOCK_CTRL_ALTCLK),
626                             40);
627         }
628         tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
629 }
630
631 #define PHY_BUSY_LOOPS  5000
632
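/* MII management (MDIO) access: a frame holding the PHY address, the
 * register address and a read or write command is composed in MAC_MI_COM,
 * and MI_COM_BUSY is then polled (up to PHY_BUSY_LOOPS iterations, 10 usec
 * apart) until the MAC's MI state machine completes.  Hardware
 * auto-polling is temporarily disabled around the access and restored
 * afterwards.
 */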
633 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
634 {
635         u32 frame_val;
636         unsigned int loops;
637         int ret;
638
639         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
640                 tw32_f(MAC_MI_MODE,
641                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
642                 udelay(80);
643         }
644
645         *val = 0x0;
646
647         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
648                       MI_COM_PHY_ADDR_MASK);
649         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
650                       MI_COM_REG_ADDR_MASK);
651         frame_val |= (MI_COM_CMD_READ | MI_COM_START);
652         
653         tw32_f(MAC_MI_COM, frame_val);
654
655         loops = PHY_BUSY_LOOPS;
656         while (loops != 0) {
657                 udelay(10);
658                 frame_val = tr32(MAC_MI_COM);
659
660                 if ((frame_val & MI_COM_BUSY) == 0) {
661                         udelay(5);
662                         frame_val = tr32(MAC_MI_COM);
663                         break;
664                 }
665                 loops -= 1;
666         }
667
668         ret = -EBUSY;
669         if (loops != 0) {
670                 *val = frame_val & MI_COM_DATA_MASK;
671                 ret = 0;
672         }
673
674         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
675                 tw32_f(MAC_MI_MODE, tp->mi_mode);
676                 udelay(80);
677         }
678
679         return ret;
680 }
681
682 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
683 {
684         u32 frame_val;
685         unsigned int loops;
686         int ret;
687
688         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
689                 tw32_f(MAC_MI_MODE,
690                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
691                 udelay(80);
692         }
693
694         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
695                       MI_COM_PHY_ADDR_MASK);
696         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
697                       MI_COM_REG_ADDR_MASK);
698         frame_val |= (val & MI_COM_DATA_MASK);
699         frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
700         
701         tw32_f(MAC_MI_COM, frame_val);
702
703         loops = PHY_BUSY_LOOPS;
704         while (loops != 0) {
705                 udelay(10);
706                 frame_val = tr32(MAC_MI_COM);
707                 if ((frame_val & MI_COM_BUSY) == 0) {
708                         udelay(5);
709                         frame_val = tr32(MAC_MI_COM);
710                         break;
711                 }
712                 loops -= 1;
713         }
714
715         ret = -EBUSY;
716         if (loops != 0)
717                 ret = 0;
718
719         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
720                 tw32_f(MAC_MI_MODE, tp->mi_mode);
721                 udelay(80);
722         }
723
724         return ret;
725 }
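
/* A minimal read-modify-write sketch using the two helpers above; this is
 * the same AUX_CTRL pattern tg3_phy_set_wirespeed() below relies on:
 *
 *      u32 val;
 *
 *      if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
 *          !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
 *              tg3_writephy(tp, MII_TG3_AUX_CTRL, val | (1 << 15) | (1 << 4));
 */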
726
727 static void tg3_phy_set_wirespeed(struct tg3 *tp)
728 {
729         u32 val;
730
731         if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
732                 return;
733
734         if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
735             !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
736                 tg3_writephy(tp, MII_TG3_AUX_CTRL,
737                              (val | (1 << 15) | (1 << 4)));
738 }
739
740 static int tg3_bmcr_reset(struct tg3 *tp)
741 {
742         u32 phy_control;
743         int limit, err;
744
745         /* OK, reset it, and poll the BMCR_RESET bit until it
746          * clears or we time out.
747          */
748         phy_control = BMCR_RESET;
749         err = tg3_writephy(tp, MII_BMCR, phy_control);
750         if (err != 0)
751                 return -EBUSY;
752
753         limit = 5000;
754         while (limit--) {
755                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
756                 if (err != 0)
757                         return -EBUSY;
758
759                 if ((phy_control & BMCR_RESET) == 0) {
760                         udelay(40);
761                         break;
762                 }
763                 udelay(10);
764         }
765         if (limit < 0)
766                 return -EBUSY;
767
768         return 0;
769 }
770
771 static int tg3_wait_macro_done(struct tg3 *tp)
772 {
773         int limit = 100;
774
775         while (limit--) {
776                 u32 tmp32;
777
778                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
779                         if ((tmp32 & 0x1000) == 0)
780                                 break;
781                 }
782         }
783         if (limit < 0)
784                 return -EBUSY;
785
786         return 0;
787 }
788
789 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
790 {
791         static const u32 test_pat[4][6] = {
792         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
793         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
794         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
795         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
796         };
797         int chan;
798
799         for (chan = 0; chan < 4; chan++) {
800                 int i;
801
802                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
803                              (chan * 0x2000) | 0x0200);
804                 tg3_writephy(tp, 0x16, 0x0002);
805
806                 for (i = 0; i < 6; i++)
807                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
808                                      test_pat[chan][i]);
809
810                 tg3_writephy(tp, 0x16, 0x0202);
811                 if (tg3_wait_macro_done(tp)) {
812                         *resetp = 1;
813                         return -EBUSY;
814                 }
815
816                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
817                              (chan * 0x2000) | 0x0200);
818                 tg3_writephy(tp, 0x16, 0x0082);
819                 if (tg3_wait_macro_done(tp)) {
820                         *resetp = 1;
821                         return -EBUSY;
822                 }
823
824                 tg3_writephy(tp, 0x16, 0x0802);
825                 if (tg3_wait_macro_done(tp)) {
826                         *resetp = 1;
827                         return -EBUSY;
828                 }
829
830                 for (i = 0; i < 6; i += 2) {
831                         u32 low, high;
832
833                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
834                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
835                             tg3_wait_macro_done(tp)) {
836                                 *resetp = 1;
837                                 return -EBUSY;
838                         }
839                         low &= 0x7fff;
840                         high &= 0x000f;
841                         if (low != test_pat[chan][i] ||
842                             high != test_pat[chan][i+1]) {
843                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
844                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
845                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
846
847                                 return -EBUSY;
848                         }
849                 }
850         }
851
852         return 0;
853 }
854
855 static int tg3_phy_reset_chanpat(struct tg3 *tp)
856 {
857         int chan;
858
859         for (chan = 0; chan < 4; chan++) {
860                 int i;
861
862                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
863                              (chan * 0x2000) | 0x0200);
864                 tg3_writephy(tp, 0x16, 0x0002);
865                 for (i = 0; i < 6; i++)
866                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
867                 tg3_writephy(tp, 0x16, 0x0202);
868                 if (tg3_wait_macro_done(tp))
869                         return -EBUSY;
870         }
871
872         return 0;
873 }
874
875 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
876 {
877         u32 reg32, phy9_orig;
878         int retries, do_phy_reset, err;
879
880         retries = 10;
881         do_phy_reset = 1;
882         do {
883                 if (do_phy_reset) {
884                         err = tg3_bmcr_reset(tp);
885                         if (err)
886                                 return err;
887                         do_phy_reset = 0;
888                 }
889
890                 /* Disable transmitter and interrupt.  */
891                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
892                         continue;
893
894                 reg32 |= 0x3000;
895                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
896
897                 /* Set full-duplex, 1000 mbps.  */
898                 tg3_writephy(tp, MII_BMCR,
899                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
900
901                 /* Set to master mode.  */
902                 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
903                         continue;
904
905                 tg3_writephy(tp, MII_TG3_CTRL,
906                              (MII_TG3_CTRL_AS_MASTER |
907                               MII_TG3_CTRL_ENABLE_AS_MASTER));
908
909                 /* Enable SM_DSP_CLOCK and 6dB.  */
910                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
911
912                 /* Block the PHY control access.  */
913                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
914                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
915
916                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
917                 if (!err)
918                         break;
919         } while (--retries);
920
921         err = tg3_phy_reset_chanpat(tp);
922         if (err)
923                 return err;
924
925         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
926         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
927
928         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
929         tg3_writephy(tp, 0x16, 0x0000);
930
931         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
932             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
933                 /* Set Extended packet length bit for jumbo frames */
934                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
935         }
936         else {
937                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
938         }
939
940         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
941
942         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
943                 reg32 &= ~0x3000;
944                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
945         } else if (!err)
946                 err = -EBUSY;
947
948         return err;
949 }
950
951 /* Reset the tigon3 PHY and apply any chip-specific workarounds
952  * that are needed after the reset.
953  */
954 static int tg3_phy_reset(struct tg3 *tp)
955 {
956         u32 phy_status;
957         int err;
958
959         err  = tg3_readphy(tp, MII_BMSR, &phy_status);
960         err |= tg3_readphy(tp, MII_BMSR, &phy_status);
961         if (err != 0)
962                 return -EBUSY;
963
964         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
965             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
966             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
967                 err = tg3_phy_reset_5703_4_5(tp);
968                 if (err)
969                         return err;
970                 goto out;
971         }
972
973         err = tg3_bmcr_reset(tp);
974         if (err)
975                 return err;
976
977 out:
978         if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
979                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
980                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
981                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
982                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
983                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
984                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
985         }
986         if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
987                 tg3_writephy(tp, 0x1c, 0x8d68);
988                 tg3_writephy(tp, 0x1c, 0x8d68);
989         }
990         if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
991                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
992                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
993                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
994                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
995                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
996                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
997                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
998                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
999         }
1000         /* Set Extended packet length bit (bit 14) on all chips
1001          * that support jumbo frames. */
1002         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1003                 /* Cannot do read-modify-write on 5401 */
1004                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1005         } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1006                 u32 phy_reg;
1007
1008                 /* Set bit 14 with read-modify-write to preserve other bits */
1009                 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
1010                     !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
1011                         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
1012         }
1013
1014         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
1015          * jumbo frame transmission.
1016          */
1017         if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1018                 u32 phy_reg;
1019
1020                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
1021                     tg3_writephy(tp, MII_TG3_EXT_CTRL,
1022                                  phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
1023         }
1024
1025         tg3_phy_set_wirespeed(tp);
1026         return 0;
1027 }
1028
1029 static void tg3_frob_aux_power(struct tg3 *tp)
1030 {
1031         struct tg3 *tp_peer = tp;
1032
1033         if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
1034                 return;
1035
1036         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
1037             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
1038                 struct net_device *dev_peer;
1039
1040                 dev_peer = pci_get_drvdata(tp->pdev_peer);
1041                 if (!dev_peer)
1042                         BUG();
1043                 tp_peer = netdev_priv(dev_peer);
1044         }
1045
1046         if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1047             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
1048             (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1049             (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
1050                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1051                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1052                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1053                                     (GRC_LCLCTRL_GPIO_OE0 |
1054                                      GRC_LCLCTRL_GPIO_OE1 |
1055                                      GRC_LCLCTRL_GPIO_OE2 |
1056                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
1057                                      GRC_LCLCTRL_GPIO_OUTPUT1),
1058                                     100);
1059                 } else {
1060                         u32 no_gpio2;
1061                         u32 grc_local_ctrl = 0;
1062
1063                         if (tp_peer != tp &&
1064                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1065                                 return;
1066
1067                         /* Workaround to prevent overdrawing Amps. */
1068                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1069                             ASIC_REV_5714) {
1070                                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
1071                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1072                                             grc_local_ctrl, 100);
1073                         }
1074
1075                         /* On 5753 and variants, GPIO2 cannot be used. */
1076                         no_gpio2 = tp->nic_sram_data_cfg &
1077                                     NIC_SRAM_DATA_CFG_NO_GPIO2;
1078
1079                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
1080                                          GRC_LCLCTRL_GPIO_OE1 |
1081                                          GRC_LCLCTRL_GPIO_OE2 |
1082                                          GRC_LCLCTRL_GPIO_OUTPUT1 |
1083                                          GRC_LCLCTRL_GPIO_OUTPUT2;
1084                         if (no_gpio2) {
1085                                 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
1086                                                     GRC_LCLCTRL_GPIO_OUTPUT2);
1087                         }
1088                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1089                                                     grc_local_ctrl, 100);
1090
1091                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
1092
1093                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1094                                                     grc_local_ctrl, 100);
1095
1096                         if (!no_gpio2) {
1097                                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
1098                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1099                                             grc_local_ctrl, 100);
1100                         }
1101                 }
1102         } else {
1103                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
1104                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
1105                         if (tp_peer != tp &&
1106                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1107                                 return;
1108
1109                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1110                                     (GRC_LCLCTRL_GPIO_OE1 |
1111                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1112
1113                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1114                                     GRC_LCLCTRL_GPIO_OE1, 100);
1115
1116                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1117                                     (GRC_LCLCTRL_GPIO_OE1 |
1118                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1119                 }
1120         }
1121 }
1122
1123 static int tg3_setup_phy(struct tg3 *, int);
1124
1125 #define RESET_KIND_SHUTDOWN     0
1126 #define RESET_KIND_INIT         1
1127 #define RESET_KIND_SUSPEND      2
1128
1129 static void tg3_write_sig_post_reset(struct tg3 *, int);
1130 static int tg3_halt_cpu(struct tg3 *, u32);
1131 static int tg3_nvram_lock(struct tg3 *);
1132 static void tg3_nvram_unlock(struct tg3 *);
1133
1134 static int tg3_set_power_state(struct tg3 *tp, int state)
1135 {
1136         u32 misc_host_ctrl;
1137         u16 power_control, power_caps;
1138         int pm = tp->pm_cap;
1139
1140         /* Make sure register accesses (indirect or otherwise)
1141          * will function correctly.
1142          */
1143         pci_write_config_dword(tp->pdev,
1144                                TG3PCI_MISC_HOST_CTRL,
1145                                tp->misc_host_ctrl);
1146
1147         pci_read_config_word(tp->pdev,
1148                              pm + PCI_PM_CTRL,
1149                              &power_control);
1150         power_control |= PCI_PM_CTRL_PME_STATUS;
1151         power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1152         switch (state) {
1153         case 0:
1154                 power_control |= 0;
1155                 pci_write_config_word(tp->pdev,
1156                                       pm + PCI_PM_CTRL,
1157                                       power_control);
1158                 udelay(100);    /* Delay after power state change */
1159
1160                 /* Switch out of Vaux if it is not a LOM */
1161                 if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
1162                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
1163
1164                 return 0;
1165
1166         case 1:
1167                 power_control |= 1;
1168                 break;
1169
1170         case 2:
1171                 power_control |= 2;
1172                 break;
1173
1174         case 3:
1175                 power_control |= 3;
1176                 break;
1177
1178         default:
1179                 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1180                        "requested.\n",
1181                        tp->dev->name, state);
1182                 return -EINVAL;
1183         }
1184
1185         power_control |= PCI_PM_CTRL_PME_ENABLE;
1186
1187         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1188         tw32(TG3PCI_MISC_HOST_CTRL,
1189              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1190
1191         if (tp->link_config.phy_is_low_power == 0) {
1192                 tp->link_config.phy_is_low_power = 1;
1193                 tp->link_config.orig_speed = tp->link_config.speed;
1194                 tp->link_config.orig_duplex = tp->link_config.duplex;
1195                 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1196         }
1197
1198         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
1199                 tp->link_config.speed = SPEED_10;
1200                 tp->link_config.duplex = DUPLEX_HALF;
1201                 tp->link_config.autoneg = AUTONEG_ENABLE;
1202                 tg3_setup_phy(tp, 0);
1203         }
1204
1205         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1206                 int i;
1207                 u32 val;
1208
1209                 for (i = 0; i < 200; i++) {
1210                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
1211                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1212                                 break;
1213                         msleep(1);
1214                 }
1215         }
1216         tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
1217                                              WOL_DRV_STATE_SHUTDOWN |
1218                                              WOL_DRV_WOL | WOL_SET_MAGIC_PKT);
1219
1220         pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1221
1222         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1223                 u32 mac_mode;
1224
1225                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1226                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1227                         udelay(40);
1228
1229                         mac_mode = MAC_MODE_PORT_MODE_MII;
1230
1231                         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
1232                             !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
1233                                 mac_mode |= MAC_MODE_LINK_POLARITY;
1234                 } else {
1235                         mac_mode = MAC_MODE_PORT_MODE_TBI;
1236                 }
1237
1238                 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1239                         tw32(MAC_LED_CTRL, tp->led_ctrl);
1240
1241                 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1242                      (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1243                         mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1244
1245                 tw32_f(MAC_MODE, mac_mode);
1246                 udelay(100);
1247
1248                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1249                 udelay(10);
1250         }
1251
1252         if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1253             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1254              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1255                 u32 base_val;
1256
1257                 base_val = tp->pci_clock_ctrl;
1258                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1259                              CLOCK_CTRL_TXCLK_DISABLE);
1260
1261                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
1262                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
1263         } else if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
1264                 /* do nothing */
1265         } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1266                      (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1267                 u32 newbits1, newbits2;
1268
1269                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1270                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1271                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1272                                     CLOCK_CTRL_TXCLK_DISABLE |
1273                                     CLOCK_CTRL_ALTCLK);
1274                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1275                 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1276                         newbits1 = CLOCK_CTRL_625_CORE;
1277                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1278                 } else {
1279                         newbits1 = CLOCK_CTRL_ALTCLK;
1280                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1281                 }
1282
1283                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
1284                             40);
1285
1286                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
1287                             40);
1288
1289                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1290                         u32 newbits3;
1291
1292                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1293                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1294                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1295                                             CLOCK_CTRL_TXCLK_DISABLE |
1296                                             CLOCK_CTRL_44MHZ_CORE);
1297                         } else {
1298                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1299                         }
1300
1301                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
1302                                     tp->pci_clock_ctrl | newbits3, 40);
1303                 }
1304         }
1305
1306         if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
1307             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1308                 /* Turn off the PHY */
1309                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1310                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1311                                      MII_TG3_EXT_CTRL_FORCE_LED_OFF);
1312                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
1313                         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
1314                                 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
1315                 }
1316         }
1317
1318         tg3_frob_aux_power(tp);
1319
1320         /* Workaround for unstable PLL clock */
1321         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1322             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1323                 u32 val = tr32(0x7d00);
1324
1325                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1326                 tw32(0x7d00, val);
1327                 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1328                         tg3_nvram_lock(tp);
1329                         tg3_halt_cpu(tp, RX_CPU_BASE);
1330                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR0);
1331                         tg3_nvram_unlock(tp);
1332                 }
1333         }
1334
1335         /* Finally, set the new power state. */
1336         pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1337         udelay(100);    /* Delay after power state change */
1338
1339         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1340
1341         return 0;
1342 }
1343
1344 static void tg3_link_report(struct tg3 *tp)
1345 {
1346         if (!netif_carrier_ok(tp->dev)) {
1347                 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1348         } else {
1349                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1350                        tp->dev->name,
1351                        (tp->link_config.active_speed == SPEED_1000 ?
1352                         1000 :
1353                         (tp->link_config.active_speed == SPEED_100 ?
1354                          100 : 10)),
1355                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1356                         "full" : "half"));
1357
1358                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1359                        "%s for RX.\n",
1360                        tp->dev->name,
1361                        (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1362                        (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1363         }
1364 }
1365
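/* Flow-control resolution (as implemented below): when pause
 * autonegotiation is enabled, 1000BASE-X pause bits are first mapped onto
 * their 1000BASE-T equivalents, then RX/TX pause are resolved roughly as:
 *
 *      local CAP,        remote CAP             -> RX + TX pause
 *      local CAP + ASYM, remote ASYM (no CAP)   -> RX pause only
 *      local ASYM only,  remote CAP + ASYM      -> TX pause only
 *
 * MAC_RX_MODE and MAC_TX_MODE are rewritten only if the result changed.
 */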
1366 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1367 {
1368         u32 new_tg3_flags = 0;
1369         u32 old_rx_mode = tp->rx_mode;
1370         u32 old_tx_mode = tp->tx_mode;
1371
1372         if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1373
1374                 /* Convert 1000BaseX flow control bits to 1000BaseT
1375                  * bits before resolving flow control.
1376                  */
1377                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
1378                         local_adv &= ~(ADVERTISE_PAUSE_CAP |
1379                                        ADVERTISE_PAUSE_ASYM);
1380                         remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1381
1382                         if (local_adv & ADVERTISE_1000XPAUSE)
1383                                 local_adv |= ADVERTISE_PAUSE_CAP;
1384                         if (local_adv & ADVERTISE_1000XPSE_ASYM)
1385                                 local_adv |= ADVERTISE_PAUSE_ASYM;
1386                         if (remote_adv & LPA_1000XPAUSE)
1387                                 remote_adv |= LPA_PAUSE_CAP;
1388                         if (remote_adv & LPA_1000XPAUSE_ASYM)
1389                                 remote_adv |= LPA_PAUSE_ASYM;
1390                 }
1391
1392                 if (local_adv & ADVERTISE_PAUSE_CAP) {
1393                         if (local_adv & ADVERTISE_PAUSE_ASYM) {
1394                                 if (remote_adv & LPA_PAUSE_CAP)
1395                                         new_tg3_flags |=
1396                                                 (TG3_FLAG_RX_PAUSE |
1397                                                 TG3_FLAG_TX_PAUSE);
1398                                 else if (remote_adv & LPA_PAUSE_ASYM)
1399                                         new_tg3_flags |=
1400                                                 (TG3_FLAG_RX_PAUSE);
1401                         } else {
1402                                 if (remote_adv & LPA_PAUSE_CAP)
1403                                         new_tg3_flags |=
1404                                                 (TG3_FLAG_RX_PAUSE |
1405                                                 TG3_FLAG_TX_PAUSE);
1406                         }
1407                 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1408                         if ((remote_adv & LPA_PAUSE_CAP) &&
1409                         (remote_adv & LPA_PAUSE_ASYM))
1410                                 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1411                 }
1412
1413                 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1414                 tp->tg3_flags |= new_tg3_flags;
1415         } else {
1416                 new_tg3_flags = tp->tg3_flags;
1417         }
1418
1419         if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1420                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1421         else
1422                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1423
1424         if (old_rx_mode != tp->rx_mode) {
1425                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1426         }
1427
1428         if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1429                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1430         else
1431                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1432
1433         if (old_tx_mode != tp->tx_mode) {
1434                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1435         }
1436 }
1437
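/* Decode the speed/duplex field of the Broadcom auxiliary status
 * register into the generic SPEED_xxx and DUPLEX_xxx values;
 * unrecognized encodings map to SPEED_INVALID / DUPLEX_INVALID.
 */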
1438 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1439 {
1440         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1441         case MII_TG3_AUX_STAT_10HALF:
1442                 *speed = SPEED_10;
1443                 *duplex = DUPLEX_HALF;
1444                 break;
1445
1446         case MII_TG3_AUX_STAT_10FULL:
1447                 *speed = SPEED_10;
1448                 *duplex = DUPLEX_FULL;
1449                 break;
1450
1451         case MII_TG3_AUX_STAT_100HALF:
1452                 *speed = SPEED_100;
1453                 *duplex = DUPLEX_HALF;
1454                 break;
1455
1456         case MII_TG3_AUX_STAT_100FULL:
1457                 *speed = SPEED_100;
1458                 *duplex = DUPLEX_FULL;
1459                 break;
1460
1461         case MII_TG3_AUX_STAT_1000HALF:
1462                 *speed = SPEED_1000;
1463                 *duplex = DUPLEX_HALF;
1464                 break;
1465
1466         case MII_TG3_AUX_STAT_1000FULL:
1467                 *speed = SPEED_1000;
1468                 *duplex = DUPLEX_FULL;
1469                 break;
1470
1471         default:
1472                 *speed = SPEED_INVALID;
1473                 *duplex = DUPLEX_INVALID;
1474                 break;
1475         }
1476 }
1477
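/* Program the copper PHY advertisement and, if needed, a forced
 * BMCR setting from tp->link_config.  Three cases are handled
 * below: a reduced 10 (and optionally 100) Mbps advertisement when
 * entering low-power/WOL mode, the full 10/100/1000 advertisement
 * when no speed has been requested, and a single forced
 * speed/duplex otherwise.
 */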
1478 static void tg3_phy_copper_begin(struct tg3 *tp)
1479 {
1480         u32 new_adv;
1481         int i;
1482
1483         if (tp->link_config.phy_is_low_power) {
1484                 /* Entering low power mode.  Disable gigabit and
1485                  * 100baseT advertisements.
1486                  */
1487                 tg3_writephy(tp, MII_TG3_CTRL, 0);
1488
1489                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1490                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1491                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1492                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1493
1494                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1495         } else if (tp->link_config.speed == SPEED_INVALID) {
1496                 tp->link_config.advertising =
1497                         (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
1498                          ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
1499                          ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
1500                          ADVERTISED_Autoneg | ADVERTISED_MII);
1501
1502                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1503                         tp->link_config.advertising &=
1504                                 ~(ADVERTISED_1000baseT_Half |
1505                                   ADVERTISED_1000baseT_Full);
1506
1507                 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1508                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1509                         new_adv |= ADVERTISE_10HALF;
1510                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1511                         new_adv |= ADVERTISE_10FULL;
1512                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1513                         new_adv |= ADVERTISE_100HALF;
1514                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1515                         new_adv |= ADVERTISE_100FULL;
1516                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1517
1518                 if (tp->link_config.advertising &
1519                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1520                         new_adv = 0;
1521                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1522                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1523                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1524                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1525                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1526                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1527                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1528                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1529                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1530                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1531                 } else {
1532                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1533                 }
1534         } else {
1535                 /* Asking for a specific link mode. */
1536                 if (tp->link_config.speed == SPEED_1000) {
1537                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1538                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1539
1540                         if (tp->link_config.duplex == DUPLEX_FULL)
1541                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1542                         else
1543                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1544                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1545                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1546                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1547                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1548                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1549                 } else {
1550                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1551
1552                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1553                         if (tp->link_config.speed == SPEED_100) {
1554                                 if (tp->link_config.duplex == DUPLEX_FULL)
1555                                         new_adv |= ADVERTISE_100FULL;
1556                                 else
1557                                         new_adv |= ADVERTISE_100HALF;
1558                         } else {
1559                                 if (tp->link_config.duplex == DUPLEX_FULL)
1560                                         new_adv |= ADVERTISE_10FULL;
1561                                 else
1562                                         new_adv |= ADVERTISE_10HALF;
1563                         }
1564                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1565                 }
1566         }
1567
1568         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1569             tp->link_config.speed != SPEED_INVALID) {
1570                 u32 bmcr, orig_bmcr;
1571
1572                 tp->link_config.active_speed = tp->link_config.speed;
1573                 tp->link_config.active_duplex = tp->link_config.duplex;
1574
1575                 bmcr = 0;
1576                 switch (tp->link_config.speed) {
1577                 default:
1578                 case SPEED_10:
1579                         break;
1580
1581                 case SPEED_100:
1582                         bmcr |= BMCR_SPEED100;
1583                         break;
1584
1585                 case SPEED_1000:
1586                         bmcr |= TG3_BMCR_SPEED1000;
1587                         break;
1588                 }
1589
1590                 if (tp->link_config.duplex == DUPLEX_FULL)
1591                         bmcr |= BMCR_FULLDPLX;
1592
1593                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1594                     (bmcr != orig_bmcr)) {
1595                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1596                         for (i = 0; i < 1500; i++) {
1597                                 u32 tmp;
1598
1599                                 udelay(10);
1600                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1601                                     tg3_readphy(tp, MII_BMSR, &tmp))
1602                                         continue;
1603                                 if (!(tmp & BMSR_LSTATUS)) {
1604                                         udelay(40);
1605                                         break;
1606                                 }
1607                         }
1608                         tg3_writephy(tp, MII_BMCR, bmcr);
1609                         udelay(40);
1610                 }
1611         } else {
1612                 tg3_writephy(tp, MII_BMCR,
1613                              BMCR_ANENABLE | BMCR_ANRESTART);
1614         }
1615 }
1616
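/* 5401 PHY DSP setup.  The DSP is accessed indirectly: the target
 * location is written to MII_TG3_DSP_ADDRESS and the data to
 * MII_TG3_DSP_RW_PORT.  The particular address/value pairs below
 * look like an opaque vendor tuning sequence and are left as-is.
 */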
1617 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1618 {
1619         int err;
1620
1621         /* Turn off tap power management. */
1622         /* Set Extended packet length bit */
1623         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1624
1625         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1626         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1627
1628         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1629         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1630
1631         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1632         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1633
1634         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1635         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1636
1637         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1638         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1639
1640         udelay(40);
1641
1642         return err;
1643 }
1644
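/* Return 1 only if the PHY currently advertises every 10/100 mode
 * (and both 1000 modes unless the board is 10/100-only).  The link
 * setup code uses this to spot a reduced advertisement left over
 * from low-power mode and force an autoneg restart.
 */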
1645 static int tg3_copper_is_advertising_all(struct tg3 *tp)
1646 {
1647         u32 adv_reg, all_mask;
1648
1649         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1650                 return 0;
1651
1652         all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1653                     ADVERTISE_100HALF | ADVERTISE_100FULL);
1654         if ((adv_reg & all_mask) != all_mask)
1655                 return 0;
1656         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1657                 u32 tg3_ctrl;
1658
1659                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1660                         return 0;
1661
1662                 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1663                             MII_TG3_CTRL_ADV_1000_FULL);
1664                 if ((tg3_ctrl & all_mask) != all_mask)
1665                         return 0;
1666         }
1667         return 1;
1668 }
1669
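/* Copper link bring-up/re-check: apply per-chip PHY workarounds,
 * optionally reset the PHY, read the negotiated speed/duplex from
 * the AUX status register, resolve flow control, then program
 * MAC_MODE/MAC_EVENT and report any carrier change.
 */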
1670 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1671 {
1672         int current_link_up;
1673         u32 bmsr, dummy;
1674         u16 current_speed;
1675         u8 current_duplex;
1676         int i, err;
1677
1678         tw32(MAC_EVENT, 0);
1679
1680         tw32_f(MAC_STATUS,
1681              (MAC_STATUS_SYNC_CHANGED |
1682               MAC_STATUS_CFG_CHANGED |
1683               MAC_STATUS_MI_COMPLETION |
1684               MAC_STATUS_LNKSTATE_CHANGED));
1685         udelay(40);
1686
1687         tp->mi_mode = MAC_MI_MODE_BASE;
1688         tw32_f(MAC_MI_MODE, tp->mi_mode);
1689         udelay(80);
1690
1691         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1692
1693         /* Some third-party PHYs need to be reset on link going
1694          * down.
1695          */
1696         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1697              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1698              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1699             netif_carrier_ok(tp->dev)) {
1700                 tg3_readphy(tp, MII_BMSR, &bmsr);
1701                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1702                     !(bmsr & BMSR_LSTATUS))
1703                         force_reset = 1;
1704         }
1705         if (force_reset)
1706                 tg3_phy_reset(tp);
1707
1708         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1709                 tg3_readphy(tp, MII_BMSR, &bmsr);
1710                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
1711                     !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1712                         bmsr = 0;
1713
1714                 if (!(bmsr & BMSR_LSTATUS)) {
1715                         err = tg3_init_5401phy_dsp(tp);
1716                         if (err)
1717                                 return err;
1718
1719                         tg3_readphy(tp, MII_BMSR, &bmsr);
1720                         for (i = 0; i < 1000; i++) {
1721                                 udelay(10);
1722                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1723                                     (bmsr & BMSR_LSTATUS)) {
1724                                         udelay(40);
1725                                         break;
1726                                 }
1727                         }
1728
1729                         if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1730                             !(bmsr & BMSR_LSTATUS) &&
1731                             tp->link_config.active_speed == SPEED_1000) {
1732                                 err = tg3_phy_reset(tp);
1733                                 if (!err)
1734                                         err = tg3_init_5401phy_dsp(tp);
1735                                 if (err)
1736                                         return err;
1737                         }
1738                 }
1739         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1740                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1741                 /* 5701 {A0,B0} CRC bug workaround */
1742                 tg3_writephy(tp, 0x15, 0x0a75);
1743                 tg3_writephy(tp, 0x1c, 0x8c68);
1744                 tg3_writephy(tp, 0x1c, 0x8d68);
1745                 tg3_writephy(tp, 0x1c, 0x8c68);
1746         }
1747
1748         /* Clear pending interrupts... */
1749         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1750         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1751
1752         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1753                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
1754         else
1755                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1756
1757         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1758             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1759                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1760                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1761                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1762                 else
1763                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1764         }
1765
1766         current_link_up = 0;
1767         current_speed = SPEED_INVALID;
1768         current_duplex = DUPLEX_INVALID;
1769
1770         if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
1771                 u32 val;
1772
1773                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
1774                 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
1775                 if (!(val & (1 << 10))) {
1776                         val |= (1 << 10);
1777                         tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
1778                         goto relink;
1779                 }
1780         }
1781
1782         bmsr = 0;
1783         for (i = 0; i < 100; i++) {
1784                 tg3_readphy(tp, MII_BMSR, &bmsr);
1785                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1786                     (bmsr & BMSR_LSTATUS))
1787                         break;
1788                 udelay(40);
1789         }
1790
1791         if (bmsr & BMSR_LSTATUS) {
1792                 u32 aux_stat, bmcr;
1793
1794                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1795                 for (i = 0; i < 2000; i++) {
1796                         udelay(10);
1797                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
1798                             aux_stat)
1799                                 break;
1800                 }
1801
1802                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
1803                                              &current_speed,
1804                                              &current_duplex);
1805
1806                 bmcr = 0;
1807                 for (i = 0; i < 200; i++) {
1808                         tg3_readphy(tp, MII_BMCR, &bmcr);
1809                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
1810                                 continue;
1811                         if (bmcr && bmcr != 0x7fff)
1812                                 break;
1813                         udelay(10);
1814                 }
1815
1816                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
1817                         if (bmcr & BMCR_ANENABLE) {
1818                                 current_link_up = 1;
1819
1820                                 /* Force autoneg restart if we are exiting
1821                                  * low power mode.
1822                                  */
1823                                 if (!tg3_copper_is_advertising_all(tp))
1824                                         current_link_up = 0;
1825                         } else {
1826                                 current_link_up = 0;
1827                         }
1828                 } else {
1829                         if (!(bmcr & BMCR_ANENABLE) &&
1830                             tp->link_config.speed == current_speed &&
1831                             tp->link_config.duplex == current_duplex) {
1832                                 current_link_up = 1;
1833                         } else {
1834                                 current_link_up = 0;
1835                         }
1836                 }
1837
1838                 tp->link_config.active_speed = current_speed;
1839                 tp->link_config.active_duplex = current_duplex;
1840         }
1841
1842         if (current_link_up == 1 &&
1843             (tp->link_config.active_duplex == DUPLEX_FULL) &&
1844             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
1845                 u32 local_adv, remote_adv;
1846
1847                 if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
1848                         local_adv = 0;
1849                 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1850
1851                 if (tg3_readphy(tp, MII_LPA, &remote_adv))
1852                         remote_adv = 0;
1853
1854                 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1855
1856                 /* If we are not advertising full pause capability,
1857                  * something is wrong.  Bring the link down and reconfigure.
1858                  */
1859                 if (local_adv != ADVERTISE_PAUSE_CAP) {
1860                         current_link_up = 0;
1861                 } else {
1862                         tg3_setup_flow_control(tp, local_adv, remote_adv);
1863                 }
1864         }
1865 relink:
1866         if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
1867                 u32 tmp;
1868
1869                 tg3_phy_copper_begin(tp);
1870
1871                 tg3_readphy(tp, MII_BMSR, &tmp);
1872                 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
1873                     (tmp & BMSR_LSTATUS))
1874                         current_link_up = 1;
1875         }
1876
1877         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
1878         if (current_link_up == 1) {
1879                 if (tp->link_config.active_speed == SPEED_100 ||
1880                     tp->link_config.active_speed == SPEED_10)
1881                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
1882                 else
1883                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1884         } else
1885                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1886
1887         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
1888         if (tp->link_config.active_duplex == DUPLEX_HALF)
1889                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
1890
1891         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1892         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
1893                 if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
1894                     (current_link_up == 1 &&
1895                      tp->link_config.active_speed == SPEED_10))
1896                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1897         } else {
1898                 if (current_link_up == 1)
1899                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1900         }
1901
1902         /* ??? Without this setting Netgear GA302T PHY does not
1903          * ??? send/receive packets...
1904          */
1905         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
1906             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
1907                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
1908                 tw32_f(MAC_MI_MODE, tp->mi_mode);
1909                 udelay(80);
1910         }
1911
1912         tw32_f(MAC_MODE, tp->mac_mode);
1913         udelay(40);
1914
1915         if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
1916                 /* Polled via timer. */
1917                 tw32_f(MAC_EVENT, 0);
1918         } else {
1919                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
1920         }
1921         udelay(40);
1922
1923         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
1924             current_link_up == 1 &&
1925             tp->link_config.active_speed == SPEED_1000 &&
1926             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
1927              (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
1928                 udelay(120);
1929                 tw32_f(MAC_STATUS,
1930                      (MAC_STATUS_SYNC_CHANGED |
1931                       MAC_STATUS_CFG_CHANGED));
1932                 udelay(40);
1933                 tg3_write_mem(tp,
1934                               NIC_SRAM_FIRMWARE_MBOX,
1935                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
1936         }
1937
1938         if (current_link_up != netif_carrier_ok(tp->dev)) {
1939                 if (current_link_up)
1940                         netif_carrier_on(tp->dev);
1941                 else
1942                         netif_carrier_off(tp->dev);
1943                 tg3_link_report(tp);
1944         }
1945
1946         return 0;
1947 }
1948
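/* Software 1000BaseX autonegotiation state for the fiber (TBI)
 * interface.  The ANEG_STATE_* values and MR_* flags appear to
 * mirror the arbitration state diagram and management variables of
 * IEEE 802.3 Clause 37; tg3_fiber_aneg_smachine() below advances
 * the machine one step per invocation.
 */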
1949 struct tg3_fiber_aneginfo {
1950         int state;
1951 #define ANEG_STATE_UNKNOWN              0
1952 #define ANEG_STATE_AN_ENABLE            1
1953 #define ANEG_STATE_RESTART_INIT         2
1954 #define ANEG_STATE_RESTART              3
1955 #define ANEG_STATE_DISABLE_LINK_OK      4
1956 #define ANEG_STATE_ABILITY_DETECT_INIT  5
1957 #define ANEG_STATE_ABILITY_DETECT       6
1958 #define ANEG_STATE_ACK_DETECT_INIT      7
1959 #define ANEG_STATE_ACK_DETECT           8
1960 #define ANEG_STATE_COMPLETE_ACK_INIT    9
1961 #define ANEG_STATE_COMPLETE_ACK         10
1962 #define ANEG_STATE_IDLE_DETECT_INIT     11
1963 #define ANEG_STATE_IDLE_DETECT          12
1964 #define ANEG_STATE_LINK_OK              13
1965 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
1966 #define ANEG_STATE_NEXT_PAGE_WAIT       15
1967
1968         u32 flags;
1969 #define MR_AN_ENABLE            0x00000001
1970 #define MR_RESTART_AN           0x00000002
1971 #define MR_AN_COMPLETE          0x00000004
1972 #define MR_PAGE_RX              0x00000008
1973 #define MR_NP_LOADED            0x00000010
1974 #define MR_TOGGLE_TX            0x00000020
1975 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
1976 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
1977 #define MR_LP_ADV_SYM_PAUSE     0x00000100
1978 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
1979 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
1980 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
1981 #define MR_LP_ADV_NEXT_PAGE     0x00001000
1982 #define MR_TOGGLE_RX            0x00002000
1983 #define MR_NP_RX                0x00004000
1984
1985 #define MR_LINK_OK              0x80000000
1986
1987         unsigned long link_time, cur_time;
1988
1989         u32 ability_match_cfg;
1990         int ability_match_count;
1991
1992         char ability_match, idle_match, ack_match;
1993
1994         u32 txconfig, rxconfig;
1995 #define ANEG_CFG_NP             0x00000080
1996 #define ANEG_CFG_ACK            0x00000040
1997 #define ANEG_CFG_RF2            0x00000020
1998 #define ANEG_CFG_RF1            0x00000010
1999 #define ANEG_CFG_PS2            0x00000001
2000 #define ANEG_CFG_PS1            0x00008000
2001 #define ANEG_CFG_HD             0x00004000
2002 #define ANEG_CFG_FD             0x00002000
2003 #define ANEG_CFG_INVAL          0x00001f06
2004
2005 };
2006 #define ANEG_OK         0
2007 #define ANEG_DONE       1
2008 #define ANEG_TIMER_ENAB 2
2009 #define ANEG_FAILED     -1
2010
2011 #define ANEG_STATE_SETTLE_TIME  10000
2012
2013 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2014                                    struct tg3_fiber_aneginfo *ap)
2015 {
2016         unsigned long delta;
2017         u32 rx_cfg_reg;
2018         int ret;
2019
2020         if (ap->state == ANEG_STATE_UNKNOWN) {
2021                 ap->rxconfig = 0;
2022                 ap->link_time = 0;
2023                 ap->cur_time = 0;
2024                 ap->ability_match_cfg = 0;
2025                 ap->ability_match_count = 0;
2026                 ap->ability_match = 0;
2027                 ap->idle_match = 0;
2028                 ap->ack_match = 0;
2029         }
2030         ap->cur_time++;
2031
2032         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2033                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2034
2035                 if (rx_cfg_reg != ap->ability_match_cfg) {
2036                         ap->ability_match_cfg = rx_cfg_reg;
2037                         ap->ability_match = 0;
2038                         ap->ability_match_count = 0;
2039                 } else {
2040                         if (++ap->ability_match_count > 1) {
2041                                 ap->ability_match = 1;
2042                                 ap->ability_match_cfg = rx_cfg_reg;
2043                         }
2044                 }
2045                 if (rx_cfg_reg & ANEG_CFG_ACK)
2046                         ap->ack_match = 1;
2047                 else
2048                         ap->ack_match = 0;
2049
2050                 ap->idle_match = 0;
2051         } else {
2052                 ap->idle_match = 1;
2053                 ap->ability_match_cfg = 0;
2054                 ap->ability_match_count = 0;
2055                 ap->ability_match = 0;
2056                 ap->ack_match = 0;
2057
2058                 rx_cfg_reg = 0;
2059         }
2060
2061         ap->rxconfig = rx_cfg_reg;
2062         ret = ANEG_OK;
2063
2064         switch (ap->state) {
2065         case ANEG_STATE_UNKNOWN:
2066                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2067                         ap->state = ANEG_STATE_AN_ENABLE;
2068
2069                 /* fallthru */
2070         case ANEG_STATE_AN_ENABLE:
2071                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2072                 if (ap->flags & MR_AN_ENABLE) {
2073                         ap->link_time = 0;
2074                         ap->cur_time = 0;
2075                         ap->ability_match_cfg = 0;
2076                         ap->ability_match_count = 0;
2077                         ap->ability_match = 0;
2078                         ap->idle_match = 0;
2079                         ap->ack_match = 0;
2080
2081                         ap->state = ANEG_STATE_RESTART_INIT;
2082                 } else {
2083                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
2084                 }
2085                 break;
2086
2087         case ANEG_STATE_RESTART_INIT:
2088                 ap->link_time = ap->cur_time;
2089                 ap->flags &= ~(MR_NP_LOADED);
2090                 ap->txconfig = 0;
2091                 tw32(MAC_TX_AUTO_NEG, 0);
2092                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2093                 tw32_f(MAC_MODE, tp->mac_mode);
2094                 udelay(40);
2095
2096                 ret = ANEG_TIMER_ENAB;
2097                 ap->state = ANEG_STATE_RESTART;
2098
2099                 /* fallthru */
2100         case ANEG_STATE_RESTART:
2101                 delta = ap->cur_time - ap->link_time;
2102                 if (delta > ANEG_STATE_SETTLE_TIME) {
2103                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2104                 } else {
2105                         ret = ANEG_TIMER_ENAB;
2106                 }
2107                 break;
2108
2109         case ANEG_STATE_DISABLE_LINK_OK:
2110                 ret = ANEG_DONE;
2111                 break;
2112
2113         case ANEG_STATE_ABILITY_DETECT_INIT:
2114                 ap->flags &= ~(MR_TOGGLE_TX);
2115                 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
2116                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2117                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2118                 tw32_f(MAC_MODE, tp->mac_mode);
2119                 udelay(40);
2120
2121                 ap->state = ANEG_STATE_ABILITY_DETECT;
2122                 break;
2123
2124         case ANEG_STATE_ABILITY_DETECT:
2125                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2126                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
2127                 }
2128                 break;
2129
2130         case ANEG_STATE_ACK_DETECT_INIT:
2131                 ap->txconfig |= ANEG_CFG_ACK;
2132                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2133                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2134                 tw32_f(MAC_MODE, tp->mac_mode);
2135                 udelay(40);
2136
2137                 ap->state = ANEG_STATE_ACK_DETECT;
2138
2139                 /* fallthru */
2140         case ANEG_STATE_ACK_DETECT:
2141                 if (ap->ack_match != 0) {
2142                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2143                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2144                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2145                         } else {
2146                                 ap->state = ANEG_STATE_AN_ENABLE;
2147                         }
2148                 } else if (ap->ability_match != 0 &&
2149                            ap->rxconfig == 0) {
2150                         ap->state = ANEG_STATE_AN_ENABLE;
2151                 }
2152                 break;
2153
2154         case ANEG_STATE_COMPLETE_ACK_INIT:
2155                 if (ap->rxconfig & ANEG_CFG_INVAL) {
2156                         ret = ANEG_FAILED;
2157                         break;
2158                 }
2159                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2160                                MR_LP_ADV_HALF_DUPLEX |
2161                                MR_LP_ADV_SYM_PAUSE |
2162                                MR_LP_ADV_ASYM_PAUSE |
2163                                MR_LP_ADV_REMOTE_FAULT1 |
2164                                MR_LP_ADV_REMOTE_FAULT2 |
2165                                MR_LP_ADV_NEXT_PAGE |
2166                                MR_TOGGLE_RX |
2167                                MR_NP_RX);
2168                 if (ap->rxconfig & ANEG_CFG_FD)
2169                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2170                 if (ap->rxconfig & ANEG_CFG_HD)
2171                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2172                 if (ap->rxconfig & ANEG_CFG_PS1)
2173                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
2174                 if (ap->rxconfig & ANEG_CFG_PS2)
2175                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2176                 if (ap->rxconfig & ANEG_CFG_RF1)
2177                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2178                 if (ap->rxconfig & ANEG_CFG_RF2)
2179                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2180                 if (ap->rxconfig & ANEG_CFG_NP)
2181                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
2182
2183                 ap->link_time = ap->cur_time;
2184
2185                 ap->flags ^= (MR_TOGGLE_TX);
2186                 if (ap->rxconfig & 0x0008)
2187                         ap->flags |= MR_TOGGLE_RX;
2188                 if (ap->rxconfig & ANEG_CFG_NP)
2189                         ap->flags |= MR_NP_RX;
2190                 ap->flags |= MR_PAGE_RX;
2191
2192                 ap->state = ANEG_STATE_COMPLETE_ACK;
2193                 ret = ANEG_TIMER_ENAB;
2194                 break;
2195
2196         case ANEG_STATE_COMPLETE_ACK:
2197                 if (ap->ability_match != 0 &&
2198                     ap->rxconfig == 0) {
2199                         ap->state = ANEG_STATE_AN_ENABLE;
2200                         break;
2201                 }
2202                 delta = ap->cur_time - ap->link_time;
2203                 if (delta > ANEG_STATE_SETTLE_TIME) {
2204                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2205                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2206                         } else {
2207                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2208                                     !(ap->flags & MR_NP_RX)) {
2209                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2210                                 } else {
2211                                         ret = ANEG_FAILED;
2212                                 }
2213                         }
2214                 }
2215                 break;
2216
2217         case ANEG_STATE_IDLE_DETECT_INIT:
2218                 ap->link_time = ap->cur_time;
2219                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2220                 tw32_f(MAC_MODE, tp->mac_mode);
2221                 udelay(40);
2222
2223                 ap->state = ANEG_STATE_IDLE_DETECT;
2224                 ret = ANEG_TIMER_ENAB;
2225                 break;
2226
2227         case ANEG_STATE_IDLE_DETECT:
2228                 if (ap->ability_match != 0 &&
2229                     ap->rxconfig == 0) {
2230                         ap->state = ANEG_STATE_AN_ENABLE;
2231                         break;
2232                 }
2233                 delta = ap->cur_time - ap->link_time;
2234                 if (delta > ANEG_STATE_SETTLE_TIME) {
2235                         /* XXX another gem from the Broadcom driver :( */
2236                         ap->state = ANEG_STATE_LINK_OK;
2237                 }
2238                 break;
2239
2240         case ANEG_STATE_LINK_OK:
2241                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2242                 ret = ANEG_DONE;
2243                 break;
2244
2245         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2246                 /* ??? unimplemented */
2247                 break;
2248
2249         case ANEG_STATE_NEXT_PAGE_WAIT:
2250                 /* ??? unimplemented */
2251                 break;
2252
2253         default:
2254                 ret = ANEG_FAILED;
2255                 break;
2256         }
2257
2258         return ret;
2259 }
2260
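/* Drive the software autoneg state machine to completion, stepping
 * it up to ~195,000 times with a 1 usec delay per step (roughly
 * 200 ms).  Returns 1 only if the machine finished with ANEG_DONE
 * and at least one of MR_AN_COMPLETE, MR_LINK_OK or
 * MR_LP_ADV_FULL_DUPLEX set; the raw flags are passed back to the
 * caller through *flags.
 */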
2261 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2262 {
2263         int res = 0;
2264         struct tg3_fiber_aneginfo aninfo;
2265         int status = ANEG_FAILED;
2266         unsigned int tick;
2267         u32 tmp;
2268
2269         tw32_f(MAC_TX_AUTO_NEG, 0);
2270
2271         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2272         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2273         udelay(40);
2274
2275         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2276         udelay(40);
2277
2278         memset(&aninfo, 0, sizeof(aninfo));
2279         aninfo.flags |= MR_AN_ENABLE;
2280         aninfo.state = ANEG_STATE_UNKNOWN;
2281         aninfo.cur_time = 0;
2282         tick = 0;
2283         while (++tick < 195000) {
2284                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2285                 if (status == ANEG_DONE || status == ANEG_FAILED)
2286                         break;
2287
2288                 udelay(1);
2289         }
2290
2291         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2292         tw32_f(MAC_MODE, tp->mac_mode);
2293         udelay(40);
2294
2295         *flags = aninfo.flags;
2296
2297         if (status == ANEG_DONE &&
2298             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2299                              MR_LP_ADV_FULL_DUPLEX)))
2300                 res = 1;
2301
2302         return res;
2303 }
2304
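/* One-time initialization of the external BCM8002 SERDES PHY.  The
 * raw writes to registers 0x10/0x11/0x13/0x16/0x18 are an opaque
 * vendor sequence; the inline comments note the intent of each step
 * (PLL lock range, reset, POR toggle, channel select).
 */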
2305 static void tg3_init_bcm8002(struct tg3 *tp)
2306 {
2307         u32 mac_status = tr32(MAC_STATUS);
2308         int i;
2309
2310         /* Reset when initializing the first time or when we have a link. */
2311         if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2312             !(mac_status & MAC_STATUS_PCS_SYNCED))
2313                 return;
2314
2315         /* Set PLL lock range. */
2316         tg3_writephy(tp, 0x16, 0x8007);
2317
2318         /* SW reset */
2319         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2320
2321         /* Wait for reset to complete. */
2322         /* XXX schedule_timeout() ... */
2323         for (i = 0; i < 500; i++)
2324                 udelay(10);
2325
2326         /* Config mode; select PMA/Ch 1 regs. */
2327         tg3_writephy(tp, 0x10, 0x8411);
2328
2329         /* Enable auto-lock and comdet, select txclk for tx. */
2330         tg3_writephy(tp, 0x11, 0x0a10);
2331
2332         tg3_writephy(tp, 0x18, 0x00a0);
2333         tg3_writephy(tp, 0x16, 0x41ff);
2334
2335         /* Assert and deassert POR. */
2336         tg3_writephy(tp, 0x13, 0x0400);
2337         udelay(40);
2338         tg3_writephy(tp, 0x13, 0x0000);
2339
2340         tg3_writephy(tp, 0x11, 0x0a50);
2341         udelay(40);
2342         tg3_writephy(tp, 0x11, 0x0a10);
2343
2344         /* Wait for signal to stabilize */
2345         /* XXX schedule_timeout() ... */
2346         for (i = 0; i < 15000; i++)
2347                 udelay(10);
2348
2349         /* Deselect the channel register so we can read the PHYID
2350          * later.
2351          */
2352         tg3_writephy(tp, 0x10, 0x8011);
2353 }
2354
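/* Fiber link setup when the SERDES digital (SG_DIG) block performs
 * autonegotiation in hardware.  Returns 1 if the link came up.  The
 * MAC_SERDES_CFG adjustments are applied on every chip except 5704
 * A0/A1, preserving the pre-emphasis (bits 0-11, 13, 14) and
 * voltage-regulator (bits 20-23) fields noted below.
 */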
2355 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2356 {
2357         u32 sg_dig_ctrl, sg_dig_status;
2358         u32 serdes_cfg, expected_sg_dig_ctrl;
2359         int workaround, port_a;
2360         int current_link_up;
2361
2362         serdes_cfg = 0;
2363         expected_sg_dig_ctrl = 0;
2364         workaround = 0;
2365         port_a = 1;
2366         current_link_up = 0;
2367
2368         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2369             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2370                 workaround = 1;
2371                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2372                         port_a = 0;
2373
2374                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2375                 /* preserve bits 20-23 for voltage regulator */
2376                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2377         }
2378
2379         sg_dig_ctrl = tr32(SG_DIG_CTRL);
2380
2381         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2382                 if (sg_dig_ctrl & (1 << 31)) {
2383                         if (workaround) {
2384                                 u32 val = serdes_cfg;
2385
2386                                 if (port_a)
2387                                         val |= 0xc010000;
2388                                 else
2389                                         val |= 0x4010000;
2390                                 tw32_f(MAC_SERDES_CFG, val);
2391                         }
2392                         tw32_f(SG_DIG_CTRL, 0x01388400);
2393                 }
2394                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2395                         tg3_setup_flow_control(tp, 0, 0);
2396                         current_link_up = 1;
2397                 }
2398                 goto out;
2399         }
2400
2401         /* Want auto-negotiation.  */
2402         expected_sg_dig_ctrl = 0x81388400;
2403
2404         /* Pause capability */
2405         expected_sg_dig_ctrl |= (1 << 11);
2406
2407         /* Asymmetric pause */
2408         expected_sg_dig_ctrl |= (1 << 12);
2409
2410         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2411                 if (workaround)
2412                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2413                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2414                 udelay(5);
2415                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2416
2417                 tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2418         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2419                                  MAC_STATUS_SIGNAL_DET)) {
2420                 int i;
2421
2422                 /* Give time to negotiate (~200ms) */
2423                 for (i = 0; i < 40000; i++) {
2424                         sg_dig_status = tr32(SG_DIG_STATUS);
2425                         if (sg_dig_status & (0x3))
2426                                 break;
2427                         udelay(5);
2428                 }
2429                 mac_status = tr32(MAC_STATUS);
2430
2431                 if ((sg_dig_status & (1 << 1)) &&
2432                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
2433                         u32 local_adv, remote_adv;
2434
2435                         local_adv = ADVERTISE_PAUSE_CAP;
2436                         remote_adv = 0;
2437                         if (sg_dig_status & (1 << 19))
2438                                 remote_adv |= LPA_PAUSE_CAP;
2439                         if (sg_dig_status & (1 << 20))
2440                                 remote_adv |= LPA_PAUSE_ASYM;
2441
2442                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2443                         current_link_up = 1;
2444                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2445                 } else if (!(sg_dig_status & (1 << 1))) {
2446                         if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
2447                                 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2448                         else {
2449                                 if (workaround) {
2450                                         u32 val = serdes_cfg;
2451
2452                                         if (port_a)
2453                                                 val |= 0xc010000;
2454                                         else
2455                                                 val |= 0x4010000;
2456
2457                                         tw32_f(MAC_SERDES_CFG, val);
2458                                 }
2459
2460                                 tw32_f(SG_DIG_CTRL, 0x01388400);
2461                                 udelay(40);
2462
2463                                 /* Link parallel detection - link is up
2464                                  * only if we have PCS_SYNC and not
2465                                  * receiving config code words. */
2466                                 mac_status = tr32(MAC_STATUS);
2467                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2468                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
2469                                         tg3_setup_flow_control(tp, 0, 0);
2470                                         current_link_up = 1;
2471                                 }
2472                         }
2473                 }
2474         }
2475
2476 out:
2477         return current_link_up;
2478 }
2479
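/* Fiber link setup without the SG_DIG block: run the software
 * autoneg state machine via fiber_autoneg() when autoneg is
 * enabled, otherwise force a 1000-full link.  Returns 1 if the link
 * is considered up.
 */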
2480 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2481 {
2482         int current_link_up = 0;
2483
2484         if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
2485                 tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
2486                 goto out;
2487         }
2488
2489         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2490                 u32 flags;
2491                 int i;
2492
2493                 if (fiber_autoneg(tp, &flags)) {
2494                         u32 local_adv, remote_adv;
2495
2496                         local_adv = ADVERTISE_PAUSE_CAP;
2497                         remote_adv = 0;
2498                         if (flags & MR_LP_ADV_SYM_PAUSE)
2499                                 remote_adv |= LPA_PAUSE_CAP;
2500                         if (flags & MR_LP_ADV_ASYM_PAUSE)
2501                                 remote_adv |= LPA_PAUSE_ASYM;
2502
2503                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2504
2505                         tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2506                         current_link_up = 1;
2507                 }
2508                 for (i = 0; i < 30; i++) {
2509                         udelay(20);
2510                         tw32_f(MAC_STATUS,
2511                                (MAC_STATUS_SYNC_CHANGED |
2512                                 MAC_STATUS_CFG_CHANGED));
2513                         udelay(40);
2514                         if ((tr32(MAC_STATUS) &
2515                              (MAC_STATUS_SYNC_CHANGED |
2516                               MAC_STATUS_CFG_CHANGED)) == 0)
2517                                 break;
2518                 }
2519
2520                 mac_status = tr32(MAC_STATUS);
2521                 if (current_link_up == 0 &&
2522                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
2523                     !(mac_status & MAC_STATUS_RCVD_CFG))
2524                         current_link_up = 1;
2525         } else {
2526                 /* Forcing 1000FD link up. */
2527                 current_link_up = 1;
2528                 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2529
2530                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2531                 udelay(40);
2532         }
2533
2534 out:
2535         return current_link_up;
2536 }
2537
2538 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2539 {
2540         u32 orig_pause_cfg;
2541         u16 orig_active_speed;
2542         u8 orig_active_duplex;
2543         u32 mac_status;
2544         int current_link_up;
2545         int i;
2546
2547         orig_pause_cfg =
2548                 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2549                                   TG3_FLAG_TX_PAUSE));
2550         orig_active_speed = tp->link_config.active_speed;
2551         orig_active_duplex = tp->link_config.active_duplex;
2552
2553         if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2554             netif_carrier_ok(tp->dev) &&
2555             (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
2556                 mac_status = tr32(MAC_STATUS);
2557                 mac_status &= (MAC_STATUS_PCS_SYNCED |
2558                                MAC_STATUS_SIGNAL_DET |
2559                                MAC_STATUS_CFG_CHANGED |
2560                                MAC_STATUS_RCVD_CFG);
2561                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
2562                                    MAC_STATUS_SIGNAL_DET)) {
2563                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2564                                             MAC_STATUS_CFG_CHANGED));
2565                         return 0;
2566                 }
2567         }
2568
2569         tw32_f(MAC_TX_AUTO_NEG, 0);
2570
2571         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2572         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2573         tw32_f(MAC_MODE, tp->mac_mode);
2574         udelay(40);
2575
2576         if (tp->phy_id == PHY_ID_BCM8002)
2577                 tg3_init_bcm8002(tp);
2578
2579         /* Enable link change event even when serdes polling.  */
2580         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2581         udelay(40);
2582
2583         current_link_up = 0;
2584         mac_status = tr32(MAC_STATUS);
2585
2586         if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
2587                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
2588         else
2589                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2590
2591         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2592         tw32_f(MAC_MODE, tp->mac_mode);
2593         udelay(40);
2594
2595         tp->hw_status->status =
2596                 (SD_STATUS_UPDATED |
2597                  (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2598
2599         for (i = 0; i < 100; i++) {
2600                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2601                                     MAC_STATUS_CFG_CHANGED));
2602                 udelay(5);
2603                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
2604                                          MAC_STATUS_CFG_CHANGED)) == 0)
2605                         break;
2606         }
2607
2608         mac_status = tr32(MAC_STATUS);
2609         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
2610                 current_link_up = 0;
2611                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2612                         tw32_f(MAC_MODE, (tp->mac_mode |
2613                                           MAC_MODE_SEND_CONFIGS));
2614                         udelay(1);
2615                         tw32_f(MAC_MODE, tp->mac_mode);
2616                 }
2617         }
2618
2619         if (current_link_up == 1) {
2620                 tp->link_config.active_speed = SPEED_1000;
2621                 tp->link_config.active_duplex = DUPLEX_FULL;
2622                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2623                                     LED_CTRL_LNKLED_OVERRIDE |
2624                                     LED_CTRL_1000MBPS_ON));
2625         } else {
2626                 tp->link_config.active_speed = SPEED_INVALID;
2627                 tp->link_config.active_duplex = DUPLEX_INVALID;
2628                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2629                                     LED_CTRL_LNKLED_OVERRIDE |
2630                                     LED_CTRL_TRAFFIC_OVERRIDE));
2631         }
2632
2633         if (current_link_up != netif_carrier_ok(tp->dev)) {
2634                 if (current_link_up)
2635                         netif_carrier_on(tp->dev);
2636                 else
2637                         netif_carrier_off(tp->dev);
2638                 tg3_link_report(tp);
2639         } else {
2640                 u32 now_pause_cfg =
2641                         tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2642                                          TG3_FLAG_TX_PAUSE);
2643                 if (orig_pause_cfg != now_pause_cfg ||
2644                     orig_active_speed != tp->link_config.active_speed ||
2645                     orig_active_duplex != tp->link_config.active_duplex)
2646                         tg3_link_report(tp);
2647         }
2648
2649         return 0;
2650 }
2651
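/* Fiber link setup for boards whose SERDES is reached through an
 * MII register interface (apparently the TG3_FLG2_MII_SERDES case):
 * the 1000BaseX ADVERTISE_1000X advertisement bits are used, but
 * the flow otherwise mirrors the copper path.
 */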
2652 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
2653 {
2654         int current_link_up, err = 0;
2655         u32 bmsr, bmcr;
2656         u16 current_speed;
2657         u8 current_duplex;
2658
2659         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2660         tw32_f(MAC_MODE, tp->mac_mode);
2661         udelay(40);
2662
2663         tw32(MAC_EVENT, 0);
2664
2665         tw32_f(MAC_STATUS,
2666              (MAC_STATUS_SYNC_CHANGED |
2667               MAC_STATUS_CFG_CHANGED |
2668               MAC_STATUS_MI_COMPLETION |
2669               MAC_STATUS_LNKSTATE_CHANGED));
2670         udelay(40);
2671
2672         if (force_reset)
2673                 tg3_phy_reset(tp);
2674
2675         current_link_up = 0;
2676         current_speed = SPEED_INVALID;
2677         current_duplex = DUPLEX_INVALID;
2678
2679         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2680         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2681
2682         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
2683
2684         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
2685             (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2686                 /* do nothing, just check for link up at the end */
2687         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2688                 u32 adv, new_adv;
2689
2690                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2691                 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
2692                                   ADVERTISE_1000XPAUSE |
2693                                   ADVERTISE_1000XPSE_ASYM |
2694                                   ADVERTISE_SLCT);
2695
2696                 /* Always advertise symmetric PAUSE just like copper */
2697                 new_adv |= ADVERTISE_1000XPAUSE;
2698
2699                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2700                         new_adv |= ADVERTISE_1000XHALF;
2701                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2702                         new_adv |= ADVERTISE_1000XFULL;
2703
2704                 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
2705                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
2706                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
2707                         tg3_writephy(tp, MII_BMCR, bmcr);
2708
2709                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2710                         tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2711                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2712
2713                         return err;
2714                 }
2715         } else {
2716                 u32 new_bmcr;
2717
2718                 bmcr &= ~BMCR_SPEED1000;
2719                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
2720
2721                 if (tp->link_config.duplex == DUPLEX_FULL)
2722                         new_bmcr |= BMCR_FULLDPLX;
2723
2724                 if (new_bmcr != bmcr) {
2725                         /* BMCR_SPEED1000 is a reserved bit that needs
2726                          * to be set on write.
2727                          */
2728                         new_bmcr |= BMCR_SPEED1000;
2729
2730                         /* Force a link down */
2731                         if (netif_carrier_ok(tp->dev)) {
2732                                 u32 adv;
2733
2734                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2735                                 adv &= ~(ADVERTISE_1000XFULL |
2736                                          ADVERTISE_1000XHALF |
2737                                          ADVERTISE_SLCT);
2738                                 tg3_writephy(tp, MII_ADVERTISE, adv);
2739                                 tg3_writephy(tp, MII_BMCR, bmcr |
2740                                                            BMCR_ANRESTART |
2741                                                            BMCR_ANENABLE);
2742                                 udelay(10);
2743                                 netif_carrier_off(tp->dev);
2744                         }
2745                         tg3_writephy(tp, MII_BMCR, new_bmcr);
2746                         bmcr = new_bmcr;
2747                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2748                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2749                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2750                 }
2751         }
2752
2753         if (bmsr & BMSR_LSTATUS) {
2754                 current_speed = SPEED_1000;
2755                 current_link_up = 1;
2756                 if (bmcr & BMCR_FULLDPLX)
2757                         current_duplex = DUPLEX_FULL;
2758                 else
2759                         current_duplex = DUPLEX_HALF;
2760
2761                 if (bmcr & BMCR_ANENABLE) {
2762                         u32 local_adv, remote_adv, common;
2763
2764                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
2765                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
2766                         common = local_adv & remote_adv;
2767                         if (common & (ADVERTISE_1000XHALF |
2768                                       ADVERTISE_1000XFULL)) {
2769                                 if (common & ADVERTISE_1000XFULL)
2770                                         current_duplex = DUPLEX_FULL;
2771                                 else
2772                                         current_duplex = DUPLEX_HALF;
2773
2774                                 tg3_setup_flow_control(tp, local_adv,
2775                                                        remote_adv);
2776                         }
2777                         else
2778                                 current_link_up = 0;
2779                 }
2780         }
2781
2782         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2783         if (tp->link_config.active_duplex == DUPLEX_HALF)
2784                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2785
2786         tw32_f(MAC_MODE, tp->mac_mode);
2787         udelay(40);
2788
2789         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2790
2791         tp->link_config.active_speed = current_speed;
2792         tp->link_config.active_duplex = current_duplex;
2793
2794         if (current_link_up != netif_carrier_ok(tp->dev)) {
2795                 if (current_link_up)
2796                         netif_carrier_on(tp->dev);
2797                 else {
2798                         netif_carrier_off(tp->dev);
2799                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2800                 }
2801                 tg3_link_report(tp);
2802         }
2803         return err;
2804 }
2805
2806 static void tg3_serdes_parallel_detect(struct tg3 *tp)
2807 {
2808         if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED) {
2809                 /* Give autoneg time to complete. */
2810                 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2811                 return;
2812         }
2813         if (!netif_carrier_ok(tp->dev) &&
2814             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
2815                 u32 bmcr;
2816
2817                 tg3_readphy(tp, MII_BMCR, &bmcr);
2818                 if (bmcr & BMCR_ANENABLE) {
2819                         u32 phy1, phy2;
2820
2821                         /* Select shadow register 0x1f */
2822                         tg3_writephy(tp, 0x1c, 0x7c00);
2823                         tg3_readphy(tp, 0x1c, &phy1);
2824
2825                         /* Select expansion interrupt status register */
2826                         tg3_writephy(tp, 0x17, 0x0f01);
2827                         tg3_readphy(tp, 0x15, &phy2);
2828                         tg3_readphy(tp, 0x15, &phy2);
2829
2830                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
2831                                 /* We have signal detect and not receiving
2832                                  * config code words, link is up by parallel
2833                                  * detection.
2834                                  */
2835
2836                                 bmcr &= ~BMCR_ANENABLE;
2837                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
2838                                 tg3_writephy(tp, MII_BMCR, bmcr);
2839                                 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
2840                         }
2841                 }
2842         }
2843         else if (netif_carrier_ok(tp->dev) &&
2844                  (tp->link_config.autoneg == AUTONEG_ENABLE) &&
2845                  (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2846                 u32 phy2;
2847
2848                 /* Select expansion interrupt status register */
2849                 tg3_writephy(tp, 0x17, 0x0f01);
2850                 tg3_readphy(tp, 0x15, &phy2);
2851                 if (phy2 & 0x20) {
2852                         u32 bmcr;
2853
2854                         /* Config code words received, turn on autoneg. */
2855                         tg3_readphy(tp, MII_BMCR, &bmcr);
2856                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
2857
2858                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2859
2860                 }
2861         }
2862 }
2863
2864 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2865 {
2866         int err;
2867
2868         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2869                 err = tg3_setup_fiber_phy(tp, force_reset);
2870         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
2871                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
2872         } else {
2873                 err = tg3_setup_copper_phy(tp, force_reset);
2874         }
2875
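        /* 1000 Mb/s half duplex gets a much larger slot time (0xff vs.
         * the standard 32), presumably to account for carrier
         * extension; all other speed/duplex combinations use the
         * standard value.
         */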
2876         if (tp->link_config.active_speed == SPEED_1000 &&
2877             tp->link_config.active_duplex == DUPLEX_HALF)
2878                 tw32(MAC_TX_LENGTHS,
2879                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2880                       (6 << TX_LENGTHS_IPG_SHIFT) |
2881                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2882         else
2883                 tw32(MAC_TX_LENGTHS,
2884                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2885                       (6 << TX_LENGTHS_IPG_SHIFT) |
2886                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2887
2888         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2889                 if (netif_carrier_ok(tp->dev)) {
2890                         tw32(HOSTCC_STAT_COAL_TICKS,
2891                              tp->coal.stats_block_coalesce_usecs);
2892                 } else {
2893                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
2894                 }
2895         }
2896
2897         return err;
2898 }
2899
2900 /* Tigon3 never reports partial packet sends.  So we do not
2901  * need special logic to handle SKBs that have not had all
2902  * of their frags sent yet, like SunGEM does.
2903  */
2904 static void tg3_tx(struct tg3 *tp)
2905 {
2906         u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
2907         u32 sw_idx = tp->tx_cons;
2908
2909         while (sw_idx != hw_idx) {
2910                 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
2911                 struct sk_buff *skb = ri->skb;
2912                 int i;
2913
2914                 if (unlikely(skb == NULL))
2915                         BUG();
2916
2917                 pci_unmap_single(tp->pdev,
2918                                  pci_unmap_addr(ri, mapping),
2919                                  skb_headlen(skb),
2920                                  PCI_DMA_TODEVICE);
2921
2922                 ri->skb = NULL;
2923
2924                 sw_idx = NEXT_TX(sw_idx);
2925
2926                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2927                         if (unlikely(sw_idx == hw_idx))
2928                                 BUG();
2929
2930                         ri = &tp->tx_buffers[sw_idx];
2931                         if (unlikely(ri->skb != NULL))
2932                                 BUG();
2933
2934                         pci_unmap_page(tp->pdev,
2935                                        pci_unmap_addr(ri, mapping),
2936                                        skb_shinfo(skb)->frags[i].size,
2937                                        PCI_DMA_TODEVICE);
2938
2939                         sw_idx = NEXT_TX(sw_idx);
2940                 }
2941
2942                 dev_kfree_skb(skb);
2943         }
2944
2945         tp->tx_cons = sw_idx;
2946
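        /* The first netif_queue_stopped() test below is lockless and
         * only an optimization; the state and the free descriptor
         * count are re-checked under tx_lock so the queue is not woken
         * while tg3_start_xmit() is in the middle of stopping it.
         */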
2947         if (unlikely(netif_queue_stopped(tp->dev))) {
2948                 spin_lock(&tp->tx_lock);
2949                 if (netif_queue_stopped(tp->dev) &&
2950                     (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
2951                         netif_wake_queue(tp->dev);
2952                 spin_unlock(&tp->tx_lock);
2953         }
2954 }
2955
2956 /* Returns size of skb allocated or < 0 on error.
2957  *
2958  * We only need to fill in the address because the other members
2959  * of the RX descriptor are invariant, see tg3_init_rings.
2960  *
2961  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
2962  * posting buffers we only dirty the first cache line of the RX
2963  * descriptor (containing the address).  Whereas for the RX status
2964  * buffers the cpu only reads the last cacheline of the RX descriptor
2965  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
2966  */
2967 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
2968                             int src_idx, u32 dest_idx_unmasked)
2969 {
2970         struct tg3_rx_buffer_desc *desc;
2971         struct ring_info *map, *src_map;
2972         struct sk_buff *skb;
2973         dma_addr_t mapping;
2974         int skb_size, dest_idx;
2975
2976         src_map = NULL;
2977         switch (opaque_key) {
2978         case RXD_OPAQUE_RING_STD:
2979                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2980                 desc = &tp->rx_std[dest_idx];
2981                 map = &tp->rx_std_buffers[dest_idx];
2982                 if (src_idx >= 0)
2983                         src_map = &tp->rx_std_buffers[src_idx];
2984                 skb_size = tp->rx_pkt_buf_sz;
2985                 break;
2986
2987         case RXD_OPAQUE_RING_JUMBO:
2988                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2989                 desc = &tp->rx_jumbo[dest_idx];
2990                 map = &tp->rx_jumbo_buffers[dest_idx];
2991                 if (src_idx >= 0)
2992                         src_map = &tp->rx_jumbo_buffers[src_idx];
2993                 skb_size = RX_JUMBO_PKT_BUF_SZ;
2994                 break;
2995
2996         default:
2997                 return -EINVAL;
2998         }
2999
3000         /* Do not overwrite any of the map or rp information
3001          * until we are sure we can commit to a new buffer.
3002          *
3003          * Callers depend upon this behavior and assume that
3004          * we leave everything unchanged if we fail.
3005          */
3006         skb = dev_alloc_skb(skb_size);
3007         if (skb == NULL)
3008                 return -ENOMEM;
3009
3010         skb->dev = tp->dev;
3011         skb_reserve(skb, tp->rx_offset);
3012
3013         mapping = pci_map_single(tp->pdev, skb->data,
3014                                  skb_size - tp->rx_offset,
3015                                  PCI_DMA_FROMDEVICE);
3016
3017         map->skb = skb;
3018         pci_unmap_addr_set(map, mapping, mapping);
3019
3020         if (src_map != NULL)
3021                 src_map->skb = NULL;
3022
3023         desc->addr_hi = ((u64)mapping >> 32);
3024         desc->addr_lo = ((u64)mapping & 0xffffffff);
3025
3026         return skb_size;
3027 }
3028
3029 /* We only need to move the address over because the other
3030  * members of the RX descriptor are invariant.  See notes above
3031  * tg3_alloc_rx_skb for full details.
3032  */
3033 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3034                            int src_idx, u32 dest_idx_unmasked)
3035 {
3036         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3037         struct ring_info *src_map, *dest_map;
3038         int dest_idx;
3039
3040         switch (opaque_key) {
3041         case RXD_OPAQUE_RING_STD:
3042                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3043                 dest_desc = &tp->rx_std[dest_idx];
3044                 dest_map = &tp->rx_std_buffers[dest_idx];
3045                 src_desc = &tp->rx_std[src_idx];
3046                 src_map = &tp->rx_std_buffers[src_idx];
3047                 break;
3048
3049         case RXD_OPAQUE_RING_JUMBO:
3050                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3051                 dest_desc = &tp->rx_jumbo[dest_idx];
3052                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3053                 src_desc = &tp->rx_jumbo[src_idx];
3054                 src_map = &tp->rx_jumbo_buffers[src_idx];
3055                 break;
3056
3057         default:
3058                 return;
3059         }
3060
3061         dest_map->skb = src_map->skb;
3062         pci_unmap_addr_set(dest_map, mapping,
3063                            pci_unmap_addr(src_map, mapping));
3064         dest_desc->addr_hi = src_desc->addr_hi;
3065         dest_desc->addr_lo = src_desc->addr_lo;
3066
3067         src_map->skb = NULL;
3068 }
3069
3070 #if TG3_VLAN_TAG_USED
3071 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
3072 {
3073         return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
3074 }
3075 #endif
3076
3077 /* The RX ring scheme is composed of multiple rings which post fresh
3078  * buffers to the chip, and one special ring the chip uses to report
3079  * status back to the host.
3080  *
3081  * The special ring reports the status of received packets to the
3082  * host.  The chip does not write into the original descriptor the
3083  * RX buffer was obtained from.  The chip simply takes the original
3084  * descriptor as provided by the host, updates the status and length
3085  * field, then writes this into the next status ring entry.
3086  *
3087  * Each ring the host uses to post buffers to the chip is described
3088  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
3089  * it is first placed into the on-chip ram.  When the packet's length
3090  * is known, it walks down the TG3_BDINFO entries to select the ring.
3091  * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
3092  * whose MAXLEN covers the new packet's length is chosen.
3093  *
3094  * The "separate ring for rx status" scheme may sound queer, but it makes
3095  * sense from a cache coherency perspective.  If only the host writes
3096  * to the buffer post rings, and only the chip writes to the rx status
3097  * rings, then cache lines never move beyond shared-modified state.
3098  * If both the host and chip were to write into the same ring, cache line
3099  * eviction could occur since both entities want it in an exclusive state.
3100  */
3101 static int tg3_rx(struct tg3 *tp, int budget)
3102 {
3103         u32 work_mask;
3104         u32 sw_idx = tp->rx_rcb_ptr;
3105         u16 hw_idx;
3106         int received;
3107
3108         hw_idx = tp->hw_status->idx[0].rx_producer;
3109         /*
3110          * We need to order the read of hw_idx and the read of
3111          * the opaque cookie.
3112          */
3113         rmb();
3114         work_mask = 0;
3115         received = 0;
3116         while (sw_idx != hw_idx && budget > 0) {
3117                 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
3118                 unsigned int len;
3119                 struct sk_buff *skb;
3120                 dma_addr_t dma_addr;
3121                 u32 opaque_key, desc_idx, *post_ptr;
3122
3123                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
3124                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
3125                 if (opaque_key == RXD_OPAQUE_RING_STD) {
3126                         dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
3127                                                   mapping);
3128                         skb = tp->rx_std_buffers[desc_idx].skb;
3129                         post_ptr = &tp->rx_std_ptr;
3130                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
3131                         dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
3132                                                   mapping);
3133                         skb = tp->rx_jumbo_buffers[desc_idx].skb;
3134                         post_ptr = &tp->rx_jumbo_ptr;
3135                 }
3136                 else {
3137                         goto next_pkt_nopost;
3138                 }
3139
3140                 work_mask |= opaque_key;
3141
3142                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
3143                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
3144                 drop_it:
3145                         tg3_recycle_rx(tp, opaque_key,
3146                                        desc_idx, *post_ptr);
3147                 drop_it_no_recycle:
3148                         /* Other statistics kept track of by card. */
3149                         tp->net_stats.rx_dropped++;
3150                         goto next_pkt;
3151                 }
3152
3153                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
3154
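                /* Copy-break: large packets get a fresh ring buffer
                 * allocated and the received skb is passed up as-is,
                 * while small packets are copied into a new skb so the
                 * original ring buffer can simply be recycled.
                 */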
3155                 /* rx_offset != 2 iff this is a 5701 card running
3156                  * in PCI-X mode [see tg3_get_invariants()]
3157                  */
3158                 if (len > RX_COPY_THRESHOLD &&
3159                     tp->rx_offset == 2) {
3160                         int skb_size;
3161
3162                         skb_size = tg3_alloc_rx_skb(tp, opaque_key,
3163                                                     desc_idx, *post_ptr);
3164                         if (skb_size < 0)
3165                                 goto drop_it;
3166
3167                         pci_unmap_single(tp->pdev, dma_addr,
3168                                          skb_size - tp->rx_offset,
3169                                          PCI_DMA_FROMDEVICE);
3170
3171                         skb_put(skb, len);
3172                 } else {
3173                         struct sk_buff *copy_skb;
3174
3175                         tg3_recycle_rx(tp, opaque_key,
3176                                        desc_idx, *post_ptr);
3177
3178                         copy_skb = dev_alloc_skb(len + 2);
3179                         if (copy_skb == NULL)
3180                                 goto drop_it_no_recycle;
3181
3182                         copy_skb->dev = tp->dev;
3183                         skb_reserve(copy_skb, 2);
3184                         skb_put(copy_skb, len);
3185                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3186                         memcpy(copy_skb->data, skb->data, len);
3187                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3188
3189                         /* We'll reuse the original ring buffer. */
3190                         skb = copy_skb;
3191                 }
3192
3193                 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
3194                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
3195                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
3196                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
3197                         skb->ip_summed = CHECKSUM_UNNECESSARY;
3198                 else
3199                         skb->ip_summed = CHECKSUM_NONE;
3200
3201                 skb->protocol = eth_type_trans(skb, tp->dev);
3202 #if TG3_VLAN_TAG_USED
3203                 if (tp->vlgrp != NULL &&
3204                     desc->type_flags & RXD_FLAG_VLAN) {
3205                         tg3_vlan_rx(tp, skb,
3206                                     desc->err_vlan & RXD_VLAN_MASK);
3207                 } else
3208 #endif
3209                         netif_receive_skb(skb);
3210
3211                 tp->dev->last_rx = jiffies;
3212                 received++;
3213                 budget--;
3214
3215 next_pkt:
3216                 (*post_ptr)++;
3217 next_pkt_nopost:
3218                 sw_idx++;
3219                 sw_idx %= TG3_RX_RCB_RING_SIZE(tp);
3220
3221                 /* Refresh hw_idx to see if there is new work */
3222                 if (sw_idx == hw_idx) {
3223                         hw_idx = tp->hw_status->idx[0].rx_producer;
3224                         rmb();
3225                 }
3226         }
3227
3228         /* ACK the status ring. */
3229         tp->rx_rcb_ptr = sw_idx;
3230         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
3231
3232         /* Refill RX ring(s). */
3233         if (work_mask & RXD_OPAQUE_RING_STD) {
3234                 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
3235                 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
3236                              sw_idx);
3237         }
3238         if (work_mask & RXD_OPAQUE_RING_JUMBO) {
3239                 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
3240                 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
3241                              sw_idx);
3242         }
3243         mmiowb();
3244
3245         return received;
3246 }
3247
3248 static int tg3_poll(struct net_device *netdev, int *budget)
3249 {
3250         struct tg3 *tp = netdev_priv(netdev);
3251         struct tg3_hw_status *sblk = tp->hw_status;
3252         int done;
3253
3254         /* handle link change and other phy events */
3255         if (!(tp->tg3_flags &
3256               (TG3_FLAG_USE_LINKCHG_REG |
3257                TG3_FLAG_POLL_SERDES))) {
3258                 if (sblk->status & SD_STATUS_LINK_CHG) {
3259                         sblk->status = SD_STATUS_UPDATED |
3260                                 (sblk->status & ~SD_STATUS_LINK_CHG);
3261                         spin_lock(&tp->lock);
3262                         tg3_setup_phy(tp, 0);
3263                         spin_unlock(&tp->lock);
3264                 }
3265         }
3266
3267         /* run TX completion thread */
3268         if (sblk->idx[0].tx_consumer != tp->tx_cons) {
3269                 tg3_tx(tp);
3270         }
3271
3272         /* run RX thread, within the bounds set by NAPI.
3273          * All RX "locking" is done by ensuring outside
3274          * code synchronizes with dev->poll()
3275          */
3276         if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
3277                 int orig_budget = *budget;
3278                 int work_done;
3279
3280                 if (orig_budget > netdev->quota)
3281                         orig_budget = netdev->quota;
3282
3283                 work_done = tg3_rx(tp, orig_budget);
3284
3285                 *budget -= work_done;
3286                 netdev->quota -= work_done;
3287         }
3288
3289         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
3290                 tp->last_tag = sblk->status_tag;
3291                 rmb();
3292         } else
3293                 sblk->status &= ~SD_STATUS_UPDATED;
3294
3295         /* if no more work, tell net stack and NIC we're done */
3296         done = !tg3_has_work(tp);
3297         if (done) {
3298                 netif_rx_complete(netdev);
3299                 tg3_restart_ints(tp);
3300         }
3301
3302         return (done ? 0 : 1);
3303 }
3304
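/* Keep the interrupt handler from doing any further work: irq_sync is
 * set first (and made globally visible by smp_mb()) so that a handler
 * starting after this point sees it via tg3_irq_sync() and bails out,
 * while synchronize_irq() waits for any handler that is already
 * running to finish.
 */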
3305 static void tg3_irq_quiesce(struct tg3 *tp)
3306 {
3307         BUG_ON(tp->irq_sync);
3308
3309         tp->irq_sync = 1;
3310         smp_mb();
3311
3312         synchronize_irq(tp->pdev->irq);
3313 }
3314
3315 static inline int tg3_irq_sync(struct tg3 *tp)
3316 {
3317         return tp->irq_sync;
3318 }
3319
3320 /* Fully shut down all tg3 driver activity elsewhere in the system.
3321  * If irq_sync is non-zero, the IRQ handler must be synchronized with
3322  * as well.  This is usually only necessary when shutting down the
3323  * device.
3324  */
3325 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
3326 {
3327         if (irq_sync)
3328                 tg3_irq_quiesce(tp);
3329         spin_lock_bh(&tp->lock);
3330         spin_lock(&tp->tx_lock);
3331 }
3332
3333 static inline void tg3_full_unlock(struct tg3 *tp)
3334 {
3335         spin_unlock(&tp->tx_lock);
3336         spin_unlock_bh(&tp->lock);
3337 }
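/* tg3_full_lock()/tg3_full_unlock() bracket configuration changes:
 * holding both tp->lock and tp->tx_lock shuts out the normal driver
 * paths, and passing a non-zero irq_sync quiesces the interrupt
 * handler first.  The locks are always released in the reverse of the
 * order in which they were taken.
 */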
3338
3339 /* MSI ISR - No need to check for interrupt sharing and no need to
3340  * flush status block and interrupt mailbox. PCI ordering rules
3341  * guarantee that MSI will arrive after the status block.
3342  */
3343 static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
3344 {
3345         struct net_device *dev = dev_id;
3346         struct tg3 *tp = netdev_priv(dev);
3347
3348         prefetch(tp->hw_status);
3349         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3350         /*
3351          * Writing any value to intr-mbox-0 clears PCI INTA# and
3352          * chip-internal interrupt pending events.
3353          * Writing non-zero to intr-mbox-0 additionally tells the
3354          * NIC to stop sending us irqs, engaging "in-intr-handler"
3355          * event coalescing.
3356          */
3357         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
3358         if (likely(!tg3_irq_sync(tp)))
3359                 netif_rx_schedule(dev);         /* schedule NAPI poll */
3360
3361         return IRQ_RETVAL(1);
3362 }
3363
3364 static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
3365 {
3366         struct net_device *dev = dev_id;
3367         struct tg3 *tp = netdev_priv(dev);
3368         struct tg3_hw_status *sblk = tp->hw_status;
3369         unsigned int handled = 1;
3370
3371         /* In INTx mode, it is possible for the interrupt to arrive at
3372          * the CPU before the status block write posted prior to the
3373          * interrupt is visible.  Reading the PCI State register will
3374          * confirm whether the interrupt is ours and flush the status block.
3375          */
3376         if ((sblk->status & SD_STATUS_UPDATED) ||
3377             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3378                 /*
3379                  * Writing any value to intr-mbox-0 clears PCI INTA# and
3380                  * chip-internal interrupt pending events.
3381                  * Writing non-zero to intr-mbox-0 additionally tells the
3382                  * NIC to stop sending us irqs, engaging "in-intr-handler"
3383                  * event coalescing.
3384                  */
3385                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3386                              0x00000001);
3387                 if (tg3_irq_sync(tp))
3388                         goto out;
3389                 sblk->status &= ~SD_STATUS_UPDATED;
3390                 if (likely(tg3_has_work(tp))) {
3391                         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3392                         netif_rx_schedule(dev);         /* schedule NAPI poll */
3393                 } else {
3394                         /* No work, shared interrupt perhaps?  re-enable
3395                          * interrupts, and flush that PCI write
3396                          */
3397                         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3398                                 0x00000000);
3399                 }
3400         } else {        /* shared interrupt */
3401                 handled = 0;
3402         }
3403 out:
3404         return IRQ_RETVAL(handled);
3405 }
3406
3407 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
3408 {
3409         struct net_device *dev = dev_id;
3410         struct tg3 *tp = netdev_priv(dev);
3411         struct tg3_hw_status *sblk = tp->hw_status;
3412         unsigned int handled = 1;
3413
3414         /* In INTx mode, it is possible for the interrupt to arrive at
3415          * the CPU before the status block write posted prior to the
3416          * interrupt is visible.  Reading the PCI State register will
3417          * confirm whether the interrupt is ours and flush the status block.
3418          */
3419         if ((sblk->status_tag != tp->last_tag) ||
3420             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3421                 /*
3422                  * writing any value to intr-mbox-0 clears PCI INTA# and
3423                  * chip-internal interrupt pending events.
3424                  * writing non-zero to intr-mbox-0 additionally tells the
3425                  * NIC to stop sending us irqs, engaging "in-intr-handler"
3426                  * event coalescing.
3427                  */
3428                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3429                              0x00000001);
3430                 if (tg3_irq_sync(tp))
3431                         goto out;
3432                 if (netif_rx_schedule_prep(dev)) {
3433                         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3434                         /* Update last_tag to mark that this status has been
3435                          * seen. Because the interrupt may be shared, we may be
3436                          * racing with tg3_poll(), so only update last_tag
3437                          * if tg3_poll() is not scheduled.
3438                          */
3439                         tp->last_tag = sblk->status_tag;
3440                         __netif_rx_schedule(dev);
3441                 }
3442         } else {        /* shared interrupt */
3443                 handled = 0;
3444         }
3445 out:
3446         return IRQ_RETVAL(handled);
3447 }
3448
3449 /* ISR for interrupt test */
3450 static irqreturn_t tg3_test_isr(int irq, void *dev_id,
3451                 struct pt_regs *regs)
3452 {
3453         struct net_device *dev = dev_id;
3454         struct tg3 *tp = netdev_priv(dev);
3455         struct tg3_hw_status *sblk = tp->hw_status;
3456
3457         if ((sblk->status & SD_STATUS_UPDATED) ||
3458             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3459                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3460                              0x00000001);
3461                 return IRQ_RETVAL(1);
3462         }
3463         return IRQ_RETVAL(0);
3464 }
3465
3466 static int tg3_init_hw(struct tg3 *);
3467 static int tg3_halt(struct tg3 *, int, int);
3468
3469 #ifdef CONFIG_NET_POLL_CONTROLLER
3470 static void tg3_poll_controller(struct net_device *dev)
3471 {
3472         struct tg3 *tp = netdev_priv(dev);
3473
3474         tg3_interrupt(tp->pdev->irq, dev, NULL);
3475 }
3476 #endif
3477
3478 static void tg3_reset_task(void *_data)
3479 {
3480         struct tg3 *tp = _data;
3481         unsigned int restart_timer;
3482
3483         tg3_netif_stop(tp);
3484
3485         tg3_full_lock(tp, 1);
3486
3487         restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
3488         tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
3489
3490         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
3491         tg3_init_hw(tp);
3492
3493         tg3_netif_start(tp);
3494
3495         tg3_full_unlock(tp);
3496
3497         if (restart_timer)
3498                 mod_timer(&tp->timer, jiffies + 1);
3499 }
3500
3501 static void tg3_tx_timeout(struct net_device *dev)
3502 {
3503         struct tg3 *tp = netdev_priv(dev);
3504
3505         printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
3506                dev->name);
3507
3508         schedule_work(&tp->reset_task);
3509 }
3510
3511 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
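/* Worked example: base = 0xfffff000 with len = 0x2000 gives
 * base + len + 8 == 0x1008 in 32-bit arithmetic, which is smaller
 * than base, so the buffer wraps past a 4GB boundary.  The first
 * comparison is only a fast filter: 0xffffdcc0 sits 9024 bytes below
 * 4GB, which appears to be chosen so that no buffer the driver maps
 * starting at or below it can reach the boundary.
 */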
3512 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3513 {
3514         u32 base = (u32) mapping & 0xffffffff;
3515
3516         return ((base > 0xffffdcc0) &&
3517                 (base + len + 8 < base));
3518 }
3519
3520 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3521
3522 static int tigon3_4gb_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
3523                                        u32 last_plus_one, u32 *start,
3524                                        u32 base_flags, u32 mss)
3525 {
3526         struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
3527         dma_addr_t new_addr = 0;
3528         u32 entry = *start;
3529         int i, ret = 0;
3530
3531         if (!new_skb) {
3532                 ret = -1;
3533         } else {
3534                 /* New SKB is guaranteed to be linear. */
3535                 entry = *start;
3536                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
3537                                           PCI_DMA_TODEVICE);
3538                 /* Make sure new skb does not cross any 4G boundaries.
3539                  * Drop the packet if it does.
3540                  */
3541                 if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
3542                         ret = -1;
3543                         dev_kfree_skb(new_skb);
3544                         new_skb = NULL;
3545                 } else {
3546                         tg3_set_txd(tp, entry, new_addr, new_skb->len,
3547                                     base_flags, 1 | (mss << 1));
3548                         *start = NEXT_TX(entry);
3549                 }
3550         }
3551
3552         /* Now clean up the sw ring entries. */
3553         i = 0;
3554         while (entry != last_plus_one) {
3555                 int len;
3556
3557                 if (i == 0)
3558                         len = skb_headlen(skb);
3559                 else
3560                         len = skb_shinfo(skb)->frags[i-1].size;
3561                 pci_unmap_single(tp->pdev,
3562                                  pci_unmap_addr(&tp->tx_buffers[entry], mapping),
3563                                  len, PCI_DMA_TODEVICE);
3564                 if (i == 0) {
3565                         tp->tx_buffers[entry].skb = new_skb;
3566                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
3567                 } else {
3568                         tp->tx_buffers[entry].skb = NULL;
3569                 }
3570                 entry = NEXT_TX(entry);
3571                 i++;
3572         }
3573
3574         dev_kfree_skb(skb);
3575
3576         return ret;
3577 }
3578
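/* Fills in a single TX descriptor.  Callers pack two values into
 * mss_and_is_end: bit 0 is the "last fragment" flag and the remaining
 * bits carry the MSS (see the (i == last) | (mss << 1) callers in
 * tg3_start_xmit()), while a VLAN tag, if present, rides in the upper
 * 16 bits of the flags argument.
 */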
3579 static void tg3_set_txd(struct tg3 *tp, int entry,
3580                         dma_addr_t mapping, int len, u32 flags,
3581                         u32 mss_and_is_end)
3582 {
3583         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3584         int is_end = (mss_and_is_end & 0x1);
3585         u32 mss = (mss_and_is_end >> 1);
3586         u32 vlan_tag = 0;
3587
3588         if (is_end)
3589                 flags |= TXD_FLAG_END;
3590         if (flags & TXD_FLAG_VLAN) {
3591                 vlan_tag = flags >> 16;
3592                 flags &= 0xffff;
3593         }
3594         vlan_tag |= (mss << TXD_MSS_SHIFT);
3595
3596         txd->addr_hi = ((u64) mapping >> 32);
3597         txd->addr_lo = ((u64) mapping & 0xffffffff);
3598         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3599         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3600 }
3601
3602 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3603 {
3604         struct tg3 *tp = netdev_priv(dev);
3605         dma_addr_t mapping;
3606         u32 len, entry, base_flags, mss;
3607         int would_hit_hwbug;
3608
3609         len = skb_headlen(skb);
3610
3611         /* No BH disabling for tx_lock here.  We are running in BH disabled
3612          * context and TX reclaim runs via tp->poll inside of a software
3613          * interrupt.  Furthermore, IRQ processing runs lockless so we have
3614          * no IRQ context deadlocks to worry about either.  Rejoice!
3615          */
3616         if (!spin_trylock(&tp->tx_lock))
3617                 return NETDEV_TX_LOCKED; 
3618
3619         if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3620                 if (!netif_queue_stopped(dev)) {
3621                         netif_stop_queue(dev);
3622
3623                         /* This is a hard error, log it. */
3624                         printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
3625                                "queue awake!\n", dev->name);
3626                 }
3627                 spin_unlock(&tp->tx_lock);
3628                 return NETDEV_TX_BUSY;
3629         }
3630
3631         entry = tp->tx_prod;
3632         base_flags = 0;
3633         if (skb->ip_summed == CHECKSUM_HW)
3634                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3635 #if TG3_TSO_SUPPORT != 0
3636         mss = 0;
3637         if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3638             (mss = skb_shinfo(skb)->tso_size) != 0) {
3639                 int tcp_opt_len, ip_tcp_len;
3640
3641                 if (skb_header_cloned(skb) &&
3642                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3643                         dev_kfree_skb(skb);
3644                         goto out_unlock;
3645                 }
3646
3647                 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3648                 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3649
3650                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3651                                TXD_FLAG_CPU_POST_DMA);
3652
3653                 skb->nh.iph->check = 0;
3654                 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
3655                 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
3656                         skb->h.th->check = 0;
3657                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
3658                 }
3659                 else {
3660                         skb->h.th->check =
3661                                 ~csum_tcpudp_magic(skb->nh.iph->saddr,
3662                                                    skb->nh.iph->daddr,
3663                                                    0, IPPROTO_TCP, 0);
3664                 }
3665
3666                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
3667                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
3668                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3669                                 int tsflags;
3670
3671                                 tsflags = ((skb->nh.iph->ihl - 5) +
3672                                            (tcp_opt_len >> 2));
3673                                 mss |= (tsflags << 11);
3674                         }
3675                 } else {
3676                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3677                                 int tsflags;
3678
3679                                 tsflags = ((skb->nh.iph->ihl - 5) +
3680                                            (tcp_opt_len >> 2));
3681                                 base_flags |= tsflags << 12;
3682                         }
3683                 }
3684         }
3685 #else
3686         mss = 0;
3687 #endif
3688 #if TG3_VLAN_TAG_USED
3689         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3690                 base_flags |= (TXD_FLAG_VLAN |
3691                                (vlan_tx_tag_get(skb) << 16));
3692 #endif
3693
3694         /* Queue skb data, a.k.a. the main skb fragment. */
3695         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3696
3697         tp->tx_buffers[entry].skb = skb;
3698         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3699
3700         would_hit_hwbug = 0;
3701
3702         if (tg3_4g_overflow_test(mapping, len))
3703                 would_hit_hwbug = 1;
3704
3705         tg3_set_txd(tp, entry, mapping, len, base_flags,
3706                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3707
3708         entry = NEXT_TX(entry);
3709
3710         /* Now loop through additional data fragments, and queue them. */
3711         if (skb_shinfo(skb)->nr_frags > 0) {
3712                 unsigned int i, last;
3713
3714                 last = skb_shinfo(skb)->nr_frags - 1;
3715                 for (i = 0; i <= last; i++) {
3716                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3717
3718                         len = frag->size;
3719                         mapping = pci_map_page(tp->pdev,
3720                                                frag->page,
3721                                                frag->page_offset,
3722                                                len, PCI_DMA_TODEVICE);
3723
3724                         tp->tx_buffers[entry].skb = NULL;
3725                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3726
3727                         if (tg3_4g_overflow_test(mapping, len))
3728                                 would_hit_hwbug = 1;
3729
3730                         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
3731                                 tg3_set_txd(tp, entry, mapping, len,
3732                                             base_flags, (i == last)|(mss << 1));
3733                         else
3734                                 tg3_set_txd(tp, entry, mapping, len,
3735                                             base_flags, (i == last));
3736
3737                         entry = NEXT_TX(entry);
3738                 }
3739         }
3740
3741         if (would_hit_hwbug) {
3742                 u32 last_plus_one = entry;
3743                 u32 start;
3744
3745                 start = entry - 1 - skb_shinfo(skb)->nr_frags;
3746                 start &= (TG3_TX_RING_SIZE - 1);
3747
3748                 /* If the workaround fails due to memory/mapping
3749                  * failure, silently drop this packet.
3750                  */
3751                 if (tigon3_4gb_hwbug_workaround(tp, skb, last_plus_one,
3752                                                 &start, base_flags, mss))
3753                         goto out_unlock;
3754
3755                 entry = start;
3756         }
3757
3758         /* Packets are ready, update Tx producer idx locally and on the card. */
3759         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3760
3761         tp->tx_prod = entry;
3762         if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1)) {
3763                 netif_stop_queue(dev);
3764                 if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
3765                         netif_wake_queue(tp->dev);
3766         }
3767
3768 out_unlock:
3769         mmiowb();
3770         spin_unlock(&tp->tx_lock);
3771
3772         dev->trans_start = jiffies;
3773
3774         return NETDEV_TX_OK;
3775 }
3776
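/* On the 5780 class, jumbo MTUs are handled with larger standard-ring
 * buffers (see tg3_init_rings()) and TSO appears to be incompatible
 * with that, so TSO is switched off when the MTU is raised above
 * ETH_DATA_LEN; other chips simply enable or disable the dedicated
 * jumbo RX ring.
 */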
3777 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
3778                                int new_mtu)
3779 {
3780         dev->mtu = new_mtu;
3781
3782         if (new_mtu > ETH_DATA_LEN) {
3783                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
3784                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
3785                         ethtool_op_set_tso(dev, 0);
3786                 }
3787                 else
3788                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
3789         } else {
3790                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
3791                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
3792                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
3793         }
3794 }
3795
3796 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
3797 {
3798         struct tg3 *tp = netdev_priv(dev);
3799
3800         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
3801                 return -EINVAL;
3802
3803         if (!netif_running(dev)) {
3804                 /* We'll just catch it later when the
3805                  * device is brought up.
3806                  */
3807                 tg3_set_mtu(dev, tp, new_mtu);
3808                 return 0;
3809         }
3810
3811         tg3_netif_stop(tp);
3812
3813         tg3_full_lock(tp, 1);
3814
3815         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
3816
3817         tg3_set_mtu(dev, tp, new_mtu);
3818
3819         tg3_init_hw(tp);
3820
3821         tg3_netif_start(tp);
3822
3823         tg3_full_unlock(tp);
3824
3825         return 0;
3826 }
3827
3828 /* Free up pending packets in all rx/tx rings.
3829  *
3830  * The chip has been shut down and the driver detached from
3831  * the networking layer, so no interrupts or new tx packets will
3832  * end up in the driver.  tp->{tx,}lock is not held and we are not
3833  * in an interrupt context and thus may sleep.
3834  */
3835 static void tg3_free_rings(struct tg3 *tp)
3836 {
3837         struct ring_info *rxp;
3838         int i;
3839
3840         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3841                 rxp = &tp->rx_std_buffers[i];
3842
3843                 if (rxp->skb == NULL)
3844                         continue;
3845                 pci_unmap_single(tp->pdev,
3846                                  pci_unmap_addr(rxp, mapping),
3847                                  tp->rx_pkt_buf_sz - tp->rx_offset,
3848                                  PCI_DMA_FROMDEVICE);
3849                 dev_kfree_skb_any(rxp->skb);
3850                 rxp->skb = NULL;
3851         }
3852
3853         for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3854                 rxp = &tp->rx_jumbo_buffers[i];
3855
3856                 if (rxp->skb == NULL)
3857                         continue;
3858                 pci_unmap_single(tp->pdev,
3859                                  pci_unmap_addr(rxp, mapping),
3860                                  RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
3861                                  PCI_DMA_FROMDEVICE);
3862                 dev_kfree_skb_any(rxp->skb);
3863                 rxp->skb = NULL;
3864         }
3865
3866         for (i = 0; i < TG3_TX_RING_SIZE; ) {
3867                 struct tx_ring_info *txp;
3868                 struct sk_buff *skb;
3869                 int j;
3870
3871                 txp = &tp->tx_buffers[i];
3872                 skb = txp->skb;
3873
3874                 if (skb == NULL) {
3875                         i++;
3876                         continue;
3877                 }
3878
3879                 pci_unmap_single(tp->pdev,
3880                                  pci_unmap_addr(txp, mapping),
3881                                  skb_headlen(skb),
3882                                  PCI_DMA_TODEVICE);
3883                 txp->skb = NULL;
3884
3885                 i++;
3886
3887                 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
3888                         txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
3889                         pci_unmap_page(tp->pdev,
3890                                        pci_unmap_addr(txp, mapping),
3891                                        skb_shinfo(skb)->frags[j].size,
3892                                        PCI_DMA_TODEVICE);
3893                         i++;
3894                 }
3895
3896                 dev_kfree_skb_any(skb);
3897         }
3898 }
3899
3900 /* Initialize tx/rx rings for packet processing.
3901  *
3902  * The chip has been shut down and the driver detached from
3903  * the networking layer, so no interrupts or new tx packets will
3904  * end up in the driver.  tp->{tx,}lock are held and thus
3905  * we may not sleep.
3906  */
3907 static void tg3_init_rings(struct tg3 *tp)
3908 {
3909         u32 i;
3910
3911         /* Free up all the SKBs. */
3912         tg3_free_rings(tp);
3913
3914         /* Zero out all descriptors. */
3915         memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
3916         memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
3917         memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
3918         memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
3919
3920         tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
3921         if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
3922             (tp->dev->mtu > ETH_DATA_LEN))
3923                 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
3924
3925         /* Initialize invariants of the rings; we only set this
3926          * stuff once.  This works because the card does not
3927          * write into the rx buffer posting rings.
3928          */
3929         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3930                 struct tg3_rx_buffer_desc *rxd;
3931
3932                 rxd = &tp->rx_std[i];
3933                 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
3934                         << RXD_LEN_SHIFT;
3935                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
3936                 rxd->opaque = (RXD_OPAQUE_RING_STD |
3937                                (i << RXD_OPAQUE_INDEX_SHIFT));
3938         }
3939
3940         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
3941                 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3942                         struct tg3_rx_buffer_desc *rxd;
3943
3944                         rxd = &tp->rx_jumbo[i];
3945                         rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
3946                                 << RXD_LEN_SHIFT;
3947                         rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
3948                                 RXD_FLAG_JUMBO;
3949                         rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
3950                                (i << RXD_OPAQUE_INDEX_SHIFT));
3951                 }
3952         }
3953
3954         /* Now allocate fresh SKBs for each rx ring. */
3955         for (i = 0; i < tp->rx_pending; i++) {
3956                 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
3957                                      -1, i) < 0)
3958                         break;
3959         }
3960
3961         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
3962                 for (i = 0; i < tp->rx_jumbo_pending; i++) {
3963                         if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
3964                                              -1, i) < 0)
3965                                 break;
3966                 }
3967         }
3968 }
3969
3970 /*
3971  * Must not be invoked with interrupt sources disabled and
3972  * the hardware shut down.
3973  */
3974 static void tg3_free_consistent(struct tg3 *tp)
3975 {
3976         kfree(tp->rx_std_buffers);
3977         tp->rx_std_buffers = NULL;
3978         if (tp->rx_std) {
3979                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
3980                                     tp->rx_std, tp->rx_std_mapping);
3981                 tp->rx_std = NULL;
3982         }
3983         if (tp->rx_jumbo) {
3984                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3985                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
3986                 tp->rx_jumbo = NULL;
3987         }
3988         if (tp->rx_rcb) {
3989                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3990                                     tp->rx_rcb, tp->rx_rcb_mapping);
3991                 tp->rx_rcb = NULL;
3992         }
3993         if (tp->tx_ring) {
3994                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
3995                         tp->tx_ring, tp->tx_desc_mapping);
3996                 tp->tx_ring = NULL;
3997         }
3998         if (tp->hw_status) {
3999                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
4000                                     tp->hw_status, tp->status_mapping);
4001                 tp->hw_status = NULL;
4002         }
4003         if (tp->hw_stats) {
4004                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
4005                                     tp->hw_stats, tp->stats_mapping);
4006                 tp->hw_stats = NULL;
4007         }
4008 }
4009
4010 /*
4011  * Must not be invoked with interrupt sources disabled and
4012  * the hardware shut down.  Can sleep.
4013  */
4014 static int tg3_alloc_consistent(struct tg3 *tp)
4015 {
4016         tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
4017                                       (TG3_RX_RING_SIZE +
4018                                        TG3_RX_JUMBO_RING_SIZE)) +
4019                                      (sizeof(struct tx_ring_info) *
4020                                       TG3_TX_RING_SIZE),
4021                                      GFP_KERNEL);
4022         if (!tp->rx_std_buffers)
4023                 return -ENOMEM;
4024
4025         memset(tp->rx_std_buffers, 0,
4026                (sizeof(struct ring_info) *
4027                 (TG3_RX_RING_SIZE +
4028                  TG3_RX_JUMBO_RING_SIZE)) +
4029                (sizeof(struct tx_ring_info) *
4030                 TG3_TX_RING_SIZE));
4031
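        /* The rx_std, rx_jumbo and tx ring-info arrays all live in the
         * single allocation above, laid out back to back; this is why
         * tg3_free_consistent() only needs to kfree() rx_std_buffers.
         */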
4032         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
4033         tp->tx_buffers = (struct tx_ring_info *)
4034                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
4035
4036         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
4037                                           &tp->rx_std_mapping);
4038         if (!tp->rx_std)
4039                 goto err_out;
4040
4041         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4042                                             &tp->rx_jumbo_mapping);
4043
4044         if (!tp->rx_jumbo)
4045                 goto err_out;
4046
4047         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4048                                           &tp->rx_rcb_mapping);
4049         if (!tp->rx_rcb)
4050                 goto err_out;
4051
4052         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
4053                                            &tp->tx_desc_mapping);
4054         if (!tp->tx_ring)
4055                 goto err_out;
4056
4057         tp->hw_status = pci_alloc_consistent(tp->pdev,
4058                                              TG3_HW_STATUS_SIZE,
4059                                              &tp->status_mapping);
4060         if (!tp->hw_status)
4061                 goto err_out;
4062
4063         tp->hw_stats = pci_alloc_consistent(tp->pdev,
4064                                             sizeof(struct tg3_hw_stats),
4065                                             &tp->stats_mapping);
4066         if (!tp->hw_stats)
4067                 goto err_out;
4068
4069         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4070         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4071
4072         return 0;
4073
4074 err_out:
4075         tg3_free_consistent(tp);
4076         return -ENOMEM;
4077 }
4078
4079 #define MAX_WAIT_CNT 1000
4080
4081 /* To stop a block, clear the enable bit and poll till it
4082  * clears.  tp->lock is held.
4083  */
4084 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
4085 {
4086         unsigned int i;
4087         u32 val;
4088
4089         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4090                 switch (ofs) {
4091                 case RCVLSC_MODE:
4092                 case DMAC_MODE:
4093                 case MBFREE_MODE:
4094                 case BUFMGR_MODE:
4095                 case MEMARB_MODE:
4096                         /* We can't enable/disable these bits of the
4097                          * 5705/5750, just say success.
4098                          */
4099                         return 0;
4100
4101                 default:
4102                         break;
4103                 }
4104         }
4105
4106         val = tr32(ofs);
4107         val &= ~enable_bit;
4108         tw32_f(ofs, val);
4109
4110         for (i = 0; i < MAX_WAIT_CNT; i++) {
4111                 udelay(100);
4112                 val = tr32(ofs);
4113                 if ((val & enable_bit) == 0)
4114                         break;
4115         }
4116
4117         if (i == MAX_WAIT_CNT && !silent) {
4118                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4119                        "ofs=%lx enable_bit=%x\n",
4120                        ofs, enable_bit);
4121                 return -ENODEV;
4122         }
4123
4124         return 0;
4125 }
4126
4127 /* tp->lock is held. */
4128 static int tg3_abort_hw(struct tg3 *tp, int silent)
4129 {
4130         int i, err;
4131
4132         tg3_disable_ints(tp);
4133
4134         tp->rx_mode &= ~RX_MODE_ENABLE;
4135         tw32_f(MAC_RX_MODE, tp->rx_mode);
4136         udelay(10);
4137
4138         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
4139         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
4140         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
4141         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
4142         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
4143         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
4144
4145         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
4146         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
4147         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
4148         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
4149         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
4150         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
4151         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
4152
4153         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
4154         tw32_f(MAC_MODE, tp->mac_mode);
4155         udelay(40);
4156
4157         tp->tx_mode &= ~TX_MODE_ENABLE;
4158         tw32_f(MAC_TX_MODE, tp->tx_mode);
4159
4160         for (i = 0; i < MAX_WAIT_CNT; i++) {
4161                 udelay(100);
4162                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
4163                         break;
4164         }
4165         if (i >= MAX_WAIT_CNT) {
4166                 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
4167                        "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
4168                        tp->dev->name, tr32(MAC_TX_MODE));
4169                 err |= -ENODEV;
4170         }
4171
4172         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
4173         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
4174         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
4175
4176         tw32(FTQ_RESET, 0xffffffff);
4177         tw32(FTQ_RESET, 0x00000000);
4178
4179         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
4180         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
4181
4182         if (tp->hw_status)
4183                 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4184         if (tp->hw_stats)
4185                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4186
4187         return err;
4188 }
4189
4190 /* tp->lock is held. */
4191 static int tg3_nvram_lock(struct tg3 *tp)
4192 {
4193         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4194                 int i;
4195
4196                 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4197                 for (i = 0; i < 8000; i++) {
4198                         if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4199                                 break;
4200                         udelay(20);
4201                 }
4202                 if (i == 8000)
4203                         return -ENODEV;
4204         }
4205         return 0;
4206 }
4207
4208 /* tp->lock is held. */
4209 static void tg3_nvram_unlock(struct tg3 *tp)
4210 {
4211         if (tp->tg3_flags & TG3_FLAG_NVRAM)
4212                 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4213 }
4214
4215 /* tp->lock is held. */
4216 static void tg3_enable_nvram_access(struct tg3 *tp)
4217 {
4218         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4219             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4220                 u32 nvaccess = tr32(NVRAM_ACCESS);
4221
4222                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4223         }
4224 }
4225
4226 /* tp->lock is held. */
4227 static void tg3_disable_nvram_access(struct tg3 *tp)
4228 {
4229         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4230             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4231                 u32 nvaccess = tr32(NVRAM_ACCESS);
4232
4233                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4234         }
4235 }
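
/* Editor's note: a hedged sketch (not code from this driver) of how a caller
 * would typically bracket an NVRAM operation with the helpers above; the
 * hypothetical do_nvram_op() stands in for the actual read/write logic
 * elsewhere in the file:
 *
 *	err = tg3_nvram_lock(tp);
 *	if (err)
 *		return err;
 *	tg3_enable_nvram_access(tp);
 *	err = do_nvram_op(tp);
 *	tg3_disable_nvram_access(tp);
 *	tg3_nvram_unlock(tp);
 *
 * i.e. win arbitration against the bootcode first, then set the access-enable
 * bit, and release both in reverse order when done.
 */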
4236
4237 /* tp->lock is held. */
4238 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4239 {
4240         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
4241                 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4242                               NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
4243
4244         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4245                 switch (kind) {
4246                 case RESET_KIND_INIT:
4247                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4248                                       DRV_STATE_START);
4249                         break;
4250
4251                 case RESET_KIND_SHUTDOWN:
4252                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4253                                       DRV_STATE_UNLOAD);
4254                         break;
4255
4256                 case RESET_KIND_SUSPEND:
4257                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4258                                       DRV_STATE_SUSPEND);
4259                         break;
4260
4261                 default:
4262                         break;
4263                 }
4264         }
4265 }
4266
4267 /* tp->lock is held. */
4268 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4269 {
4270         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4271                 switch (kind) {
4272                 case RESET_KIND_INIT:
4273                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4274                                       DRV_STATE_START_DONE);
4275                         break;
4276
4277                 case RESET_KIND_SHUTDOWN:
4278                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4279                                       DRV_STATE_UNLOAD_DONE);
4280                         break;
4281
4282                 default:
4283                         break;
4284                 }
4285         }
4286 }
4287
4288 /* tp->lock is held. */
4289 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
4290 {
4291         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4292                 switch (kind) {
4293                 case RESET_KIND_INIT:
4294                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4295                                       DRV_STATE_START);
4296                         break;
4297
4298                 case RESET_KIND_SHUTDOWN:
4299                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4300                                       DRV_STATE_UNLOAD);
4301                         break;
4302
4303                 case RESET_KIND_SUSPEND:
4304                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4305                                       DRV_STATE_SUSPEND);
4306                         break;
4307
4308                 default:
4309                         break;
4310                 }
4311         }
4312 }
4313
4314 static void tg3_stop_fw(struct tg3 *);
4315
4316 /* tp->lock is held. */
4317 static int tg3_chip_reset(struct tg3 *tp)
4318 {
4319         u32 val;
4320         void (*write_op)(struct tg3 *, u32, u32);
4321         int i;
4322
4323         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
4324                 tg3_nvram_lock(tp);
4325
4326         /*
4327          * We must avoid the readl() that normally takes place.
4328          * It locks machines, causes machine checks, and other
4329          * fun things.  So, temporarily disable the 5701
4330          * hardware workaround, while we do the reset.
4331          */
4332         write_op = tp->write32;
4333         if (write_op == tg3_write_flush_reg32)
4334                 tp->write32 = tg3_write32;
4335
4336         /* do the reset */
4337         val = GRC_MISC_CFG_CORECLK_RESET;
4338
4339         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4340                 if (tr32(0x7e2c) == 0x60) {
4341                         tw32(0x7e2c, 0x20);
4342                 }
4343                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4344                         tw32(GRC_MISC_CFG, (1 << 29));
4345                         val |= (1 << 29);
4346                 }
4347         }
4348
4349         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4350                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
4351         tw32(GRC_MISC_CFG, val);
4352
4353         /* restore 5701 hardware bug workaround write method */
4354         tp->write32 = write_op;
4355
4356         /* Unfortunately, we have to delay before the PCI read back.
4357          * Some 575X chips will not even respond to a PCI cfg access
4358          * when the reset command is given to the chip.
4359          *
4360          * How do these hardware designers expect things to work
4361          * properly if the PCI write is posted for a long period
4362          * of time?  It is always necessary to have some method by
4363          * which a register read back can occur to push out the
4364          * write that performs the reset.
4365          *
4366          * For most tg3 variants the trick below has worked.
4367          * Ho hum...
4368          */
4369         udelay(120);
4370
4371         /* Flush PCI posted writes.  The normal MMIO registers
4372          * are inaccessible at this time so this is the only
4373          * way to do this reliably (actually, this is no longer
4374          * the case, see above).  I tried to use indirect
4375          * register read/write but this upset some 5701 variants.
4376          */
4377         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
4378
4379         udelay(120);
4380
4381         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4382                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
4383                         int i;
4384                         u32 cfg_val;
4385
4386                         /* Wait for link training to complete.  */
4387                         for (i = 0; i < 5000; i++)
4388                                 udelay(100);
4389
4390                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
4391                         pci_write_config_dword(tp->pdev, 0xc4,
4392                                                cfg_val | (1 << 15));
4393                 }
4394                 /* Set PCIE max payload size and clear error status.  */
4395                 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
4396         }
4397
4398         /* Re-enable indirect register accesses. */
4399         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
4400                                tp->misc_host_ctrl);
4401
4402         /* Set MAX PCI retry to zero. */
4403         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
4404         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
4405             (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
4406                 val |= PCISTATE_RETRY_SAME_DMA;
4407         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
4408
4409         pci_restore_state(tp->pdev);
4410
4411         /* Make sure PCI-X relaxed ordering bit is clear. */
4412         pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
4413         val &= ~PCIX_CAPS_RELAXED_ORDERING;
4414         pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
4415
4416         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4417                 u32 val;
4418
4419                 /* Chip reset on 5780 will reset the MSI enable bit,
4420                  * so it needs to be restored here.
4421                  */
4422                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
4423                         u16 ctrl;
4424
4425                         pci_read_config_word(tp->pdev,
4426                                              tp->msi_cap + PCI_MSI_FLAGS,
4427                                              &ctrl);
4428                         pci_write_config_word(tp->pdev,
4429                                               tp->msi_cap + PCI_MSI_FLAGS,
4430                                               ctrl | PCI_MSI_FLAGS_ENABLE);
4431                         val = tr32(MSGINT_MODE);
4432                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
4433                 }
4434
4435                 val = tr32(MEMARB_MODE);
4436                 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
4437
4438         } else
4439                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
4440
4441         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
4442                 tg3_stop_fw(tp);
4443                 tw32(0x5000, 0x400);
4444         }
4445
4446         tw32(GRC_MODE, tp->grc_mode);
4447
4448         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
4449                 u32 val = tr32(0xc4);
4450
4451                 tw32(0xc4, val | (1 << 15));
4452         }
4453
4454         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
4455             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4456                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
4457                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
4458                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
4459                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
4460         }
4461
4462         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
4463                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
4464                 tw32_f(MAC_MODE, tp->mac_mode);
4465         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
4466                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
4467                 tw32_f(MAC_MODE, tp->mac_mode);
4468         } else
4469                 tw32_f(MAC_MODE, 0);
4470         udelay(40);
4471
4472         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
4473                 /* Wait for firmware initialization to complete. */
4474                 for (i = 0; i < 100000; i++) {
4475                         tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
4476                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4477                                 break;
4478                         udelay(10);
4479                 }
4480                 if (i >= 100000) {
4481                         printk(KERN_ERR PFX "tg3_reset_hw timed out for %s, "
4482                                "firmware will not restart magic=%08x\n",
4483                                tp->dev->name, val);
4484                         return -ENODEV;
4485                 }
4486         }
4487
4488         if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
4489             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4490                 u32 val = tr32(0x7c00);
4491
4492                 tw32(0x7c00, val | (1 << 25));
4493         }
4494
4495         /* Reprobe ASF enable state.  */
4496         tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
4497         tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
4498         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
4499         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
4500                 u32 nic_cfg;
4501
4502                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
4503                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
4504                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
4505                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
4506                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
4507                 }
4508         }
4509
4510         return 0;
4511 }
4512
4513 /* tp->lock is held. */
4514 static void tg3_stop_fw(struct tg3 *tp)
4515 {
4516         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4517                 u32 val;
4518                 int i;
4519
4520                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
4521                 val = tr32(GRC_RX_CPU_EVENT);
4522                 val |= (1 << 14);
4523                 tw32(GRC_RX_CPU_EVENT, val);
4524
4525                 /* Wait for RX cpu to ACK the event.  */
4526                 for (i = 0; i < 100; i++) {
4527                         if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
4528                                 break;
4529                         udelay(1);
4530                 }
4531         }
4532 }
4533
4534 /* tp->lock is held. */
4535 static int tg3_halt(struct tg3 *tp, int kind, int silent)
4536 {
4537         int err;
4538
4539         tg3_stop_fw(tp);
4540
4541         tg3_write_sig_pre_reset(tp, kind);
4542
4543         tg3_abort_hw(tp, silent);
4544         err = tg3_chip_reset(tp);
4545
4546         tg3_write_sig_legacy(tp, kind);
4547         tg3_write_sig_post_reset(tp, kind);
4548
4549         if (err)
4550                 return err;
4551
4552         return 0;
4553 }
4554
4555 #define TG3_FW_RELEASE_MAJOR    0x0
4556 #define TG3_FW_RELASE_MINOR     0x0
4557 #define TG3_FW_RELEASE_FIX      0x0
4558 #define TG3_FW_START_ADDR       0x08000000
4559 #define TG3_FW_TEXT_ADDR        0x08000000
4560 #define TG3_FW_TEXT_LEN         0x9c0
4561 #define TG3_FW_RODATA_ADDR      0x080009c0
4562 #define TG3_FW_RODATA_LEN       0x60
4563 #define TG3_FW_DATA_ADDR        0x08000a40
4564 #define TG3_FW_DATA_LEN         0x20
4565 #define TG3_FW_SBSS_ADDR        0x08000a60
4566 #define TG3_FW_SBSS_LEN         0xc
4567 #define TG3_FW_BSS_ADDR         0x08000a70
4568 #define TG3_FW_BSS_LEN          0x10
4569
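/* Editor's note: the arrays below are raw instruction and data words for the
 * NIC's embedded RISC CPUs (the encoding appears to be MIPS; 0x27bdffe0, for
 * instance, looks like a stack-frame adjust), linked at TG3_FW_TEXT_ADDR and
 * copied into the CPU scratch memory by tg3_load_firmware_cpu() below.  They
 * are data as far as the host driver is concerned, never executed on the
 * host CPU.
 */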
4570 static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
4571         0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
4572         0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
4573         0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
4574         0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
4575         0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
4576         0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
4577         0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
4578         0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
4579         0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
4580         0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
4581         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
4582         0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
4583         0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
4584         0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
4585         0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
4586         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4587         0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
4588         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
4589         0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
4590         0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4591         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
4592         0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
4593         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4594         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4595         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4596         0, 0, 0, 0, 0, 0,
4597         0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
4598         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4599         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4600         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4601         0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
4602         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
4603         0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
4604         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
4605         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4606         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4607         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
4608         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4609         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4610         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4611         0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
4612         0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
4613         0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
4614         0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
4615         0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
4616         0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
4617         0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
4618         0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
4619         0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
4620         0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
4621         0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
4622         0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
4623         0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
4624         0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
4625         0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
4626         0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
4627         0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
4628         0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
4629         0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
4630         0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
4631         0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
4632         0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
4633         0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
4634         0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
4635         0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
4636         0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
4637         0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
4638         0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
4639         0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
4640         0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
4641         0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
4642         0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
4643         0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
4644         0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
4645         0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
4646         0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
4647         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
4648         0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
4649         0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
4650         0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
4651         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
4652         0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
4653         0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
4654         0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
4655         0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
4656         0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
4657         0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
4658         0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
4659         0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
4660         0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
4661         0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
4662 };
4663
4664 static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
4665         0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
4666         0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
4667         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4668         0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
4669         0x00000000
4670 };
4671
4672 #if 0 /* All zeros, don't eat up space with it. */
4673 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
4674         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4675         0x00000000, 0x00000000, 0x00000000, 0x00000000
4676 };
4677 #endif
4678
4679 #define RX_CPU_SCRATCH_BASE     0x30000
4680 #define RX_CPU_SCRATCH_SIZE     0x04000
4681 #define TX_CPU_SCRATCH_BASE     0x34000
4682 #define TX_CPU_SCRATCH_SIZE     0x04000
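
/* Editor's note: these are the per-CPU scratch windows that
 * tg3_load_firmware_cpu() below first zeroes and then fills.  Each firmware
 * section is copied to cpu_scratch_base + (section_base & 0xffff), i.e. only
 * the low 16 bits of the link address select the offset inside the 16 KB
 * scratch area.
 */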
4683
4684 /* tp->lock is held. */
4685 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
4686 {
4687         int i;
4688
4689         if (offset == TX_CPU_BASE &&
4690             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
4691                 BUG();
4692
4693         if (offset == RX_CPU_BASE) {
4694                 for (i = 0; i < 10000; i++) {
4695                         tw32(offset + CPU_STATE, 0xffffffff);
4696                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4697                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4698                                 break;
4699                 }
4700
4701                 tw32(offset + CPU_STATE, 0xffffffff);
4702                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
4703                 udelay(10);
4704         } else {
4705                 for (i = 0; i < 10000; i++) {
4706                         tw32(offset + CPU_STATE, 0xffffffff);
4707                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4708                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4709                                 break;
4710                 }
4711         }
4712
4713         if (i >= 10000) {
4714                 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s "
4715                        "(%s CPU)\n",
4716                        tp->dev->name,
4717                        (offset == RX_CPU_BASE ? "RX" : "TX"));
4718                 return -ENODEV;
4719         }
4720         return 0;
4721 }
4722
4723 struct fw_info {
4724         unsigned int text_base;
4725         unsigned int text_len;
4726         u32 *text_data;
4727         unsigned int rodata_base;
4728         unsigned int rodata_len;
4729         u32 *rodata_data;
4730         unsigned int data_base;
4731         unsigned int data_len;
4732         u32 *data_data;
4733 };
4734
4735 /* tp->lock is held. */
4736 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
4737                                  int cpu_scratch_size, struct fw_info *info)
4738 {
4739         int err, i;
4740         void (*write_op)(struct tg3 *, u32, u32);
4741
4742         if (cpu_base == TX_CPU_BASE &&
4743             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
4744                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
4745                        "TX cpu firmware on %s, which is a 5705-class chip.\n",
4746                        tp->dev->name);
4747                 return -EINVAL;
4748         }
4749
4750         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4751                 write_op = tg3_write_mem;
4752         else
4753                 write_op = tg3_write_indirect_reg32;
4754
4755         /* It is possible that bootcode is still loading at this point.
4756          * Acquire the nvram lock before halting the cpu.
4757          */
4758         tg3_nvram_lock(tp);
4759         err = tg3_halt_cpu(tp, cpu_base);
4760         tg3_nvram_unlock(tp);
4761         if (err)
4762                 goto out;
4763
4764         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
4765                 write_op(tp, cpu_scratch_base + i, 0);
4766         tw32(cpu_base + CPU_STATE, 0xffffffff);
4767         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
4768         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
4769                 write_op(tp, (cpu_scratch_base +
4770                               (info->text_base & 0xffff) +
4771                               (i * sizeof(u32))),
4772                          (info->text_data ?
4773                           info->text_data[i] : 0));
4774         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
4775                 write_op(tp, (cpu_scratch_base +
4776                               (info->rodata_base & 0xffff) +
4777                               (i * sizeof(u32))),
4778                          (info->rodata_data ?
4779                           info->rodata_data[i] : 0));
4780         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
4781                 write_op(tp, (cpu_scratch_base +
4782                               (info->data_base & 0xffff) +
4783                               (i * sizeof(u32))),
4784                          (info->data_data ?
4785                           info->data_data[i] : 0));
4786
4787         err = 0;
4788
4789 out:
4790         return err;
4791 }
4792
4793 /* tp->lock is held. */
4794 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
4795 {
4796         struct fw_info info;
4797         int err, i;
4798
4799         info.text_base = TG3_FW_TEXT_ADDR;
4800         info.text_len = TG3_FW_TEXT_LEN;
4801         info.text_data = &tg3FwText[0];
4802         info.rodata_base = TG3_FW_RODATA_ADDR;
4803         info.rodata_len = TG3_FW_RODATA_LEN;
4804         info.rodata_data = &tg3FwRodata[0];
4805         info.data_base = TG3_FW_DATA_ADDR;
4806         info.data_len = TG3_FW_DATA_LEN;
4807         info.data_data = NULL;
4808
4809         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
4810                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
4811                                     &info);
4812         if (err)
4813                 return err;
4814
4815         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
4816                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
4817                                     &info);
4818         if (err)
4819                 return err;
4820
4821         /* Now startup only the RX cpu. */
4822         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4823         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
4824
4825         for (i = 0; i < 5; i++) {
4826                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
4827                         break;
4828                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4829                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
4830                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
4831                 udelay(1000);
4832         }
4833         if (i >= 5) {
4834                 printk(KERN_ERR PFX "tg3_load_firmware failed to set "
4835                        "RX CPU PC for %s: is %08x, should be %08x\n",
4836                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
4837                        TG3_FW_TEXT_ADDR);
4838                 return -ENODEV;
4839         }
4840         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4841         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
4842
4843         return 0;
4844 }
4845
4846 #if TG3_TSO_SUPPORT != 0
4847
4848 #define TG3_TSO_FW_RELEASE_MAJOR        0x1
4849 #define TG3_TSO_FW_RELASE_MINOR         0x6
4850 #define TG3_TSO_FW_RELEASE_FIX          0x0
4851 #define TG3_TSO_FW_START_ADDR           0x08000000
4852 #define TG3_TSO_FW_TEXT_ADDR            0x08000000
4853 #define TG3_TSO_FW_TEXT_LEN             0x1aa0
4854 #define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
4855 #define TG3_TSO_FW_RODATA_LEN           0x60
4856 #define TG3_TSO_FW_DATA_ADDR            0x08001b20
4857 #define TG3_TSO_FW_DATA_LEN             0x30
4858 #define TG3_TSO_FW_SBSS_ADDR            0x08001b50
4859 #define TG3_TSO_FW_SBSS_LEN             0x2c
4860 #define TG3_TSO_FW_BSS_ADDR             0x08001b80
4861 #define TG3_TSO_FW_BSS_LEN              0x894
4862
4863 static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
4864         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
4865         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
4866         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4867         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
4868         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
4869         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
4870         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
4871         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
4872         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
4873         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
4874         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
4875         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
4876         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
4877         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
4878         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
4879         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
4880         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
4881         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
4882         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4883         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
4884         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
4885         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
4886         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
4887         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
4888         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
4889         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
4890         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
4891         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
4892         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
4893         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4894         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
4895         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
4896         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
4897         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
4898         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
4899         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
4900         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
4901         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
4902         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4903         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
4904         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
4905         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
4906         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
4907         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
4908         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
4909         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
4910         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
4911         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4912         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
4913         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4914         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
4915         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
4916         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
4917         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
4918         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
4919         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
4920         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
4921         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
4922         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
4923         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
4924         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
4925         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
4926         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
4927         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
4928         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
4929         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
4930         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
4931         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
4932         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
4933         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
4934         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
4935         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
4936         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
4937         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
4938         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
4939         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
4940         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
4941         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
4942         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
4943         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
4944         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
4945         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
4946         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
4947         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
4948         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
4949         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
4950         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
4951         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
4952         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
4953         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
4954         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
4955         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
4956         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
4957         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
4958         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
4959         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
4960         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
4961         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
4962         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
4963         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
4964         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
4965         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
4966         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
4967         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
4968         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
4969         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
4970         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
4971         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
4972         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
4973         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
4974         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
4975         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
4976         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
4977         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
4978         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
4979         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
4980         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
4981         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
4982         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
4983         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
4984         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
4985         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
4986         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
4987         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
4988         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
4989         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
4990         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
4991         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
4992         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
4993         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
4994         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
4995         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
4996         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
4997         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
4998         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
4999         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
5000         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
5001         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
5002         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5003         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
5004         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
5005         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
5006         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
5007         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
5008         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
5009         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
5010         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
5011         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
5012         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
5013         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
5014         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
5015         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
5016         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
5017         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
5018         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
5019         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
5020         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
5021         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
5022         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
5023         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
5024         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
5025         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
5026         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
5027         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
5028         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
5029         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
5030         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
5031         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
5032         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
5033         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
5034         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
5035         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
5036         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
5037         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
5038         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
5039         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
5040         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
5041         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
5042         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
5043         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
5044         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
5045         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
5046         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
5047         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
5048         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
5049         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
5050         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
5051         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
5052         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
5053         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
5054         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
5055         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
5056         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
5057         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
5058         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
5059         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
5060         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
5061         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
5062         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
5063         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
5064         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
5065         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
5066         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
5067         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
5068         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
5069         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
5070         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
5071         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
5072         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
5073         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
5074         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
5075         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
5076         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
5077         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
5078         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
5079         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
5080         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
5081         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
5082         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
5083         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
5084         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5085         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
5086         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
5087         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
5088         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
5089         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
5090         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
5091         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
5092         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
5093         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
5094         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
5095         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
5096         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
5097         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
5098         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
5099         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
5100         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
5101         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5102         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5103         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5104         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5105         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5106         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5107         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5108         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5109         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5110         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5111         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5112         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5113         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5114         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5115         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5116         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5117         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5118         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5119         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5120         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5121         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5122         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5123         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5124         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
5125         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
5126         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
5127         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
5128         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5129         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
5130         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
5131         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
5132         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
5133         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
5134         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
5135         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
5136         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
5137         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
5138         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
5139         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
5140         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
5141         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
5142         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
5143         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
5144         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
5145         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
5146         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
5147         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
5148 };
5149
5150 static u32 tg3TsoFwRodata[] = {
5151         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5152         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
5153         0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
5154         0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
5155         0x00000000,
5156 };
5157
5158 static u32 tg3TsoFwData[] = {
5159         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
5160         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5161         0x00000000,
5162 };
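/* Read with the most-significant byte first, the rodata and data words
 * above decode as ASCII tags embedded in the firmware image, e.g.
 * "MainCpuB", "MainCpuA", "stkoffld", "SwEvent0", "fatalErr" and the
 * version string "stkoffld_v1.6.0".
 */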
5163
5164 /* 5705 needs a special version of the TSO firmware.  */
5165 #define TG3_TSO5_FW_RELEASE_MAJOR       0x1
5166 #define TG3_TSO5_FW_RELASE_MINOR        0x2
5167 #define TG3_TSO5_FW_RELEASE_FIX         0x0
5168 #define TG3_TSO5_FW_START_ADDR          0x00010000
5169 #define TG3_TSO5_FW_TEXT_ADDR           0x00010000
5170 #define TG3_TSO5_FW_TEXT_LEN            0xe90
5171 #define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
5172 #define TG3_TSO5_FW_RODATA_LEN          0x50
5173 #define TG3_TSO5_FW_DATA_ADDR           0x00010f00
5174 #define TG3_TSO5_FW_DATA_LEN            0x20
5175 #define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
5176 #define TG3_TSO5_FW_SBSS_LEN            0x28
5177 #define TG3_TSO5_FW_BSS_ADDR            0x00010f50
5178 #define TG3_TSO5_FW_BSS_LEN             0x88
5179
5180 static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
5181         0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
5182         0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
5183         0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5184         0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
5185         0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
5186         0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
5187         0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5188         0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
5189         0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
5190         0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
5191         0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
5192         0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
5193         0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
5194         0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
5195         0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
5196         0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
5197         0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
5198         0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
5199         0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
5200         0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
5201         0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
5202         0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
5203         0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
5204         0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
5205         0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
5206         0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
5207         0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
5208         0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
5209         0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
5210         0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
5211         0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5212         0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
5213         0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
5214         0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
5215         0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
5216         0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
5217         0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
5218         0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
5219         0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
5220         0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
5221         0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
5222         0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
5223         0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
5224         0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
5225         0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
5226         0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
5227         0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
5228         0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
5229         0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
5230         0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
5231         0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
5232         0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
5233         0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
5234         0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
5235         0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
5236         0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
5237         0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
5238         0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
5239         0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
5240         0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
5241         0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
5242         0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
5243         0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
5244         0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
5245         0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
5246         0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
5247         0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5248         0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
5249         0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
5250         0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
5251         0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
5252         0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
5253         0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
5254         0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
5255         0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
5256         0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
5257         0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
5258         0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
5259         0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
5260         0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
5261         0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
5262         0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
5263         0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
5264         0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
5265         0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
5266         0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
5267         0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
5268         0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
5269         0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
5270         0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
5271         0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
5272         0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
5273         0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
5274         0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
5275         0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
5276         0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
5277         0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
5278         0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
5279         0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
5280         0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
5281         0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
5282         0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
5283         0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
5284         0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
5285         0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
5286         0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
5287         0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5288         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5289         0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
5290         0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
5291         0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
5292         0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
5293         0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
5294         0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
5295         0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
5296         0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
5297         0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
5298         0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5299         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5300         0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
5301         0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
5302         0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
5303         0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
5304         0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5305         0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
5306         0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
5307         0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
5308         0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
5309         0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
5310         0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
5311         0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
5312         0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
5313         0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
5314         0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
5315         0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
5316         0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
5317         0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
5318         0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
5319         0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
5320         0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
5321         0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
5322         0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
5323         0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
5324         0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
5325         0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
5326         0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
5327         0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
5328         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5329         0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
5330         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
5331         0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
5332         0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5333         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
5334         0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
5335         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5336         0x00000000, 0x00000000, 0x00000000,
5337 };
5338
5339 static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
5340         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5341         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
5342         0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
5343         0x00000000, 0x00000000, 0x00000000,
5344 };
5345
5346 static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
5347         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
5348         0x00000000, 0x00000000, 0x00000000,
5349 };
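/* As with the image above, the data words carry an ASCII version tag,
 * "stkoffld_v1.2.0", consistent with the 1.2.0 release numbers in the
 * TG3_TSO5_FW_* defines above.
 */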
5350
5351 /* tp->lock is held. */
5352 static int tg3_load_tso_firmware(struct tg3 *tp)
5353 {
5354         struct fw_info info;
5355         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
5356         int err, i;
5357
5358         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5359                 return 0;
5360
5361         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5362                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
5363                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
5364                 info.text_data = &tg3Tso5FwText[0];
5365                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
5366                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
5367                 info.rodata_data = &tg3Tso5FwRodata[0];
5368                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
5369                 info.data_len = TG3_TSO5_FW_DATA_LEN;
5370                 info.data_data = &tg3Tso5FwData[0];
5371                 cpu_base = RX_CPU_BASE;
5372                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
5373                 cpu_scratch_size = (info.text_len +
5374                                     info.rodata_len +
5375                                     info.data_len +
5376                                     TG3_TSO5_FW_SBSS_LEN +
5377                                     TG3_TSO5_FW_BSS_LEN);
5378         } else {
5379                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
5380                 info.text_len = TG3_TSO_FW_TEXT_LEN;
5381                 info.text_data = &tg3TsoFwText[0];
5382                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
5383                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
5384                 info.rodata_data = &tg3TsoFwRodata[0];
5385                 info.data_base = TG3_TSO_FW_DATA_ADDR;
5386                 info.data_len = TG3_TSO_FW_DATA_LEN;
5387                 info.data_data = &tg3TsoFwData[0];
5388                 cpu_base = TX_CPU_BASE;
5389                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
5390                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
5391         }
5392
5393         err = tg3_load_firmware_cpu(tp, cpu_base,
5394                                     cpu_scratch_base, cpu_scratch_size,
5395                                     &info);
5396         if (err)
5397                 return err;
5398
5399         /* Now startup the cpu. */
5400         tw32(cpu_base + CPU_STATE, 0xffffffff);
5401         tw32_f(cpu_base + CPU_PC,    info.text_base);
5402
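        /* Give the on-chip CPU a few chances to latch the new program
         * counter: the loop below re-asserts CPU_STATE, halts the CPU
         * and rewrites CPU_PC up to five times, waiting 1 ms between
         * attempts, before giving up with -ENODEV.
         */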
5403         for (i = 0; i < 5; i++) {
5404                 if (tr32(cpu_base + CPU_PC) == info.text_base)
5405                         break;
5406                 tw32(cpu_base + CPU_STATE, 0xffffffff);
5407                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
5408                 tw32_f(cpu_base + CPU_PC,    info.text_base);
5409                 udelay(1000);
5410         }
5411         if (i >= 5) {
5412                 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
5413                        "to set CPU PC, is %08x should be %08x\n",
5414                        tp->dev->name, tr32(cpu_base + CPU_PC),
5415                        info.text_base);
5416                 return -ENODEV;
5417         }
5418         tw32(cpu_base + CPU_STATE, 0xffffffff);
5419         tw32_f(cpu_base + CPU_MODE,  0x00000000);
5420         return 0;
5421 }
5422
5423 #endif /* TG3_TSO_SUPPORT != 0 */
5424
5425 /* tp->lock is held. */
5426 static void __tg3_set_mac_addr(struct tg3 *tp)
5427 {
5428         u32 addr_high, addr_low;
5429         int i;
5430
5431         addr_high = ((tp->dev->dev_addr[0] << 8) |
5432                      tp->dev->dev_addr[1]);
5433         addr_low = ((tp->dev->dev_addr[2] << 24) |
5434                     (tp->dev->dev_addr[3] << 16) |
5435                     (tp->dev->dev_addr[4] <<  8) |
5436                     (tp->dev->dev_addr[5] <<  0));
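        /* Illustrative example: a device address of 00:10:18:aa:bb:cc
         * packs as addr_high = 0x00000010 and addr_low = 0x18aabbcc,
         * which the loop below writes into all four MAC_ADDR_n_HIGH/LOW
         * register pairs.
         */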
5437         for (i = 0; i < 4; i++) {
5438                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
5439                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
5440         }
5441
5442         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
5443             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5444                 for (i = 0; i < 12; i++) {
5445                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
5446                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
5447                 }
5448         }
5449
5450         addr_high = (tp->dev->dev_addr[0] +
5451                      tp->dev->dev_addr[1] +
5452                      tp->dev->dev_addr[2] +
5453                      tp->dev->dev_addr[3] +
5454                      tp->dev->dev_addr[4] +
5455                      tp->dev->dev_addr[5]) &
5456                 TX_BACKOFF_SEED_MASK;
5457         tw32(MAC_TX_BACKOFF_SEED, addr_high);
5458 }
5459
5460 static int tg3_set_mac_addr(struct net_device *dev, void *p)
5461 {
5462         struct tg3 *tp = netdev_priv(dev);
5463         struct sockaddr *addr = p;
5464
5465         if (!is_valid_ether_addr(addr->sa_data))
5466                 return -EINVAL;
5467
5468         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5469
5470         spin_lock_bh(&tp->lock);
5471         __tg3_set_mac_addr(tp);
5472         spin_unlock_bh(&tp->lock);
5473
5474         return 0;
5475 }
5476
5477 /* tp->lock is held. */
5478 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
5479                            dma_addr_t mapping, u32 maxlen_flags,
5480                            u32 nic_addr)
5481 {
5482         tg3_write_mem(tp,
5483                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
5484                       ((u64) mapping >> 32));
5485         tg3_write_mem(tp,
5486                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
5487                       ((u64) mapping & 0xffffffff));
5488         tg3_write_mem(tp,
5489                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
5490                        maxlen_flags);
5491
5492         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5493                 tg3_write_mem(tp,
5494                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
5495                               nic_addr);
5496 }
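/* A TG3_BDINFO block is four 32-bit words: the host ring DMA address
 * (high word, then low), the MAXLEN_FLAGS word and, on chips without
 * the 5705_PLUS flag, the NIC-side descriptor address.  tg3_reset_hw()
 * below uses this helper for the send and receive-return RCBs in NIC
 * SRAM, e.g.
 *
 *      tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB, tp->tx_desc_mapping,
 *                     TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT,
 *                     NIC_SRAM_TX_BUFFER_DESC);
 *
 * while the standard and jumbo rx rings at RCVDBDI_*_BD are programmed
 * with direct tw32() writes to the same TG3_BDINFO_* offsets.
 */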
5497
5498 static void __tg3_set_rx_mode(struct net_device *);
5499 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
5500 {
5501         tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
5502         tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
5503         tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
5504         tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
5505         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5506                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
5507                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
5508         }
5509         tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
5510         tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
5511         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5512                 u32 val = ec->stats_block_coalesce_usecs;
5513
5514                 if (!netif_carrier_ok(tp->dev))
5515                         val = 0;
5516
5517                 tw32(HOSTCC_STAT_COAL_TICKS, val);
5518         }
5519 }
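/* The ethtool coalescing knobs map onto these registers; for example a
 * request like "ethtool -C ethX rx-usecs 20 rx-frames 5" would arrive
 * here as ec->rx_coalesce_usecs = 20 and ec->rx_max_coalesced_frames = 5
 * and be written to HOSTCC_RXCOL_TICKS and HOSTCC_RXMAX_FRAMES
 * respectively.
 */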
5520
5521 /* tp->lock is held. */
5522 static int tg3_reset_hw(struct tg3 *tp)
5523 {
5524         u32 val, rdmac_mode;
5525         int i, err, limit;
5526
5527         tg3_disable_ints(tp);
5528
5529         tg3_stop_fw(tp);
5530
5531         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
5532
5533         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
5534                 tg3_abort_hw(tp, 1);
5535         }
5536
5537         err = tg3_chip_reset(tp);
5538         if (err)
5539                 return err;
5540
5541         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
5542
5543         /* This works around an issue with Athlon chipsets on
5544          * B3 tigon3 silicon.  This bit has no effect on any
5545          * other revision.  But do not set this on PCI Express
5546          * chips.
5547          */
5548         if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
5549                 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
5550         tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5551
5552         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5553             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
5554                 val = tr32(TG3PCI_PCISTATE);
5555                 val |= PCISTATE_RETRY_SAME_DMA;
5556                 tw32(TG3PCI_PCISTATE, val);
5557         }
5558
5559         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
5560                 /* Enable some hw fixes.  */
5561                 val = tr32(TG3PCI_MSI_DATA);
5562                 val |= (1 << 26) | (1 << 28) | (1 << 29);
5563                 tw32(TG3PCI_MSI_DATA, val);
5564         }
5565
5566         /* Descriptor ring init may make accesses to the
5567          * NIC SRAM area to setup the TX descriptors, so we
5568          * can only do this after the hardware has been
5569          * successfully reset.
5570          */
5571         tg3_init_rings(tp);
5572
5573         /* This value is determined during the probe time DMA
5574          * engine test, tg3_test_dma.
5575          */
5576         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
5577
5578         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
5579                           GRC_MODE_4X_NIC_SEND_RINGS |
5580                           GRC_MODE_NO_TX_PHDR_CSUM |
5581                           GRC_MODE_NO_RX_PHDR_CSUM);
5582         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
5583         if (tp->tg3_flags & TG3_FLAG_NO_TX_PSEUDO_CSUM)
5584                 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
5585         if (tp->tg3_flags & TG3_FLAG_NO_RX_PSEUDO_CSUM)
5586                 tp->grc_mode |= GRC_MODE_NO_RX_PHDR_CSUM;
5587
5588         tw32(GRC_MODE,
5589              tp->grc_mode |
5590              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
5591
5592         /* Set up the timer prescaler register.  Clock is always 66 MHz. */
5593         val = tr32(GRC_MISC_CFG);
5594         val &= ~0xff;
5595         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
5596         tw32(GRC_MISC_CFG, val);
5597
5598         /* Initialize MBUF/DESC pool. */
5599         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
5600                 /* Do nothing.  */
5601         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
5602                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
5603                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
5604                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
5605                 else
5606                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
5607                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
5608                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
5609         }
5610 #if TG3_TSO_SUPPORT != 0
5611         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5612                 int fw_len;
5613
5614                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
5615                           TG3_TSO5_FW_RODATA_LEN +
5616                           TG3_TSO5_FW_DATA_LEN +
5617                           TG3_TSO5_FW_SBSS_LEN +
5618                           TG3_TSO5_FW_BSS_LEN);
5619                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
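                /* Worked example with the TSO5 image above: 0xe90 +
                 * 0x50 + 0x20 + 0x28 + 0x88 = 0xfb0, which rounds up
                 * to the next 0x80 boundary, so fw_len = 0x1000 and the
                 * mbuf pool is pushed 4KB past
                 * NIC_SRAM_MBUF_POOL_BASE5705 and shrunk by fw_len plus
                 * an extra 0xa00 bytes.
                 */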
5620                 tw32(BUFMGR_MB_POOL_ADDR,
5621                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
5622                 tw32(BUFMGR_MB_POOL_SIZE,
5623                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
5624         }
5625 #endif
5626
5627         if (tp->dev->mtu <= ETH_DATA_LEN) {
5628                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5629                      tp->bufmgr_config.mbuf_read_dma_low_water);
5630                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5631                      tp->bufmgr_config.mbuf_mac_rx_low_water);
5632                 tw32(BUFMGR_MB_HIGH_WATER,
5633                      tp->bufmgr_config.mbuf_high_water);
5634         } else {
5635                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5636                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
5637                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5638                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
5639                 tw32(BUFMGR_MB_HIGH_WATER,
5640                      tp->bufmgr_config.mbuf_high_water_jumbo);
5641         }
5642         tw32(BUFMGR_DMA_LOW_WATER,
5643              tp->bufmgr_config.dma_low_water);
5644         tw32(BUFMGR_DMA_HIGH_WATER,
5645              tp->bufmgr_config.dma_high_water);
5646
5647         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
5648         for (i = 0; i < 2000; i++) {
5649                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
5650                         break;
5651                 udelay(10);
5652         }
5653         if (i >= 2000) {
5654                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
5655                        tp->dev->name);
5656                 return -ENODEV;
5657         }
5658
5659         /* Setup replenish threshold. */
5660         tw32(RCVBDI_STD_THRESH, tp->rx_pending / 8);
5661
5662         /* Initialize TG3_BDINFOs at:
5663          *  RCVDBDI_STD_BD:     standard eth size rx ring
5664          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
5665          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
5666          *
5667          * like so:
5668          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
5669          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
5670          *                              ring attribute flags
5671          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
5672          *
5673          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
5674          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
5675          *
5676          * The size of each ring is fixed in the firmware, but the location is
5677          * configurable.
5678          */
5679         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5680              ((u64) tp->rx_std_mapping >> 32));
5681         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5682              ((u64) tp->rx_std_mapping & 0xffffffff));
5683         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
5684              NIC_SRAM_RX_BUFFER_DESC);
5685
5686         /* Don't even try to program the JUMBO/MINI buffer descriptor
5687          * configs on 5705.
5688          */
5689         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5690                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5691                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
5692         } else {
5693                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5694                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5695
5696                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
5697                      BDINFO_FLAGS_DISABLED);
5698
5699                 /* Setup replenish threshold. */
5700                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
5701
5702                 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
5703                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5704                              ((u64) tp->rx_jumbo_mapping >> 32));
5705                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5706                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
5707                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5708                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5709                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
5710                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
5711                 } else {
5712                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5713                              BDINFO_FLAGS_DISABLED);
5714                 }
5715
5716         }
5717
5718         /* There is only one send ring on 5705/5750, no need to explicitly
5719          * disable the others.
5720          */
5721         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5722                 /* Clear out send RCB ring in SRAM. */
5723                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
5724                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5725                                       BDINFO_FLAGS_DISABLED);
5726         }
5727
5728         tp->tx_prod = 0;
5729         tp->tx_cons = 0;
5730         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5731         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5732
5733         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
5734                        tp->tx_desc_mapping,
5735                        (TG3_TX_RING_SIZE <<
5736                         BDINFO_FLAGS_MAXLEN_SHIFT),
5737                        NIC_SRAM_TX_BUFFER_DESC);
5738
5739         /* There is only one receive return ring on 5705/5750, no need
5740          * to explicitly disable the others.
5741          */
5742         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5743                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
5744                      i += TG3_BDINFO_SIZE) {
5745                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5746                                       BDINFO_FLAGS_DISABLED);
5747                 }
5748         }
5749
5750         tp->rx_rcb_ptr = 0;
5751         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
5752
5753         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
5754                        tp->rx_rcb_mapping,
5755                        (TG3_RX_RCB_RING_SIZE(tp) <<
5756                         BDINFO_FLAGS_MAXLEN_SHIFT),
5757                        0);
5758
5759         tp->rx_std_ptr = tp->rx_pending;
5760         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
5761                      tp->rx_std_ptr);
5762
5763         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
5764                                                 tp->rx_jumbo_pending : 0;
5765         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
5766                      tp->rx_jumbo_ptr);
5767
5768         /* Initialize MAC address and backoff seed. */
5769         __tg3_set_mac_addr(tp);
5770
5771         /* MTU + ethernet header + FCS + optional VLAN tag */
5772         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
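        /* e.g. with the default 1500-byte MTU this programs a 1522-byte
         * limit: 1500 + 14 (Ethernet header) + 4 (FCS) + 4 (VLAN tag).
         */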
5773
5774         /* The slot time is changed by tg3_setup_phy if we
5775          * run at gigabit with half duplex.
5776          */
5777         tw32(MAC_TX_LENGTHS,
5778              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5779              (6 << TX_LENGTHS_IPG_SHIFT) |
5780              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5781
5782         /* Receive rules. */
5783         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
5784         tw32(RCVLPC_CONFIG, 0x0181);
5785
5786         /* Calculate RDMAC_MODE setting early; we need it to determine
5787          * the RCVLPC_STATE_ENABLE mask.
5788          */
5789         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
5790                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
5791                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
5792                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
5793                       RDMAC_MODE_LNGREAD_ENAB);
5794         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5795                 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
5796
5797         /* If statement applies to 5705 and 5750 PCI devices only */
5798         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5799              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5800             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
5801                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
5802                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5803                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5804                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
5805                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5806                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
5807                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5808                 }
5809         }
5810
5811         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
5812                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5813
5814 #if TG3_TSO_SUPPORT != 0
5815         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5816                 rdmac_mode |= (1 << 27);
5817 #endif
5818
5819         /* Receive/send statistics. */
5820         if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
5821             (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
5822                 val = tr32(RCVLPC_STATS_ENABLE);
5823                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
5824                 tw32(RCVLPC_STATS_ENABLE, val);
5825         } else {
5826                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
5827         }
5828         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
5829         tw32(SNDDATAI_STATSENAB, 0xffffff);
5830         tw32(SNDDATAI_STATSCTRL,
5831              (SNDDATAI_SCTRL_ENABLE |
5832               SNDDATAI_SCTRL_FASTUPD));
5833
5834         /* Setup host coalescing engine. */
5835         tw32(HOSTCC_MODE, 0);
5836         for (i = 0; i < 2000; i++) {
5837                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
5838                         break;
5839                 udelay(10);
5840         }
5841
5842         __tg3_set_coalesce(tp, &tp->coal);
5843
5844         /* set status block DMA address */
5845         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5846              ((u64) tp->status_mapping >> 32));
5847         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5848              ((u64) tp->status_mapping & 0xffffffff));
5849
5850         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5851                 /* Status/statistics block address.  See tg3_timer,
5852                  * the tg3_periodic_fetch_stats call there, and
5853                  * tg3_get_stats to see how this works for 5705/5750 chips.
5854                  */
5855                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5856                      ((u64) tp->stats_mapping >> 32));
5857                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5858                      ((u64) tp->stats_mapping & 0xffffffff));
5859                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
5860                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
5861         }
5862
5863         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
5864
5865         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
5866         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
5867         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5868                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
5869
5870         /* Clear statistics/status block in chip, and status block in ram. */
5871         for (i = NIC_SRAM_STATS_BLK;
5872              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
5873              i += sizeof(u32)) {
5874                 tg3_write_mem(tp, i, 0);
5875                 udelay(40);
5876         }
5877         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5878
5879         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
5880                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
5881                 /* reset to prevent losing 1st rx packet intermittently */
5882                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
5883                 udelay(10);
5884         }
5885
5886         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
5887                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
5888         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
5889         udelay(40);
5890
5891         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
5892          * If TG3_FLAG_EEPROM_WRITE_PROT is set, we should read the
5893          * register to preserve the GPIO settings for LOMs. The GPIOs,
5894          * whether used as inputs or outputs, are set by boot code after
5895          * reset.
5896          */
5897         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
5898                 u32 gpio_mask;
5899
5900                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE2 |
5901                             GRC_LCLCTRL_GPIO_OUTPUT0 | GRC_LCLCTRL_GPIO_OUTPUT2;
5902
5903                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
5904                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
5905                                      GRC_LCLCTRL_GPIO_OUTPUT3;
5906
5907                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
5908
5909                 /* GPIO1 must be driven high for eeprom write protect */
5910                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
5911                                        GRC_LCLCTRL_GPIO_OUTPUT1);
5912         }
5913         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
5914         udelay(100);
5915
5916         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
5917         tp->last_tag = 0;
5918
5919         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5920                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
5921                 udelay(40);
5922         }
5923
5924         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
5925                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
5926                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
5927                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
5928                WDMAC_MODE_LNGREAD_ENAB);
5929
5930         /* If statement applies to 5705 and 5750 PCI devices only */
5931         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5932              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5933             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
5934                 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
5935                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5936                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5937                         /* nothing */
5938                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5939                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
5940                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
5941                         val |= WDMAC_MODE_RX_ACCEL;
5942                 }
5943         }
5944
5945         tw32_f(WDMAC_MODE, val);
5946         udelay(40);
5947
5948         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
5949                 val = tr32(TG3PCI_X_CAPS);
5950                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
5951                         val &= ~PCIX_CAPS_BURST_MASK;
5952                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5953                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5954                         val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
5955                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5956                         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5957                                 val |= (tp->split_mode_max_reqs <<
5958                                         PCIX_CAPS_SPLIT_SHIFT);
5959                 }
5960                 tw32(TG3PCI_X_CAPS, val);
5961         }
5962
5963         tw32_f(RDMAC_MODE, rdmac_mode);
5964         udelay(40);
5965
5966         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
5967         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5968                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
5969         tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
5970         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
5971         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
5972         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
5973         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
5974 #if TG3_TSO_SUPPORT != 0
5975         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5976                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
5977 #endif
5978         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
5979         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
5980
5981         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
5982                 err = tg3_load_5701_a0_firmware_fix(tp);
5983                 if (err)
5984                         return err;
5985         }
5986
5987 #if TG3_TSO_SUPPORT != 0
5988         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5989                 err = tg3_load_tso_firmware(tp);
5990                 if (err)
5991                         return err;
5992         }
5993 #endif
5994
5995         tp->tx_mode = TX_MODE_ENABLE;
5996         tw32_f(MAC_TX_MODE, tp->tx_mode);
5997         udelay(100);
5998
5999         tp->rx_mode = RX_MODE_ENABLE;
6000         tw32_f(MAC_RX_MODE, tp->rx_mode);
6001         udelay(10);
6002
6003         if (tp->link_config.phy_is_low_power) {
6004                 tp->link_config.phy_is_low_power = 0;
6005                 tp->link_config.speed = tp->link_config.orig_speed;
6006                 tp->link_config.duplex = tp->link_config.orig_duplex;
6007                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
6008         }
6009
6010         tp->mi_mode = MAC_MI_MODE_BASE;
6011         tw32_f(MAC_MI_MODE, tp->mi_mode);
6012         udelay(80);
6013
6014         tw32(MAC_LED_CTRL, tp->led_ctrl);
6015
6016         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
6017         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6018                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6019                 udelay(10);
6020         }
6021         tw32_f(MAC_RX_MODE, tp->rx_mode);
6022         udelay(10);
6023
6024         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6025                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
6026                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
6027                         /* Set drive transmission level to 1.2V  */
6028                         /* only if the signal pre-emphasis bit is not set  */
6029                         val = tr32(MAC_SERDES_CFG);
6030                         val &= 0xfffff000;
6031                         val |= 0x880;
6032                         tw32(MAC_SERDES_CFG, val);
6033                 }
6034                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
6035                         tw32(MAC_SERDES_CFG, 0x616000);
6036         }
6037
6038         /* Prevent chip from dropping frames when flow control
6039          * is enabled.
6040          */
6041         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
6042
6043         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
6044             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6045                 /* Use hardware link auto-negotiation */
6046                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
6047         }
6048
6049         err = tg3_setup_phy(tp, 1);
6050         if (err)
6051                 return err;
6052
6053         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6054                 u32 tmp;
6055
6056                 /* Clear CRC stats. */
6057                 if (!tg3_readphy(tp, 0x1e, &tmp)) {
6058                         tg3_writephy(tp, 0x1e, tmp | 0x8000);
6059                         tg3_readphy(tp, 0x14, &tmp);
6060                 }
6061         }
6062
6063         __tg3_set_rx_mode(tp->dev);
6064
6065         /* Initialize receive rules. */
6066         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
6067         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
6068         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
6069         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
6070
6071         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6072             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6073                 limit = 8;
6074         else
6075                 limit = 16;
6076         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
6077                 limit -= 4;
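        /* The switch below deliberately falls through: starting at case
         * 'limit' it zeroes every rule/value pair from limit - 1 down
         * to 4, leaving rules 0 and 1 (programmed above) and rules 2
         * and 3 (the commented-out cases) alone.
         */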
6078         switch (limit) {
6079         case 16:
6080                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
6081         case 15:
6082                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
6083         case 14:
6084                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
6085         case 13:
6086                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
6087         case 12:
6088                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
6089         case 11:
6090                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
6091         case 10:
6092                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
6093         case 9:
6094                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
6095         case 8:
6096                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
6097         case 7:
6098                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
6099         case 6:
6100                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
6101         case 5:
6102                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
6103         case 4:
6104                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
6105         case 3:
6106                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
6107         case 2:
6108         case 1:
6109
6110         default:
6111                 break;
6112         }
6113
6114         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
6115
6116         return 0;
6117 }
6118
6119 /* Called at device open time to get the chip ready for
6120  * packet processing.  Invoked with tp->lock held.
6121  */
6122 static int tg3_init_hw(struct tg3 *tp)
6123 {
6124         int err;
6125
6126         /* Force the chip into D0. */
6127         err = tg3_set_power_state(tp, 0);
6128         if (err)
6129                 goto out;
6130
6131         tg3_switch_clocks(tp);
6132
6133         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
6134
6135         err = tg3_reset_hw(tp);
6136
6137 out:
6138         return err;
6139 }
6140
6141 #define TG3_STAT_ADD32(PSTAT, REG) \
6142 do {    u32 __val = tr32(REG); \
6143         (PSTAT)->low += __val; \
6144         if ((PSTAT)->low < __val) \
6145                 (PSTAT)->high += 1; \
6146 } while (0)
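/* The low word wraps modulo 2^32, so a post-add value smaller than the
 * amount just added means the 32-bit addition overflowed and the high
 * word must be carried.  For example, low = 0xfffffff0 plus
 * __val = 0x20 leaves low = 0x10, which is < 0x20, so high is bumped.
 */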
6147
6148 static void tg3_periodic_fetch_stats(struct tg3 *tp)
6149 {
6150         struct tg3_hw_stats *sp = tp->hw_stats;
6151
6152         if (!netif_carrier_ok(tp->dev))
6153                 return;
6154
6155         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
6156         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
6157         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
6158         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
6159         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
6160         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
6161         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
6162         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
6163         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
6164         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
6165         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
6166         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
6167         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
6168
6169         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
6170         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
6171         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
6172         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
6173         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
6174         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
6175         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
6176         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
6177         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
6178         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
6179         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
6180         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
6181         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
6182         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
6183 }
6184
6185 static void tg3_timer(unsigned long __opaque)
6186 {
6187         struct tg3 *tp = (struct tg3 *) __opaque;
6188
6189         spin_lock(&tp->lock);
6190
6191         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6192                 /* All of this garbage is because when using non-tagged
6193                  * IRQ status the mailbox/status_block protocol the chip
6194                  * uses with the cpu is race prone.
6195                  */
6196                 if (tp->hw_status->status & SD_STATUS_UPDATED) {
6197                         tw32(GRC_LOCAL_CTRL,
6198                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
6199                 } else {
6200                         tw32(HOSTCC_MODE, tp->coalesce_mode |
6201                              (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
6202                 }
6203
6204                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
6205                         tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
6206                         spin_unlock(&tp->lock);
6207                         schedule_work(&tp->reset_task);
6208                         return;
6209                 }
6210         }
6211
6212         /* This part only runs once per second. */
6213         if (!--tp->timer_counter) {
6214                 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6215                         tg3_periodic_fetch_stats(tp);
6216
6217                 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
6218                         u32 mac_stat;
6219                         int phy_event;
6220
6221                         mac_stat = tr32(MAC_STATUS);
6222
6223                         phy_event = 0;
6224                         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
6225                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
6226                                         phy_event = 1;
6227                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
6228                                 phy_event = 1;
6229
6230                         if (phy_event)
6231                                 tg3_setup_phy(tp, 0);
6232                 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
6233                         u32 mac_stat = tr32(MAC_STATUS);
6234                         int need_setup = 0;
6235
6236                         if (netif_carrier_ok(tp->dev) &&
6237                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
6238                                 need_setup = 1;
6239                         }
6240                         if (!netif_carrier_ok(tp->dev) &&
6241                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
6242                                          MAC_STATUS_SIGNAL_DET))) {
6243                                 need_setup = 1;
6244                         }
6245                         if (need_setup) {
6246                                 tw32_f(MAC_MODE,
6247                                      (tp->mac_mode &
6248                                       ~MAC_MODE_PORT_MODE_MASK));
6249                                 udelay(40);
6250                                 tw32_f(MAC_MODE, tp->mac_mode);
6251                                 udelay(40);
6252                                 tg3_setup_phy(tp, 0);
6253                         }
6254                 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
6255                         tg3_serdes_parallel_detect(tp);
6256
6257                 tp->timer_counter = tp->timer_multiplier;
6258         }
6259
6260         /* Heartbeat is only sent once every 2 seconds.  */
6261         if (!--tp->asf_counter) {
6262                 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6263                         u32 val;
6264
6265                         tg3_write_mem_fast(tp, NIC_SRAM_FW_CMD_MBOX,
6266                                            FWCMD_NICDRV_ALIVE2);
6267                         tg3_write_mem_fast(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
6268                         /* 5 second timeout */
6269                         tg3_write_mem_fast(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
6270                         val = tr32(GRC_RX_CPU_EVENT);
6271                         val |= (1 << 14);
6272                         tw32(GRC_RX_CPU_EVENT, val);
6273                 }
6274                 tp->asf_counter = tp->asf_multiplier;
6275         }
6276
6277         spin_unlock(&tp->lock);
6278
6279         tp->timer.expires = jiffies + tp->timer_offset;
6280         add_timer(&tp->timer);
6281 }
6282
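/* Check that the board can actually raise an interrupt: install the
 * throwaway tg3_test_isr() handler, force a "coalesce now" event and poll
 * the interrupt mailbox for up to ~50 ms, then reinstall the normal MSI or
 * INTx handler.  Returns 0 if an interrupt was seen, -EIO if not, or the
 * request_irq() error if the real handler could not be restored.
 */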
6283 static int tg3_test_interrupt(struct tg3 *tp)
6284 {
6285         struct net_device *dev = tp->dev;
6286         int err, i;
6287         u32 int_mbox = 0;
6288
6289         if (!netif_running(dev))
6290                 return -ENODEV;
6291
6292         tg3_disable_ints(tp);
6293
6294         free_irq(tp->pdev->irq, dev);
6295
6296         err = request_irq(tp->pdev->irq, tg3_test_isr,
6297                           SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6298         if (err)
6299                 return err;
6300
6301         tp->hw_status->status &= ~SD_STATUS_UPDATED;
6302         tg3_enable_ints(tp);
6303
6304         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
6305                HOSTCC_MODE_NOW);
6306
6307         for (i = 0; i < 5; i++) {
6308                 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
6309                                         TG3_64BIT_REG_LOW);
6310                 if (int_mbox != 0)
6311                         break;
6312                 msleep(10);
6313         }
6314
6315         tg3_disable_ints(tp);
6316
6317         free_irq(tp->pdev->irq, dev);
6318         
6319         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
6320                 err = request_irq(tp->pdev->irq, tg3_msi,
6321                                   SA_SAMPLE_RANDOM, dev->name, dev);
6322         else {
6323                 irqreturn_t (*fn)(int, void *, struct pt_regs *) = tg3_interrupt;
6324                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6325                         fn = tg3_interrupt_tagged;
6326                 err = request_irq(tp->pdev->irq, fn,
6327                                   SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6328         }
6329
6330         if (err)
6331                 return err;
6332
6333         if (int_mbox != 0)
6334                 return 0;
6335
6336         return -EIO;
6337 }
6338
6339 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but
6340  * INTx mode is successfully restored.
6341  */
6342 static int tg3_test_msi(struct tg3 *tp)
6343 {
6344         struct net_device *dev = tp->dev;
6345         int err;
6346         u16 pci_cmd;
6347
6348         if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
6349                 return 0;
6350
6351         /* Turn off SERR reporting in case MSI terminates with Master
6352          * Abort.
6353          */
6354         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
6355         pci_write_config_word(tp->pdev, PCI_COMMAND,
6356                               pci_cmd & ~PCI_COMMAND_SERR);
6357
6358         err = tg3_test_interrupt(tp);
6359
6360         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
6361
6362         if (!err)
6363                 return 0;
6364
6365         /* other failures */
6366         if (err != -EIO)
6367                 return err;
6368
6369         /* MSI test failed, go back to INTx mode */
6370         printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
6371                "switching to INTx mode. Please report this failure to "
6372                "the PCI maintainer and include system chipset information.\n",
6373                        tp->dev->name);
6374
6375         free_irq(tp->pdev->irq, dev);
6376         pci_disable_msi(tp->pdev);
6377
6378         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6379
6380         {
6381                 irqreturn_t (*fn)(int, void *, struct pt_regs *) = tg3_interrupt;
6382                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6383                         fn = tg3_interrupt_tagged;
6384
6385                 err = request_irq(tp->pdev->irq, fn,
6386                                   SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6387         }
6388         if (err)
6389                 return err;
6390
6391         /* Need to reset the chip because the MSI cycle may have terminated
6392          * with Master Abort.
6393          */
6394         tg3_full_lock(tp, 1);
6395
6396         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6397         err = tg3_init_hw(tp);
6398
6399         tg3_full_unlock(tp);
6400
6401         if (err)
6402                 free_irq(tp->pdev->irq, dev);
6403
6404         return err;
6405 }
6406
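/* Bring the interface up: allocate the DMA rings, enable MSI where the
 * chip supports it (5750 and later, excluding the 5750 A and B steppings),
 * install the interrupt handler, program the hardware and start the
 * periodic timer.  tg3_test_msi() then verifies MSI delivery and falls
 * back to INTx if no interrupt arrives.
 */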
6407 static int tg3_open(struct net_device *dev)
6408 {
6409         struct tg3 *tp = netdev_priv(dev);
6410         int err;
6411
6412         tg3_full_lock(tp, 0);
6413
6414         tg3_disable_ints(tp);
6415         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
6416
6417         tg3_full_unlock(tp);
6418
6419         /* The placement of this call is tied
6420          * to the setup and use of Host TX descriptors.
6421          */
6422         err = tg3_alloc_consistent(tp);
6423         if (err)
6424                 return err;
6425
6426         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
6427             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
6428             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX)) {
6429                 /* All MSI supporting chips should support tagged
6430                  * status.  Assert that this is the case.
6431                  */
6432                 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6433                         printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
6434                                "Not using MSI.\n", tp->dev->name);
6435                 } else if (pci_enable_msi(tp->pdev) == 0) {
6436                         u32 msi_mode;
6437
6438                         msi_mode = tr32(MSGINT_MODE);
6439                         tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
6440                         tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
6441                 }
6442         }
6443         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
6444                 err = request_irq(tp->pdev->irq, tg3_msi,
6445                                   SA_SAMPLE_RANDOM, dev->name, dev);
6446         else {
6447                 irqreturn_t (*fn)(int, void *, struct pt_regs *) = tg3_interrupt;
6448                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6449                         fn = tg3_interrupt_tagged;
6450
6451                 err = request_irq(tp->pdev->irq, fn,
6452                                   SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6453         }
6454
6455         if (err) {
6456                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6457                         pci_disable_msi(tp->pdev);
6458                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6459                 }
6460                 tg3_free_consistent(tp);
6461                 return err;
6462         }
6463
6464         tg3_full_lock(tp, 0);
6465
6466         err = tg3_init_hw(tp);
6467         if (err) {
6468                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6469                 tg3_free_rings(tp);
6470         } else {
6471                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6472                         tp->timer_offset = HZ;
6473                 else
6474                         tp->timer_offset = HZ / 10;
6475
6476                 BUG_ON(tp->timer_offset > HZ);
6477                 tp->timer_counter = tp->timer_multiplier =
6478                         (HZ / tp->timer_offset);
6479                 tp->asf_counter = tp->asf_multiplier =
6480                         ((HZ / tp->timer_offset) * 2);
6481
6482                 init_timer(&tp->timer);
6483                 tp->timer.expires = jiffies + tp->timer_offset;
6484                 tp->timer.data = (unsigned long) tp;
6485                 tp->timer.function = tg3_timer;
6486         }
6487
6488         tg3_full_unlock(tp);
6489
6490         if (err) {
6491                 free_irq(tp->pdev->irq, dev);
6492                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6493                         pci_disable_msi(tp->pdev);
6494                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6495                 }
6496                 tg3_free_consistent(tp);
6497                 return err;
6498         }
6499
6500         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6501                 err = tg3_test_msi(tp);
6502
6503                 if (err) {
6504                         tg3_full_lock(tp, 0);
6505
6506                         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6507                                 pci_disable_msi(tp->pdev);
6508                                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6509                         }
6510                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6511                         tg3_free_rings(tp);
6512                         tg3_free_consistent(tp);
6513
6514                         tg3_full_unlock(tp);
6515
6516                         return err;
6517                 }
6518         }
6519
6520         tg3_full_lock(tp, 0);
6521
6522         add_timer(&tp->timer);
6523         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
6524         tg3_enable_ints(tp);
6525
6526         tg3_full_unlock(tp);
6527
6528         netif_start_queue(dev);
6529
6530         return 0;
6531 }
6532
6533 #if 0
6534 /*static*/ void tg3_dump_state(struct tg3 *tp)
6535 {
6536         u32 val32, val32_2, val32_3, val32_4, val32_5;
6537         u16 val16;
6538         int i;
6539
6540         pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
6541         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
6542         printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
6543                val16, val32);
6544
6545         /* MAC block */
6546         printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
6547                tr32(MAC_MODE), tr32(MAC_STATUS));
6548         printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
6549                tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
6550         printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
6551                tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
6552         printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
6553                tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
6554
6555         /* Send data initiator control block */
6556         printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
6557                tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
6558         printk("       SNDDATAI_STATSCTRL[%08x]\n",
6559                tr32(SNDDATAI_STATSCTRL));
6560
6561         /* Send data completion control block */
6562         printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
6563
6564         /* Send BD ring selector block */
6565         printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
6566                tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
6567
6568         /* Send BD initiator control block */
6569         printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
6570                tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
6571
6572         /* Send BD completion control block */
6573         printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
6574
6575         /* Receive list placement control block */
6576         printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
6577                tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
6578         printk("       RCVLPC_STATSCTRL[%08x]\n",
6579                tr32(RCVLPC_STATSCTRL));
6580
6581         /* Receive data and receive BD initiator control block */
6582         printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
6583                tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
6584
6585         /* Receive data completion control block */
6586         printk("DEBUG: RCVDCC_MODE[%08x]\n",
6587                tr32(RCVDCC_MODE));
6588
6589         /* Receive BD initiator control block */
6590         printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
6591                tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
6592
6593         /* Receive BD completion control block */
6594         printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
6595                tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
6596
6597         /* Receive list selector control block */
6598         printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
6599                tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
6600
6601         /* Mbuf cluster free block */
6602         printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
6603                tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
6604
6605         /* Host coalescing control block */
6606         printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
6607                tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
6608         printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
6609                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6610                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6611         printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
6612                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6613                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6614         printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
6615                tr32(HOSTCC_STATS_BLK_NIC_ADDR));
6616         printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
6617                tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
6618
6619         /* Memory arbiter control block */
6620         printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
6621                tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
6622
6623         /* Buffer manager control block */
6624         printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
6625                tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
6626         printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
6627                tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
6628         printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
6629                "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
6630                tr32(BUFMGR_DMA_DESC_POOL_ADDR),
6631                tr32(BUFMGR_DMA_DESC_POOL_SIZE));
6632
6633         /* Read DMA control block */
6634         printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
6635                tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
6636
6637         /* Write DMA control block */
6638         printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
6639                tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
6640
6641         /* DMA completion block */
6642         printk("DEBUG: DMAC_MODE[%08x]\n",
6643                tr32(DMAC_MODE));
6644
6645         /* GRC block */
6646         printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
6647                tr32(GRC_MODE), tr32(GRC_MISC_CFG));
6648         printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
6649                tr32(GRC_LOCAL_CTRL));
6650
6651         /* TG3_BDINFOs */
6652         printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
6653                tr32(RCVDBDI_JUMBO_BD + 0x0),
6654                tr32(RCVDBDI_JUMBO_BD + 0x4),
6655                tr32(RCVDBDI_JUMBO_BD + 0x8),
6656                tr32(RCVDBDI_JUMBO_BD + 0xc));
6657         printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
6658                tr32(RCVDBDI_STD_BD + 0x0),
6659                tr32(RCVDBDI_STD_BD + 0x4),
6660                tr32(RCVDBDI_STD_BD + 0x8),
6661                tr32(RCVDBDI_STD_BD + 0xc));
6662         printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
6663                tr32(RCVDBDI_MINI_BD + 0x0),
6664                tr32(RCVDBDI_MINI_BD + 0x4),
6665                tr32(RCVDBDI_MINI_BD + 0x8),
6666                tr32(RCVDBDI_MINI_BD + 0xc));
6667
6668         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
6669         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
6670         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
6671         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
6672         printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
6673                val32, val32_2, val32_3, val32_4);
6674
6675         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
6676         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
6677         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
6678         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
6679         printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
6680                val32, val32_2, val32_3, val32_4);
6681
6682         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
6683         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
6684         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
6685         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
6686         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
6687         printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
6688                val32, val32_2, val32_3, val32_4, val32_5);
6689
6690         /* SW status block */
6691         printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6692                tp->hw_status->status,
6693                tp->hw_status->status_tag,
6694                tp->hw_status->rx_jumbo_consumer,
6695                tp->hw_status->rx_consumer,
6696                tp->hw_status->rx_mini_consumer,
6697                tp->hw_status->idx[0].rx_producer,
6698                tp->hw_status->idx[0].tx_consumer);
6699
6700         /* SW statistics block */
6701         printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
6702                ((u32 *)tp->hw_stats)[0],
6703                ((u32 *)tp->hw_stats)[1],
6704                ((u32 *)tp->hw_stats)[2],
6705                ((u32 *)tp->hw_stats)[3]);
6706
6707         /* Mailboxes */
6708         printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
6709                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
6710                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
6711                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
6712                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
6713
6714         /* NIC side send descriptors. */
6715         for (i = 0; i < 6; i++) {
6716                 unsigned long txd;
6717
6718                 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
6719                         + (i * sizeof(struct tg3_tx_buffer_desc));
6720                 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
6721                        i,
6722                        readl(txd + 0x0), readl(txd + 0x4),
6723                        readl(txd + 0x8), readl(txd + 0xc));
6724         }
6725
6726         /* NIC side RX descriptors. */
6727         for (i = 0; i < 6; i++) {
6728                 unsigned long rxd;
6729
6730                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
6731                         + (i * sizeof(struct tg3_rx_buffer_desc));
6732                 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
6733                        i,
6734                        readl(rxd + 0x0), readl(rxd + 0x4),
6735                        readl(rxd + 0x8), readl(rxd + 0xc));
6736                 rxd += (4 * sizeof(u32));
6737                 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
6738                        i,
6739                        readl(rxd + 0x0), readl(rxd + 0x4),
6740                        readl(rxd + 0x8), readl(rxd + 0xc));
6741         }
6742
6743         for (i = 0; i < 6; i++) {
6744                 unsigned long rxd;
6745
6746                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
6747                         + (i * sizeof(struct tg3_rx_buffer_desc));
6748                 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
6749                        i,
6750                        readl(rxd + 0x0), readl(rxd + 0x4),
6751                        readl(rxd + 0x8), readl(rxd + 0xc));
6752                 rxd += (4 * sizeof(u32));
6753                 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
6754                        i,
6755                        readl(rxd + 0x0), readl(rxd + 0x4),
6756                        readl(rxd + 0x8), readl(rxd + 0xc));
6757         }
6758 }
6759 #endif
6760
6761 static struct net_device_stats *tg3_get_stats(struct net_device *);
6762 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
6763
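/* Bring the interface down: stop the queue and the periodic timer, halt
 * the chip, release the IRQ (and MSI, if it was enabled) and free the
 * rings.  The current statistics are snapshotted into net_stats_prev /
 * estats_prev so the next open continues counting from the same totals.
 */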
6764 static int tg3_close(struct net_device *dev)
6765 {
6766         struct tg3 *tp = netdev_priv(dev);
6767
6768         netif_stop_queue(dev);
6769
6770         del_timer_sync(&tp->timer);
6771
6772         tg3_full_lock(tp, 1);
6773 #if 0
6774         tg3_dump_state(tp);
6775 #endif
6776
6777         tg3_disable_ints(tp);
6778
6779         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6780         tg3_free_rings(tp);
6781         tp->tg3_flags &=
6782                 ~(TG3_FLAG_INIT_COMPLETE |
6783                   TG3_FLAG_GOT_SERDES_FLOWCTL);
6784         netif_carrier_off(tp->dev);
6785
6786         tg3_full_unlock(tp);
6787
6788         free_irq(tp->pdev->irq, dev);
6789         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6790                 pci_disable_msi(tp->pdev);
6791                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6792         }
6793
6794         memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
6795                sizeof(tp->net_stats_prev));
6796         memcpy(&tp->estats_prev, tg3_get_estats(tp),
6797                sizeof(tp->estats_prev));
6798
6799         tg3_free_consistent(tp);
6800
6801         return 0;
6802 }
6803
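/* The hardware statistics counters are 64 bits wide (tg3_stat64_t).  On
 * 32-bit hosts only the low word fits in an unsigned long, so the high
 * word is dropped there; 64-bit hosts get the full counter.
 */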
6804 static inline unsigned long get_stat64(tg3_stat64_t *val)
6805 {
6806         unsigned long ret;
6807
6808 #if (BITS_PER_LONG == 32)
6809         ret = val->low;
6810 #else
6811         ret = ((u64)val->high << 32) | ((u64)val->low);
6812 #endif
6813         return ret;
6814 }
6815
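/* On 5700/5701 boards with a copper PHY the CRC error count is read from
 * the PHY (registers 0x1e/0x14) and accumulated in tp->phy_crc_errors;
 * everything else simply reports the MAC's rx_fcs_errors counter.
 */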
6816 static unsigned long calc_crc_errors(struct tg3 *tp)
6817 {
6818         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6819
6820         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6821             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
6822              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
6823                 u32 val;
6824
6825                 spin_lock_bh(&tp->lock);
6826                 if (!tg3_readphy(tp, 0x1e, &val)) {
6827                         tg3_writephy(tp, 0x1e, val | 0x8000);
6828                         tg3_readphy(tp, 0x14, &val);
6829                 } else
6830                         val = 0;
6831                 spin_unlock_bh(&tp->lock);
6832
6833                 tp->phy_crc_errors += val;
6834
6835                 return tp->phy_crc_errors;
6836         }
6837
6838         return get_stat64(&hw_stats->rx_fcs_errors);
6839 }
6840
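/* Each ethtool statistic is the live hardware counter plus the snapshot
 * saved in tp->estats_prev when the interface was last closed (see
 * tg3_close()), so the reported totals survive the chip reset done on a
 * down/up cycle.
 */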
6841 #define ESTAT_ADD(member) \
6842         estats->member =        old_estats->member + \
6843                                 get_stat64(&hw_stats->member)
6844
6845 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
6846 {
6847         struct tg3_ethtool_stats *estats = &tp->estats;
6848         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
6849         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6850
6851         if (!hw_stats)
6852                 return old_estats;
6853
6854         ESTAT_ADD(rx_octets);
6855         ESTAT_ADD(rx_fragments);
6856         ESTAT_ADD(rx_ucast_packets);
6857         ESTAT_ADD(rx_mcast_packets);
6858         ESTAT_ADD(rx_bcast_packets);
6859         ESTAT_ADD(rx_fcs_errors);
6860         ESTAT_ADD(rx_align_errors);
6861         ESTAT_ADD(rx_xon_pause_rcvd);
6862         ESTAT_ADD(rx_xoff_pause_rcvd);
6863         ESTAT_ADD(rx_mac_ctrl_rcvd);
6864         ESTAT_ADD(rx_xoff_entered);
6865         ESTAT_ADD(rx_frame_too_long_errors);
6866         ESTAT_ADD(rx_jabbers);
6867         ESTAT_ADD(rx_undersize_packets);
6868         ESTAT_ADD(rx_in_length_errors);
6869         ESTAT_ADD(rx_out_length_errors);
6870         ESTAT_ADD(rx_64_or_less_octet_packets);
6871         ESTAT_ADD(rx_65_to_127_octet_packets);
6872         ESTAT_ADD(rx_128_to_255_octet_packets);
6873         ESTAT_ADD(rx_256_to_511_octet_packets);
6874         ESTAT_ADD(rx_512_to_1023_octet_packets);
6875         ESTAT_ADD(rx_1024_to_1522_octet_packets);
6876         ESTAT_ADD(rx_1523_to_2047_octet_packets);
6877         ESTAT_ADD(rx_2048_to_4095_octet_packets);
6878         ESTAT_ADD(rx_4096_to_8191_octet_packets);
6879         ESTAT_ADD(rx_8192_to_9022_octet_packets);
6880
6881         ESTAT_ADD(tx_octets);
6882         ESTAT_ADD(tx_collisions);
6883         ESTAT_ADD(tx_xon_sent);
6884         ESTAT_ADD(tx_xoff_sent);
6885         ESTAT_ADD(tx_flow_control);
6886         ESTAT_ADD(tx_mac_errors);
6887         ESTAT_ADD(tx_single_collisions);
6888         ESTAT_ADD(tx_mult_collisions);
6889         ESTAT_ADD(tx_deferred);
6890         ESTAT_ADD(tx_excessive_collisions);
6891         ESTAT_ADD(tx_late_collisions);
6892         ESTAT_ADD(tx_collide_2times);
6893         ESTAT_ADD(tx_collide_3times);
6894         ESTAT_ADD(tx_collide_4times);
6895         ESTAT_ADD(tx_collide_5times);
6896         ESTAT_ADD(tx_collide_6times);
6897         ESTAT_ADD(tx_collide_7times);
6898         ESTAT_ADD(tx_collide_8times);
6899         ESTAT_ADD(tx_collide_9times);
6900         ESTAT_ADD(tx_collide_10times);
6901         ESTAT_ADD(tx_collide_11times);
6902         ESTAT_ADD(tx_collide_12times);
6903         ESTAT_ADD(tx_collide_13times);
6904         ESTAT_ADD(tx_collide_14times);
6905         ESTAT_ADD(tx_collide_15times);
6906         ESTAT_ADD(tx_ucast_packets);
6907         ESTAT_ADD(tx_mcast_packets);
6908         ESTAT_ADD(tx_bcast_packets);
6909         ESTAT_ADD(tx_carrier_sense_errors);
6910         ESTAT_ADD(tx_discards);
6911         ESTAT_ADD(tx_errors);
6912
6913         ESTAT_ADD(dma_writeq_full);
6914         ESTAT_ADD(dma_write_prioq_full);
6915         ESTAT_ADD(rxbds_empty);
6916         ESTAT_ADD(rx_discards);
6917         ESTAT_ADD(rx_errors);
6918         ESTAT_ADD(rx_threshold_hit);
6919
6920         ESTAT_ADD(dma_readq_full);
6921         ESTAT_ADD(dma_read_prioq_full);
6922         ESTAT_ADD(tx_comp_queue_full);
6923
6924         ESTAT_ADD(ring_set_send_prod_index);
6925         ESTAT_ADD(ring_status_update);
6926         ESTAT_ADD(nic_irqs);
6927         ESTAT_ADD(nic_avoided_irqs);
6928         ESTAT_ADD(nic_tx_threshold_hit);
6929
6930         return estats;
6931 }
6932
6933 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
6934 {
6935         struct tg3 *tp = netdev_priv(dev);
6936         struct net_device_stats *stats = &tp->net_stats;
6937         struct net_device_stats *old_stats = &tp->net_stats_prev;
6938         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6939
6940         if (!hw_stats)
6941                 return old_stats;
6942
6943         stats->rx_packets = old_stats->rx_packets +
6944                 get_stat64(&hw_stats->rx_ucast_packets) +
6945                 get_stat64(&hw_stats->rx_mcast_packets) +
6946                 get_stat64(&hw_stats->rx_bcast_packets);
6947                 
6948         stats->tx_packets = old_stats->tx_packets +
6949                 get_stat64(&hw_stats->tx_ucast_packets) +
6950                 get_stat64(&hw_stats->tx_mcast_packets) +
6951                 get_stat64(&hw_stats->tx_bcast_packets);
6952
6953         stats->rx_bytes = old_stats->rx_bytes +
6954                 get_stat64(&hw_stats->rx_octets);
6955         stats->tx_bytes = old_stats->tx_bytes +
6956                 get_stat64(&hw_stats->tx_octets);
6957
6958         stats->rx_errors = old_stats->rx_errors +
6959                 get_stat64(&hw_stats->rx_errors);
6960         stats->tx_errors = old_stats->tx_errors +
6961                 get_stat64(&hw_stats->tx_errors) +
6962                 get_stat64(&hw_stats->tx_mac_errors) +
6963                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
6964                 get_stat64(&hw_stats->tx_discards);
6965
6966         stats->multicast = old_stats->multicast +
6967                 get_stat64(&hw_stats->rx_mcast_packets);
6968         stats->collisions = old_stats->collisions +
6969                 get_stat64(&hw_stats->tx_collisions);
6970
6971         stats->rx_length_errors = old_stats->rx_length_errors +
6972                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
6973                 get_stat64(&hw_stats->rx_undersize_packets);
6974
6975         stats->rx_over_errors = old_stats->rx_over_errors +
6976                 get_stat64(&hw_stats->rxbds_empty);
6977         stats->rx_frame_errors = old_stats->rx_frame_errors +
6978                 get_stat64(&hw_stats->rx_align_errors);
6979         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
6980                 get_stat64(&hw_stats->tx_discards);
6981         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
6982                 get_stat64(&hw_stats->tx_carrier_sense_errors);
6983
6984         stats->rx_crc_errors = old_stats->rx_crc_errors +
6985                 calc_crc_errors(tp);
6986
6987         stats->rx_missed_errors = old_stats->rx_missed_errors +
6988                 get_stat64(&hw_stats->rx_discards);
6989
6990         return stats;
6991 }
6992
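/* Bit-reflected CRC-32 (polynomial 0xedb88320, LSB first, inverted result),
 * i.e. the same CRC used on the wire by Ethernet.  __tg3_set_rx_mode() uses
 * it to hash multicast addresses into the MAC hash registers.
 */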
6993 static inline u32 calc_crc(unsigned char *buf, int len)
6994 {
6995         u32 reg;
6996         u32 tmp;
6997         int j, k;
6998
6999         reg = 0xffffffff;
7000
7001         for (j = 0; j < len; j++) {
7002                 reg ^= buf[j];
7003
7004                 for (k = 0; k < 8; k++) {
7005                         tmp = reg & 0x01;
7006
7007                         reg >>= 1;
7008
7009                         if (tmp) {
7010                                 reg ^= 0xedb88320;
7011                         }
7012                 }
7013         }
7014
7015         return ~reg;
7016 }
7017
7018 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
7019 {
7020         /* accept or reject all multicast frames */
7021         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
7022         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
7023         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
7024         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
7025 }
7026
7027 static void __tg3_set_rx_mode(struct net_device *dev)
7028 {
7029         struct tg3 *tp = netdev_priv(dev);
7030         u32 rx_mode;
7031
7032         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
7033                                   RX_MODE_KEEP_VLAN_TAG);
7034
7035         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
7036          * flag clear.
7037          */
7038 #if TG3_VLAN_TAG_USED
7039         if (!tp->vlgrp &&
7040             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
7041                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
7042 #else
7043         /* By definition, VLAN is always disabled in this
7044          * case.
7045          */
7046         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
7047                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
7048 #endif
7049
7050         if (dev->flags & IFF_PROMISC) {
7051                 /* Promiscuous mode. */
7052                 rx_mode |= RX_MODE_PROMISC;
7053         } else if (dev->flags & IFF_ALLMULTI) {
7054                 /* Accept all multicast. */
7055                 tg3_set_multi(tp, 1);
7056         } else if (dev->mc_count < 1) {
7057                 /* Reject all multicast. */
7058                 tg3_set_multi(tp, 0);
7059         } else {
7060                 /* Accept one or more multicast(s). */
7061                 struct dev_mc_list *mclist;
7062                 unsigned int i;
7063                 u32 mc_filter[4] = { 0, };
7064                 u32 regidx;
7065                 u32 bit;
7066                 u32 crc;
7067
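                /* Hash each address into the 128-bit filter: bits 6:5 of
                 * the inverted CRC select one of the four hash registers,
                 * bits 4:0 select the bit within it.
                 */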
7068                 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
7069                      i++, mclist = mclist->next) {
7070
7071                         crc = calc_crc(mclist->dmi_addr, ETH_ALEN);
7072                         bit = ~crc & 0x7f;
7073                         regidx = (bit & 0x60) >> 5;
7074                         bit &= 0x1f;
7075                         mc_filter[regidx] |= (1 << bit);
7076                 }
7077
7078                 tw32(MAC_HASH_REG_0, mc_filter[0]);
7079                 tw32(MAC_HASH_REG_1, mc_filter[1]);
7080                 tw32(MAC_HASH_REG_2, mc_filter[2]);
7081                 tw32(MAC_HASH_REG_3, mc_filter[3]);
7082         }
7083
7084         if (rx_mode != tp->rx_mode) {
7085                 tp->rx_mode = rx_mode;
7086                 tw32_f(MAC_RX_MODE, rx_mode);
7087                 udelay(10);
7088         }
7089 }
7090
7091 static void tg3_set_rx_mode(struct net_device *dev)
7092 {
7093         struct tg3 *tp = netdev_priv(dev);
7094
7095         tg3_full_lock(tp, 0);
7096         __tg3_set_rx_mode(dev);
7097         tg3_full_unlock(tp);
7098 }
7099
7100 #define TG3_REGDUMP_LEN         (32 * 1024)
7101
7102 static int tg3_get_regs_len(struct net_device *dev)
7103 {
7104         return TG3_REGDUMP_LEN;
7105 }
7106
7107 static void tg3_get_regs(struct net_device *dev,
7108                 struct ethtool_regs *regs, void *_p)
7109 {
7110         u32 *p = _p;
7111         struct tg3 *tp = netdev_priv(dev);
7112         u8 *orig_p = _p;
7113         int i;
7114
7115         regs->version = 0;
7116
7117         memset(p, 0, TG3_REGDUMP_LEN);
7118
7119         tg3_full_lock(tp, 0);
7120
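/* The dump buffer mirrors the register address space: each GET_REG32 block
 * repoints p at orig_p + offset, so every register lands at its own offset
 * in the 32K buffer and unread ranges keep the zeroes from the memset()
 * above.
 */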
7121 #define __GET_REG32(reg)        (*(p)++ = tr32(reg))
7122 #define GET_REG32_LOOP(base,len)                \
7123 do {    p = (u32 *)(orig_p + (base));           \
7124         for (i = 0; i < len; i += 4)            \
7125                 __GET_REG32((base) + i);        \
7126 } while (0)
7127 #define GET_REG32_1(reg)                        \
7128 do {    p = (u32 *)(orig_p + (reg));            \
7129         __GET_REG32((reg));                     \
7130 } while (0)
7131
7132         GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
7133         GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
7134         GET_REG32_LOOP(MAC_MODE, 0x4f0);
7135         GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
7136         GET_REG32_1(SNDDATAC_MODE);
7137         GET_REG32_LOOP(SNDBDS_MODE, 0x80);
7138         GET_REG32_LOOP(SNDBDI_MODE, 0x48);
7139         GET_REG32_1(SNDBDC_MODE);
7140         GET_REG32_LOOP(RCVLPC_MODE, 0x20);
7141         GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
7142         GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
7143         GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
7144         GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
7145         GET_REG32_1(RCVDCC_MODE);
7146         GET_REG32_LOOP(RCVBDI_MODE, 0x20);
7147         GET_REG32_LOOP(RCVCC_MODE, 0x14);
7148         GET_REG32_LOOP(RCVLSC_MODE, 0x08);
7149         GET_REG32_1(MBFREE_MODE);
7150         GET_REG32_LOOP(HOSTCC_MODE, 0x100);
7151         GET_REG32_LOOP(MEMARB_MODE, 0x10);
7152         GET_REG32_LOOP(BUFMGR_MODE, 0x58);
7153         GET_REG32_LOOP(RDMAC_MODE, 0x08);
7154         GET_REG32_LOOP(WDMAC_MODE, 0x08);
7155         GET_REG32_1(RX_CPU_MODE);
7156         GET_REG32_1(RX_CPU_STATE);
7157         GET_REG32_1(RX_CPU_PGMCTR);
7158         GET_REG32_1(RX_CPU_HWBKPT);
7159         GET_REG32_1(TX_CPU_MODE);
7160         GET_REG32_1(TX_CPU_STATE);
7161         GET_REG32_1(TX_CPU_PGMCTR);
7162         GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
7163         GET_REG32_LOOP(FTQ_RESET, 0x120);
7164         GET_REG32_LOOP(MSGINT_MODE, 0x0c);
7165         GET_REG32_1(DMAC_MODE);
7166         GET_REG32_LOOP(GRC_MODE, 0x4c);
7167         if (tp->tg3_flags & TG3_FLAG_NVRAM)
7168                 GET_REG32_LOOP(NVRAM_CMD, 0x24);
7169
7170 #undef __GET_REG32
7171 #undef GET_REG32_LOOP
7172 #undef GET_REG32_1
7173
7174         tg3_full_unlock(tp);
7175 }
7176
7177 static int tg3_get_eeprom_len(struct net_device *dev)
7178 {
7179         struct tg3 *tp = netdev_priv(dev);
7180
7181         return tp->nvram_size;
7182 }
7183
7184 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
7185
7186 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7187 {
7188         struct tg3 *tp = netdev_priv(dev);
7189         int ret;
7190         u8  *pd;
7191         u32 i, offset, len, val, b_offset, b_count;
7192
7193         offset = eeprom->offset;
7194         len = eeprom->len;
7195         eeprom->len = 0;
7196
7197         eeprom->magic = TG3_EEPROM_MAGIC;
7198
7199         if (offset & 3) {
7200                 /* adjustments to start on required 4 byte boundary */
7201                 b_offset = offset & 3;
7202                 b_count = 4 - b_offset;
7203                 if (b_count > len) {
7204                         /* i.e. offset=1 len=2 */
7205                         b_count = len;
7206                 }
7207                 ret = tg3_nvram_read(tp, offset-b_offset, &val);
7208                 if (ret)
7209                         return ret;
7210                 val = cpu_to_le32(val);
7211                 memcpy(data, ((char*)&val) + b_offset, b_count);
7212                 len -= b_count;
7213                 offset += b_count;
7214                 eeprom->len += b_count;
7215         }
7216
7217         /* read bytes up to the last 4 byte boundary */
7218         pd = &data[eeprom->len];
7219         for (i = 0; i < (len - (len & 3)); i += 4) {
7220                 ret = tg3_nvram_read(tp, offset + i, &val);
7221                 if (ret) {
7222                         eeprom->len += i;
7223                         return ret;
7224                 }
7225                 val = cpu_to_le32(val);
7226                 memcpy(pd + i, &val, 4);
7227         }
7228         eeprom->len += i;
7229
7230         if (len & 3) {
7231                 /* read last bytes not ending on 4 byte boundary */
7232                 pd = &data[eeprom->len];
7233                 b_count = len & 3;
7234                 b_offset = offset + len - b_count;
7235                 ret = tg3_nvram_read(tp, b_offset, &val);
7236                 if (ret)
7237                         return ret;
7238                 val = cpu_to_le32(val);
7239                 memcpy(pd, ((char*)&val), b_count);
7240                 eeprom->len += b_count;
7241         }
7242         return 0;
7243 }
7244
7245 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf); 
7246
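/* NVRAM is written in 32-bit words.  When the request does not start and
 * end on a 4-byte boundary, the neighbouring words are read first and the
 * data is merged into a temporary buffer so the surrounding bytes are
 * preserved by tg3_nvram_write_block().
 */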
7247 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7248 {
7249         struct tg3 *tp = netdev_priv(dev);
7250         int ret;
7251         u32 offset, len, b_offset, odd_len, start, end;
7252         u8 *buf;
7253
7254         if (eeprom->magic != TG3_EEPROM_MAGIC)
7255                 return -EINVAL;
7256
7257         offset = eeprom->offset;
7258         len = eeprom->len;
7259
7260         if ((b_offset = (offset & 3))) {
7261                 /* adjustments to start on required 4 byte boundary */
7262                 ret = tg3_nvram_read(tp, offset-b_offset, &start);
7263                 if (ret)
7264                         return ret;
7265                 start = cpu_to_le32(start);
7266                 len += b_offset;
7267                 offset &= ~3;
7268                 if (len < 4)
7269                         len = 4;
7270         }
7271
7272         odd_len = 0;
7273         if (len & 3) {
7274                 /* adjustments to end on required 4 byte boundary */
7275                 odd_len = 1;
7276                 len = (len + 3) & ~3;
7277                 ret = tg3_nvram_read(tp, offset+len-4, &end);
7278                 if (ret)
7279                         return ret;
7280                 end = cpu_to_le32(end);
7281         }
7282
7283         buf = data;
7284         if (b_offset || odd_len) {
7285                 buf = kmalloc(len, GFP_KERNEL);
7286                 if (buf == NULL)
7287                         return -ENOMEM;
7288                 if (b_offset)
7289                         memcpy(buf, &start, 4);
7290                 if (odd_len)
7291                         memcpy(buf+len-4, &end, 4);
7292                 memcpy(buf + b_offset, data, eeprom->len);
7293         }
7294
7295         ret = tg3_nvram_write_block(tp, offset, len, buf);
7296
7297         if (buf != data)
7298                 kfree(buf);
7299
7300         return ret;
7301 }
7302
7303 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7304 {
7305         struct tg3 *tp = netdev_priv(dev);
7306   
7307         cmd->supported = (SUPPORTED_Autoneg);
7308
7309         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7310                 cmd->supported |= (SUPPORTED_1000baseT_Half |
7311                                    SUPPORTED_1000baseT_Full);
7312
7313         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
7314                 cmd->supported |= (SUPPORTED_100baseT_Half |
7315                                   SUPPORTED_100baseT_Full |
7316                                   SUPPORTED_10baseT_Half |
7317                                   SUPPORTED_10baseT_Full |
7318                                   SUPPORTED_MII);
7319         else
7320                 cmd->supported |= SUPPORTED_FIBRE;
7321   
7322         cmd->advertising = tp->link_config.advertising;
7323         if (netif_running(dev)) {
7324                 cmd->speed = tp->link_config.active_speed;
7325                 cmd->duplex = tp->link_config.active_duplex;
7326         }
7327         cmd->port = 0;
7328         cmd->phy_address = PHY_ADDR;
7329         cmd->transceiver = 0;
7330         cmd->autoneg = tp->link_config.autoneg;
7331         cmd->maxtxpkt = 0;
7332         cmd->maxrxpkt = 0;
7333         return 0;
7334 }
7335   
7336 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7337 {
7338         struct tg3 *tp = netdev_priv(dev);
7339   
7340         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) { 
7341                 /* These are the only valid advertisement bits allowed.  */
7342                 if (cmd->autoneg == AUTONEG_ENABLE &&
7343                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
7344                                           ADVERTISED_1000baseT_Full |
7345                                           ADVERTISED_Autoneg |
7346                                           ADVERTISED_FIBRE)))
7347                         return -EINVAL;
7348                 /* Fiber can only do SPEED_1000.  */
7349                 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7350                          (cmd->speed != SPEED_1000))
7351                         return -EINVAL;
7352         /* Copper cannot force SPEED_1000.  */
7353         } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7354                    (cmd->speed == SPEED_1000))
7355                 return -EINVAL;
7356         else if ((cmd->speed == SPEED_1000) &&
7357                  (tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7358                 return -EINVAL;
7359
7360         tg3_full_lock(tp, 0);
7361
7362         tp->link_config.autoneg = cmd->autoneg;
7363         if (cmd->autoneg == AUTONEG_ENABLE) {
7364                 tp->link_config.advertising = cmd->advertising;
7365                 tp->link_config.speed = SPEED_INVALID;
7366                 tp->link_config.duplex = DUPLEX_INVALID;
7367         } else {
7368                 tp->link_config.advertising = 0;
7369                 tp->link_config.speed = cmd->speed;
7370                 tp->link_config.duplex = cmd->duplex;
7371         }
7372   
7373         if (netif_running(dev))
7374                 tg3_setup_phy(tp, 1);
7375
7376         tg3_full_unlock(tp);
7377   
7378         return 0;
7379 }
7380   
7381 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
7382 {
7383         struct tg3 *tp = netdev_priv(dev);
7384   
7385         strcpy(info->driver, DRV_MODULE_NAME);
7386         strcpy(info->version, DRV_MODULE_VERSION);
7387         strcpy(info->bus_info, pci_name(tp->pdev));
7388 }
7389   
7390 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7391 {
7392         struct tg3 *tp = netdev_priv(dev);
7393   
7394         wol->supported = WAKE_MAGIC;
7395         wol->wolopts = 0;
7396         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
7397                 wol->wolopts = WAKE_MAGIC;
7398         memset(&wol->sopass, 0, sizeof(wol->sopass));
7399 }
7400   
7401 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7402 {
7403         struct tg3 *tp = netdev_priv(dev);
7404   
7405         if (wol->wolopts & ~WAKE_MAGIC)
7406                 return -EINVAL;
7407         if ((wol->wolopts & WAKE_MAGIC) &&
7408             tp->tg3_flags2 & TG3_FLG2_PHY_SERDES &&
7409             !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
7410                 return -EINVAL;
7411   
7412         spin_lock_bh(&tp->lock);
7413         if (wol->wolopts & WAKE_MAGIC)
7414                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
7415         else
7416                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
7417         spin_unlock_bh(&tp->lock);
7418   
7419         return 0;
7420 }
7421   
7422 static u32 tg3_get_msglevel(struct net_device *dev)
7423 {
7424         struct tg3 *tp = netdev_priv(dev);
7425         return tp->msg_enable;
7426 }
7427   
7428 static void tg3_set_msglevel(struct net_device *dev, u32 value)
7429 {
7430         struct tg3 *tp = netdev_priv(dev);
7431         tp->msg_enable = value;
7432 }
7433   
7434 #if TG3_TSO_SUPPORT != 0
7435 static int tg3_set_tso(struct net_device *dev, u32 value)
7436 {
7437         struct tg3 *tp = netdev_priv(dev);
7438
7439         if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
7440                 if (value)
7441                         return -EINVAL;
7442                 return 0;
7443         }
7444         return ethtool_op_set_tso(dev, value);
7445 }
7446 #endif
7447   
7448 static int tg3_nway_reset(struct net_device *dev)
7449 {
7450         struct tg3 *tp = netdev_priv(dev);
7451         u32 bmcr;
7452         int r;
7453   
7454         if (!netif_running(dev))
7455                 return -EAGAIN;
7456
7457         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7458                 return -EINVAL;
7459
7460         spin_lock_bh(&tp->lock);
7461         r = -EINVAL;
7462         tg3_readphy(tp, MII_BMCR, &bmcr);
7463         if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
7464             ((bmcr & BMCR_ANENABLE) ||
7465              (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
7466                 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
7467                                            BMCR_ANENABLE);
7468                 r = 0;
7469         }
7470         spin_unlock_bh(&tp->lock);
7471   
7472         return r;
7473 }
7474   
7475 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7476 {
7477         struct tg3 *tp = netdev_priv(dev);
7478   
7479         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
7480         ering->rx_mini_max_pending = 0;
7481         ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
7482
7483         ering->rx_pending = tp->rx_pending;
7484         ering->rx_mini_pending = 0;
7485         ering->rx_jumbo_pending = tp->rx_jumbo_pending;
7486         ering->tx_pending = tp->tx_pending;
7487 }
7488   
7489 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7490 {
7491         struct tg3 *tp = netdev_priv(dev);
7492         int irq_sync = 0;
7493   
7494         if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
7495             (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
7496             (ering->tx_pending > TG3_TX_RING_SIZE - 1))
7497                 return -EINVAL;
7498   
7499         if (netif_running(dev)) {
7500                 tg3_netif_stop(tp);
7501                 irq_sync = 1;
7502         }
7503
7504         tg3_full_lock(tp, irq_sync);
7505   
7506         tp->rx_pending = ering->rx_pending;
7507
7508         if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
7509             tp->rx_pending > 63)
7510                 tp->rx_pending = 63;
7511         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
7512         tp->tx_pending = ering->tx_pending;
7513
7514         if (netif_running(dev)) {
7515                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7516                 tg3_init_hw(tp);
7517                 tg3_netif_start(tp);
7518         }
7519
7520         tg3_full_unlock(tp);
7521   
7522         return 0;
7523 }
7524   
7525 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7526 {
7527         struct tg3 *tp = netdev_priv(dev);
7528   
7529         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
7530         epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
7531         epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
7532 }
7533   
7534 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7535 {
7536         struct tg3 *tp = netdev_priv(dev);
7537         int irq_sync = 0;
7538   
7539         if (netif_running(dev)) {
7540                 tg3_netif_stop(tp);
7541                 irq_sync = 1;
7542         }
7543
7544         tg3_full_lock(tp, irq_sync);
7545
7546         if (epause->autoneg)
7547                 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
7548         else
7549                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
7550         if (epause->rx_pause)
7551                 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
7552         else
7553                 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
7554         if (epause->tx_pause)
7555                 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
7556         else
7557                 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
7558
7559         if (netif_running(dev)) {
7560                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7561                 tg3_init_hw(tp);
7562                 tg3_netif_start(tp);
7563         }
7564
7565         tg3_full_unlock(tp);
7566   
7567         return 0;
7568 }
7569   
7570 static u32 tg3_get_rx_csum(struct net_device *dev)
7571 {
7572         struct tg3 *tp = netdev_priv(dev);
7573         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
7574 }
7575   
7576 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
7577 {
7578         struct tg3 *tp = netdev_priv(dev);
7579   
7580         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7581                 if (data != 0)
7582                         return -EINVAL;
7583                 return 0;
7584         }
7585   
7586         spin_lock_bh(&tp->lock);
7587         if (data)
7588                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
7589         else
7590                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
7591         spin_unlock_bh(&tp->lock);
7592   
7593         return 0;
7594 }
7595   
7596 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
7597 {
7598         struct tg3 *tp = netdev_priv(dev);
7599   
7600         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7601                 if (data != 0)
7602                         return -EINVAL;
7603                 return 0;
7604         }
7605   
7606         if (data)
7607                 dev->features |= NETIF_F_IP_CSUM;
7608         else
7609                 dev->features &= ~NETIF_F_IP_CSUM;
7610
7611         return 0;
7612 }
7613
7614 static int tg3_get_stats_count (struct net_device *dev)
7615 {
7616         return TG3_NUM_STATS;
7617 }
7618
7619 static int tg3_get_test_count (struct net_device *dev)
7620 {
7621         return TG3_NUM_TEST;
7622 }
7623
7624 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
7625 {
7626         switch (stringset) {
7627         case ETH_SS_STATS:
7628                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
7629                 break;
7630         case ETH_SS_TEST:
7631                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
7632                 break;
7633         default:
7634                 WARN_ON(1);     /* we need a WARN() */
7635                 break;
7636         }
7637 }
7638
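/* ethtool "identify NIC" callback: blink the LEDs by toggling the override
 * bits every 500 ms for roughly 'data' seconds (2 if unspecified), then
 * restore the saved tp->led_ctrl value.
 */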
7639 static int tg3_phys_id(struct net_device *dev, u32 data)
7640 {
7641         struct tg3 *tp = netdev_priv(dev);
7642         int i;
7643
7644         if (!netif_running(tp->dev))
7645                 return -EAGAIN;
7646
7647         if (data == 0)
7648                 data = 2;
7649
7650         for (i = 0; i < (data * 2); i++) {
7651                 if ((i % 2) == 0)
7652                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
7653                                            LED_CTRL_1000MBPS_ON |
7654                                            LED_CTRL_100MBPS_ON |
7655                                            LED_CTRL_10MBPS_ON |
7656                                            LED_CTRL_TRAFFIC_OVERRIDE |
7657                                            LED_CTRL_TRAFFIC_BLINK |
7658                                            LED_CTRL_TRAFFIC_LED);
7659         
7660                 else
7661                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
7662                                            LED_CTRL_TRAFFIC_OVERRIDE);
7663
7664                 if (msleep_interruptible(500))
7665                         break;
7666         }
7667         tw32(MAC_LED_CTRL, tp->led_ctrl);
7668         return 0;
7669 }
7670
7671 static void tg3_get_ethtool_stats (struct net_device *dev,
7672                                    struct ethtool_stats *estats, u64 *tmp_stats)
7673 {
7674         struct tg3 *tp = netdev_priv(dev);
7675         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
7676 }
7677
7678 #define NVRAM_TEST_SIZE 0x100
7679
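/* Sanity check the first 256 bytes of NVRAM: the image must carry the
 * expected magic word and both embedded CRC32 checksums must match (the
 * bootstrap header is checksummed at offset 0x10, the manufacturing block
 * starting at 0x74 is checksummed at 0xfc).
 */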
7680 static int tg3_test_nvram(struct tg3 *tp)
7681 {
7682         u32 *buf, csum;
7683         int i, j, err = 0;
7684
7685         buf = kmalloc(NVRAM_TEST_SIZE, GFP_KERNEL);
7686         if (buf == NULL)
7687                 return -ENOMEM;
7688
7689         for (i = 0, j = 0; i < NVRAM_TEST_SIZE; i += 4, j++) {
7690                 u32 val;
7691
7692                 if ((err = tg3_nvram_read(tp, i, &val)) != 0)
7693                         break;
7694                 buf[j] = cpu_to_le32(val);
7695         }
7696         if (i < NVRAM_TEST_SIZE)
7697                 goto out;
7698
7699         err = -EIO;
7700         if (cpu_to_be32(buf[0]) != TG3_EEPROM_MAGIC)
7701                 goto out;
7702
7703         /* Bootstrap checksum at offset 0x10 */
7704         csum = calc_crc((unsigned char *) buf, 0x10);
7705         if (csum != cpu_to_le32(buf[0x10/4]))
7706                 goto out;
7707
7708         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
7709         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
7710         if (csum != cpu_to_le32(buf[0xfc/4]))
7711                 goto out;
7712
7713         err = 0;
7714
7715 out:
7716         kfree(buf);
7717         return err;
7718 }
7719
7720 #define TG3_SERDES_TIMEOUT_SEC  2
7721 #define TG3_COPPER_TIMEOUT_SEC  6
7722
7723 static int tg3_test_link(struct tg3 *tp)
7724 {
7725         int i, max;
7726
7727         if (!netif_running(tp->dev))
7728                 return -ENODEV;
7729
7730         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
7731                 max = TG3_SERDES_TIMEOUT_SEC;
7732         else
7733                 max = TG3_COPPER_TIMEOUT_SEC;
7734
7735         for (i = 0; i < max; i++) {
7736                 if (netif_carrier_ok(tp->dev))
7737                         return 0;
7738
7739                 if (msleep_interruptible(1000))
7740                         break;
7741         }
7742
7743         return -EIO;
7744 }
7745
7746 /* Only test the commonly used registers */
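/* Each reg_tbl[] entry gives a register offset, the chips it applies to
 * (TG3_FL_5705 = 5705-class only, TG3_FL_NOT_5705 = everything else,
 * TG3_FL_NOT_5788 = skipped on the 5788) and, roughly, masks describing
 * which bits the read-back and write tests are allowed to exercise.
 */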
7747 static int tg3_test_registers(struct tg3 *tp)
7748 {
7749         int i, is_5705;
7750         u32 offset, read_mask, write_mask, val, save_val, read_val;
7751         static struct {
7752                 u16 offset;
7753                 u16 flags;
7754 #define TG3_FL_5705     0x1
7755 #define TG3_FL_NOT_5705 0x2
7756 #define TG3_FL_NOT_5788 0x4
7757                 u32 read_mask;
7758                 u32 write_mask;
7759         } reg_tbl[] = {
7760                 /* MAC Control Registers */
7761                 { MAC_MODE, TG3_FL_NOT_5705,
7762                         0x00000000, 0x00ef6f8c },
7763                 { MAC_MODE, TG3_FL_5705,
7764                         0x00000000, 0x01ef6b8c },
7765                 { MAC_STATUS, TG3_FL_NOT_5705,
7766                         0x03800107, 0x00000000 },
7767                 { MAC_STATUS, TG3_FL_5705,
7768                         0x03800100, 0x00000000 },
7769                 { MAC_ADDR_0_HIGH, 0x0000,
7770                         0x00000000, 0x0000ffff },
7771                 { MAC_ADDR_0_LOW, 0x0000,
7772                         0x00000000, 0xffffffff },
7773                 { MAC_RX_MTU_SIZE, 0x0000,
7774                         0x00000000, 0x0000ffff },
7775                 { MAC_TX_MODE, 0x0000,
7776                         0x00000000, 0x00000070 },
7777                 { MAC_TX_LENGTHS, 0x0000,
7778                         0x00000000, 0x00003fff },
7779                 { MAC_RX_MODE, TG3_FL_NOT_5705,
7780                         0x00000000, 0x000007fc },
7781                 { MAC_RX_MODE, TG3_FL_5705,
7782                         0x00000000, 0x000007dc },
7783                 { MAC_HASH_REG_0, 0x0000,
7784                         0x00000000, 0xffffffff },
7785                 { MAC_HASH_REG_1, 0x0000,
7786                         0x00000000, 0xffffffff },
7787                 { MAC_HASH_REG_2, 0x0000,
7788                         0x00000000, 0xffffffff },
7789                 { MAC_HASH_REG_3, 0x0000,
7790                         0x00000000, 0xffffffff },
7791
7792                 /* Receive Data and Receive BD Initiator Control Registers. */
7793                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
7794                         0x00000000, 0xffffffff },
7795                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
7796                         0x00000000, 0xffffffff },
7797                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
7798                         0x00000000, 0x00000003 },
7799                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
7800                         0x00000000, 0xffffffff },
7801                 { RCVDBDI_STD_BD+0, 0x0000,
7802                         0x00000000, 0xffffffff },
7803                 { RCVDBDI_STD_BD+4, 0x0000,
7804                         0x00000000, 0xffffffff },
7805                 { RCVDBDI_STD_BD+8, 0x0000,
7806                         0x00000000, 0xffff0002 },
7807                 { RCVDBDI_STD_BD+0xc, 0x0000,
7808                         0x00000000, 0xffffffff },
7809         
7810                 /* Receive BD Initiator Control Registers. */
7811                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
7812                         0x00000000, 0xffffffff },
7813                 { RCVBDI_STD_THRESH, TG3_FL_5705,
7814                         0x00000000, 0x000003ff },
7815                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
7816                         0x00000000, 0xffffffff },
7817         
7818                 /* Host Coalescing Control Registers. */
7819                 { HOSTCC_MODE, TG3_FL_NOT_5705,
7820                         0x00000000, 0x00000004 },
7821                 { HOSTCC_MODE, TG3_FL_5705,
7822                         0x00000000, 0x000000f6 },
7823                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
7824                         0x00000000, 0xffffffff },
7825                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
7826                         0x00000000, 0x000003ff },
7827                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
7828                         0x00000000, 0xffffffff },
7829                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
7830                         0x00000000, 0x000003ff },
7831                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
7832                         0x00000000, 0xffffffff },
7833                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
7834                         0x00000000, 0x000000ff },
7835                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
7836                         0x00000000, 0xffffffff },
7837                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
7838                         0x00000000, 0x000000ff },
7839                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
7840                         0x00000000, 0xffffffff },
7841                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
7842                         0x00000000, 0xffffffff },
7843                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
7844                         0x00000000, 0xffffffff },
7845                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
7846                         0x00000000, 0x000000ff },
7847                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
7848                         0x00000000, 0xffffffff },
7849                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
7850                         0x00000000, 0x000000ff },
7851                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
7852                         0x00000000, 0xffffffff },
7853                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
7854                         0x00000000, 0xffffffff },
7855                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
7856                         0x00000000, 0xffffffff },
7857                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
7858                         0x00000000, 0xffffffff },
7859                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
7860                         0x00000000, 0xffffffff },
7861                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
7862                         0xffffffff, 0x00000000 },
7863                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
7864                         0xffffffff, 0x00000000 },
7865
7866                 /* Buffer Manager Control Registers. */
7867                 { BUFMGR_MB_POOL_ADDR, 0x0000,
7868                         0x00000000, 0x007fff80 },
7869                 { BUFMGR_MB_POOL_SIZE, 0x0000,
7870                         0x00000000, 0x007fffff },
7871                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
7872                         0x00000000, 0x0000003f },
7873                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
7874                         0x00000000, 0x000001ff },
7875                 { BUFMGR_MB_HIGH_WATER, 0x0000,
7876                         0x00000000, 0x000001ff },
7877                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
7878                         0xffffffff, 0x00000000 },
7879                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
7880                         0xffffffff, 0x00000000 },
7881         
7882                 /* Mailbox Registers */
7883                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
7884                         0x00000000, 0x000001ff },
7885                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
7886                         0x00000000, 0x000001ff },
7887                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
7888                         0x00000000, 0x000007ff },
7889                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
7890                         0x00000000, 0x000001ff },
7891
7892                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
7893         };
7894
7895         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7896                 is_5705 = 1;
7897         else
7898                 is_5705 = 0;
7899
7900         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
7901                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
7902                         continue;
7903
7904                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
7905                         continue;
7906
7907                 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
7908                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
7909                         continue;
7910
7911                 offset = (u32) reg_tbl[i].offset;
7912                 read_mask = reg_tbl[i].read_mask;
7913                 write_mask = reg_tbl[i].write_mask;
7914
7915                 /* Save the original register content */
7916                 save_val = tr32(offset);
7917
7918                 /* Determine the read-only value. */
7919                 read_val = save_val & read_mask;
7920
7921                 /* Write zero to the register, then make sure the read-only bits
7922                  * are not changed and the read/write bits are all zeros.
7923                  */
7924                 tw32(offset, 0);
7925
7926                 val = tr32(offset);
7927
7928                 /* Test the read-only and read/write bits. */
7929                 if (((val & read_mask) != read_val) || (val & write_mask))
7930                         goto out;
7931
7932                 /* Write ones to all the bits defined by RdMask and WrMask, then
7933                  * make sure the read-only bits are not changed and the
7934                  * read/write bits are all ones.
7935                  */
7936                 tw32(offset, read_mask | write_mask);
7937
7938                 val = tr32(offset);
7939
7940                 /* Test the read-only bits. */
7941                 if ((val & read_mask) != read_val)
7942                         goto out;
7943
7944                 /* Test the read/write bits. */
7945                 if ((val & write_mask) != write_mask)
7946                         goto out;
7947
7948                 tw32(offset, save_val);
7949         }
7950
7951         return 0;
7952
7953 out:
7954         printk(KERN_ERR PFX "Register test failed at offset %x\n", offset);
7955         tw32(offset, save_val);
7956         return -EIO;
7957 }
7958
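     /* Write each of the test patterns to every 32-bit word in the given
      * internal-memory window via tg3_write_mem(), read it back with
      * tg3_read_mem() and return -EIO on the first mismatch.
      */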
7959 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
7960 {
7961         static u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
7962         int i;
7963         u32 j;
7964
7965         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
7966                 for (j = 0; j < len; j += 4) {
7967                         u32 val;
7968
7969                         tg3_write_mem(tp, offset + j, test_pattern[i]);
7970                         tg3_read_mem(tp, offset + j, &val);
7971                         if (val != test_pattern[i])
7972                                 return -EIO;
7973                 }
7974         }
7975         return 0;
7976 }
7977
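     /* Internal SRAM test: pick the region table that matches the chip
      * family (5705+ parts expose different windows than the original
      * 570x) and run tg3_do_mem_test() on each entry until the
      * 0xffffffff terminator.
      */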
7978 static int tg3_test_memory(struct tg3 *tp)
7979 {
7980         static struct mem_entry {
7981                 u32 offset;
7982                 u32 len;
7983         } mem_tbl_570x[] = {
7984                 { 0x00000000, 0x00b50},
7985                 { 0x00002000, 0x1c000},
7986                 { 0xffffffff, 0x00000}
7987         }, mem_tbl_5705[] = {
7988                 { 0x00000100, 0x0000c},
7989                 { 0x00000200, 0x00008},
7990                 { 0x00004000, 0x00800},
7991                 { 0x00006000, 0x01000},
7992                 { 0x00008000, 0x02000},
7993                 { 0x00010000, 0x0e000},
7994                 { 0xffffffff, 0x00000}
7995         };
7996         struct mem_entry *mem_tbl;
7997         int err = 0;
7998         int i;
7999
8000         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
8001                 mem_tbl = mem_tbl_5705;
8002         else
8003                 mem_tbl = mem_tbl_570x;
8004
8005         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
8006                 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
8007                     mem_tbl[i].len)) != 0)
8008                         break;
8009         }
8010         
8011         return err;
8012 }
8013
8014 #define TG3_MAC_LOOPBACK        0
8015 #define TG3_PHY_LOOPBACK        1
8016
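     /* Single-packet loopback test.  Put either the MAC or the PHY into
      * loopback, build a 1514-byte frame addressed to our own MAC with a
      * counting byte pattern in the payload, post it as one tx descriptor
      * and ring the send mailbox, then poll the status block until the tx
      * consumer and rx producer indices show the frame has come back.
      * The returned rx descriptor and payload are then checked byte by
      * byte.  MAC loopback is skipped on the 5780 because of the hardware
      * erratum noted below.
      */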
8017 static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
8018 {
8019         u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
8020         u32 desc_idx;
8021         struct sk_buff *skb, *rx_skb;
8022         u8 *tx_data;
8023         dma_addr_t map;
8024         int num_pkts, tx_len, rx_len, i, err;
8025         struct tg3_rx_buffer_desc *desc;
8026
8027         if (loopback_mode == TG3_MAC_LOOPBACK) {
8028                 /* HW errata - mac loopback fails in some cases on 5780.
8029                  * Normal traffic and PHY loopback are not affected by
8030                  * errata.
8031                  */
8032                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
8033                         return 0;
8034
8035                 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
8036                            MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY |
8037                            MAC_MODE_PORT_MODE_GMII;
8038                 tw32(MAC_MODE, mac_mode);
8039         } else if (loopback_mode == TG3_PHY_LOOPBACK) {
8040                 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
8041                                            BMCR_SPEED1000);
8042                 udelay(40);
8043                 /* reset to prevent losing 1st rx packet intermittently */
8044                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
8045                         tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8046                         udelay(10);
8047                         tw32_f(MAC_RX_MODE, tp->rx_mode);
8048                 }
8049                 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
8050                            MAC_MODE_LINK_POLARITY | MAC_MODE_PORT_MODE_GMII;
8051                 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
8052                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
8053                 tw32(MAC_MODE, mac_mode);
8054         }
8055         else
8056                 return -EINVAL;
8057
8058         err = -EIO;
8059
8060         tx_len = 1514;
8061         skb = dev_alloc_skb(tx_len);
             if (!skb)
                     return -ENOMEM;
8062         tx_data = skb_put(skb, tx_len);
8063         memcpy(tx_data, tp->dev->dev_addr, 6);
8064         memset(tx_data + 6, 0x0, 8);
8065
8066         tw32(MAC_RX_MTU_SIZE, tx_len + 4);
8067
8068         for (i = 14; i < tx_len; i++)
8069                 tx_data[i] = (u8) (i & 0xff);
8070
8071         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
8072
8073         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8074              HOSTCC_MODE_NOW);
8075
8076         udelay(10);
8077
8078         rx_start_idx = tp->hw_status->idx[0].rx_producer;
8079
8080         num_pkts = 0;
8081
8082         tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
8083
8084         tp->tx_prod++;
8085         num_pkts++;
8086
8087         tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
8088                      tp->tx_prod);
8089         tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
8090
8091         udelay(10);
8092
8093         for (i = 0; i < 10; i++) {
8094                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8095                        HOSTCC_MODE_NOW);
8096
8097                 udelay(10);
8098
8099                 tx_idx = tp->hw_status->idx[0].tx_consumer;
8100                 rx_idx = tp->hw_status->idx[0].rx_producer;
8101                 if ((tx_idx == tp->tx_prod) &&
8102                     (rx_idx == (rx_start_idx + num_pkts)))
8103                         break;
8104         }
8105
8106         pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
8107         dev_kfree_skb(skb);
8108
8109         if (tx_idx != tp->tx_prod)
8110                 goto out;
8111
8112         if (rx_idx != rx_start_idx + num_pkts)
8113                 goto out;
8114
8115         desc = &tp->rx_rcb[rx_start_idx];
8116         desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
8117         opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
8118         if (opaque_key != RXD_OPAQUE_RING_STD)
8119                 goto out;
8120
8121         if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
8122             (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
8123                 goto out;
8124
8125         rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
8126         if (rx_len != tx_len)
8127                 goto out;
8128
8129         rx_skb = tp->rx_std_buffers[desc_idx].skb;
8130
8131         map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
8132         pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
8133
8134         for (i = 14; i < tx_len; i++) {
8135                 if (*(rx_skb->data + i) != (u8) (i & 0xff))
8136                         goto out;
8137         }
8138         err = 0;
8139         
8140         /* tg3_free_rings will unmap and free the rx_skb */
8141 out:
8142         return err;
8143 }
8144
8145 #define TG3_MAC_LOOPBACK_FAILED         1
8146 #define TG3_PHY_LOOPBACK_FAILED         2
8147 #define TG3_LOOPBACK_FAILED             (TG3_MAC_LOOPBACK_FAILED |      \
8148                                          TG3_PHY_LOOPBACK_FAILED)
8149
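     /* Run the MAC loopback test always and the PHY loopback test unless
      * the board uses a SERDES PHY; returns a mask of the *_FAILED bits
      * above.  The hardware is re-initialized first via tg3_reset_hw(),
      * and both tests are reported as failed if the interface is down.
      */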
8150 static int tg3_test_loopback(struct tg3 *tp)
8151 {
8152         int err = 0;
8153
8154         if (!netif_running(tp->dev))
8155                 return TG3_LOOPBACK_FAILED;
8156
8157         tg3_reset_hw(tp);
8158
8159         if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
8160                 err |= TG3_MAC_LOOPBACK_FAILED;
8161         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
8162                 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
8163                         err |= TG3_PHY_LOOPBACK_FAILED;
8164         }
8165
8166         return err;
8167 }
8168
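     /* ethtool self-test entry point.  data[0..5] report the nvram, link,
      * register, memory, loopback and interrupt test results.  The
      * register/memory/loopback/interrupt tests only run for an offline
      * test: the chip is halted around them and, if the interface was up,
      * reset and restarted afterwards.
      */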
8169 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
8170                           u64 *data)
8171 {
8172         struct tg3 *tp = netdev_priv(dev);
8173
8174         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
8175
8176         if (tg3_test_nvram(tp) != 0) {
8177                 etest->flags |= ETH_TEST_FL_FAILED;
8178                 data[0] = 1;
8179         }
8180         if (tg3_test_link(tp) != 0) {
8181                 etest->flags |= ETH_TEST_FL_FAILED;
8182                 data[1] = 1;
8183         }
8184         if (etest->flags & ETH_TEST_FL_OFFLINE) {
8185                 int irq_sync = 0;
8186
8187                 if (netif_running(dev)) {
8188                         tg3_netif_stop(tp);
8189                         irq_sync = 1;
8190                 }
8191
8192                 tg3_full_lock(tp, irq_sync);
8193
8194                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
8195                 tg3_nvram_lock(tp);
8196                 tg3_halt_cpu(tp, RX_CPU_BASE);
8197                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
8198                         tg3_halt_cpu(tp, TX_CPU_BASE);
8199                 tg3_nvram_unlock(tp);
8200
8201                 if (tg3_test_registers(tp) != 0) {
8202                         etest->flags |= ETH_TEST_FL_FAILED;
8203                         data[2] = 1;
8204                 }
8205                 if (tg3_test_memory(tp) != 0) {
8206                         etest->flags |= ETH_TEST_FL_FAILED;
8207                         data[3] = 1;
8208                 }
8209                 if ((data[4] = tg3_test_loopback(tp)) != 0)
8210                         etest->flags |= ETH_TEST_FL_FAILED;
8211
8212                 tg3_full_unlock(tp);
8213
8214                 if (tg3_test_interrupt(tp) != 0) {
8215                         etest->flags |= ETH_TEST_FL_FAILED;
8216                         data[5] = 1;
8217                 }
8218
8219                 tg3_full_lock(tp, 0);
8220
8221                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8222                 if (netif_running(dev)) {
8223                         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
8224                         tg3_init_hw(tp);
8225                         tg3_netif_start(tp);
8226                 }
8227
8228                 tg3_full_unlock(tp);
8229         }
8230 }
8231
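     /* MII ioctl handler.  SIOCGMIIPHY reports PHY_ADDR and falls through
      * to SIOCGMIIREG, which reads a PHY register under tp->lock;
      * SIOCSMIIREG writes one and requires CAP_NET_ADMIN.  All of these
      * return -EOPNOTSUPP on SERDES boards, which have no MII PHY.
      */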
8232 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8233 {
8234         struct mii_ioctl_data *data = if_mii(ifr);
8235         struct tg3 *tp = netdev_priv(dev);
8236         int err;
8237
8238         switch(cmd) {
8239         case SIOCGMIIPHY:
8240                 data->phy_id = PHY_ADDR;
8241
8242                 /* fallthru */
8243         case SIOCGMIIREG: {
8244                 u32 mii_regval;
8245
8246                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8247                         break;                  /* We have no PHY */
8248
8249                 spin_lock_bh(&tp->lock);
8250                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
8251                 spin_unlock_bh(&tp->lock);
8252
8253                 data->val_out = mii_regval;
8254
8255                 return err;
8256         }
8257
8258         case SIOCSMIIREG:
8259                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8260                         break;                  /* We have no PHY */
8261
8262                 if (!capable(CAP_NET_ADMIN))
8263                         return -EPERM;
8264
8265                 spin_lock_bh(&tp->lock);
8266                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
8267                 spin_unlock_bh(&tp->lock);
8268
8269                 return err;
8270
8271         default:
8272                 /* do nothing */
8273                 break;
8274         }
8275         return -EOPNOTSUPP;
8276 }
8277
8278 #if TG3_VLAN_TAG_USED
8279 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
8280 {
8281         struct tg3 *tp = netdev_priv(dev);
8282
8283         tg3_full_lock(tp, 0);
8284
8285         tp->vlgrp = grp;
8286
8287         /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
8288         __tg3_set_rx_mode(dev);
8289
8290         tg3_full_unlock(tp);
8291 }
8292
8293 static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
8294 {
8295         struct tg3 *tp = netdev_priv(dev);
8296
8297         tg3_full_lock(tp, 0);
8298         if (tp->vlgrp)
8299                 tp->vlgrp->vlan_devices[vid] = NULL;
8300         tg3_full_unlock(tp);
8301 }
8302 #endif
8303
8304 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8305 {
8306         struct tg3 *tp = netdev_priv(dev);
8307
8308         memcpy(ec, &tp->coal, sizeof(*ec));
8309         return 0;
8310 }
8311
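     /* Validate and apply ethtool coalescing parameters.  Values are
      * checked against the hardware limits; on 5705+ parts the per-irq
      * tick and statistics-block parameters must be zero, since their
      * limits are left at 0 here.  Settings that would disable rx or tx
      * interrupts entirely (both usecs and max-frames zero) are rejected.
      * Only the supported fields are copied into tp->coal, and they take
      * effect immediately if the interface is running.
      */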
8312 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8313 {
8314         struct tg3 *tp = netdev_priv(dev);
8315         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
8316         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
8317
8318         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
8319                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
8320                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
8321                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
8322                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
8323         }
8324
8325         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
8326             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
8327             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
8328             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
8329             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
8330             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
8331             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
8332             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
8333             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
8334             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
8335                 return -EINVAL;
8336
8337         /* No rx interrupts will be generated if both are zero */
8338         if ((ec->rx_coalesce_usecs == 0) &&
8339             (ec->rx_max_coalesced_frames == 0))
8340                 return -EINVAL;
8341
8342         /* No tx interrupts will be generated if both are zero */
8343         if ((ec->tx_coalesce_usecs == 0) &&
8344             (ec->tx_max_coalesced_frames == 0))
8345                 return -EINVAL;
8346
8347         /* Only copy relevant parameters, ignore all others. */
8348         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
8349         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
8350         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
8351         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
8352         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
8353         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
8354         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
8355         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
8356         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
8357
8358         if (netif_running(dev)) {
8359                 tg3_full_lock(tp, 0);
8360                 __tg3_set_coalesce(tp, &tp->coal);
8361                 tg3_full_unlock(tp);
8362         }
8363         return 0;
8364 }
8365
8366 static struct ethtool_ops tg3_ethtool_ops = {
8367         .get_settings           = tg3_get_settings,
8368         .set_settings           = tg3_set_settings,
8369         .get_drvinfo            = tg3_get_drvinfo,
8370         .get_regs_len           = tg3_get_regs_len,
8371         .get_regs               = tg3_get_regs,
8372         .get_wol                = tg3_get_wol,
8373         .set_wol                = tg3_set_wol,
8374         .get_msglevel           = tg3_get_msglevel,
8375         .set_msglevel           = tg3_set_msglevel,
8376         .nway_reset             = tg3_nway_reset,
8377         .get_link               = ethtool_op_get_link,
8378         .get_eeprom_len         = tg3_get_eeprom_len,
8379         .get_eeprom             = tg3_get_eeprom,
8380         .set_eeprom             = tg3_set_eeprom,
8381         .get_ringparam          = tg3_get_ringparam,
8382         .set_ringparam          = tg3_set_ringparam,
8383         .get_pauseparam         = tg3_get_pauseparam,
8384         .set_pauseparam         = tg3_set_pauseparam,
8385         .get_rx_csum            = tg3_get_rx_csum,
8386         .set_rx_csum            = tg3_set_rx_csum,
8387         .get_tx_csum            = ethtool_op_get_tx_csum,
8388         .set_tx_csum            = tg3_set_tx_csum,
8389         .get_sg                 = ethtool_op_get_sg,
8390         .set_sg                 = ethtool_op_set_sg,
8391 #if TG3_TSO_SUPPORT != 0
8392         .get_tso                = ethtool_op_get_tso,
8393         .set_tso                = tg3_set_tso,
8394 #endif
8395         .self_test_count        = tg3_get_test_count,
8396         .self_test              = tg3_self_test,
8397         .get_strings            = tg3_get_strings,
8398         .phys_id                = tg3_phys_id,
8399         .get_stats_count        = tg3_get_stats_count,
8400         .get_ethtool_stats      = tg3_get_ethtool_stats,
8401         .get_coalesce           = tg3_get_coalesce,
8402         .set_coalesce           = tg3_set_coalesce,
8403         .get_perm_addr          = ethtool_op_get_perm_addr,
8404 };
8405
8406 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
8407 {
8408         u32 cursize, val;
8409
8410         tp->nvram_size = EEPROM_CHIP_SIZE;
8411
8412         if (tg3_nvram_read(tp, 0, &val) != 0)
8413                 return;
8414
8415         if (swab32(val) != TG3_EEPROM_MAGIC)
8416                 return;
8417
8418         /*
8419          * Size the chip by reading offsets at increasing powers of two.
8420          * When we encounter our validation signature, we know the addressing
8421          * has wrapped around, and thus have our chip size.
8422          */
8423         cursize = 0x800;
8424
8425         while (cursize < tp->nvram_size) {
8426                 if (tg3_nvram_read(tp, cursize, &val) != 0)
8427                         return;
8428
8429                 if (swab32(val) == TG3_EEPROM_MAGIC)
8430                         break;
8431
8432                 cursize <<= 1;
8433         }
8434
8435         tp->nvram_size = cursize;
8436 }
8437                 
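     /* The word at NVRAM offset 0xf0, when non-zero, encodes the part's
      * size in KB in its upper 16 bits; otherwise fall back to the 128KB
      * default.
      */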
8438 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
8439 {
8440         u32 val;
8441
8442         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
8443                 if (val != 0) {
8444                         tp->nvram_size = (val >> 16) * 1024;
8445                         return;
8446                 }
8447         }
8448         tp->nvram_size = 0x20000;
8449 }
8450
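     /* Decode NVRAM_CFG1 to work out what kind of part is attached: set
      * TG3_FLG2_FLASH when the flash interface is enabled, and on 5750 or
      * 5780-class chips map the vendor field to a JEDEC id, page size
      * and, where appropriate, the NVRAM_BUFFERED flag.  Other chips
      * default to an Atmel AT45DB0x1B-style buffered part.
      */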
8451 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
8452 {
8453         u32 nvcfg1;
8454
8455         nvcfg1 = tr32(NVRAM_CFG1);
8456         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
8457                 tp->tg3_flags2 |= TG3_FLG2_FLASH;
8458         }
8459         else {
8460                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8461                 tw32(NVRAM_CFG1, nvcfg1);
8462         }
8463
8464         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
8465             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
8466                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
8467                         case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
8468                                 tp->nvram_jedecnum = JEDEC_ATMEL;
8469                                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
8470                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8471                                 break;
8472                         case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
8473                                 tp->nvram_jedecnum = JEDEC_ATMEL;
8474                                 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
8475                                 break;
8476                         case FLASH_VENDOR_ATMEL_EEPROM:
8477                                 tp->nvram_jedecnum = JEDEC_ATMEL;
8478                                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8479                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8480                                 break;
8481                         case FLASH_VENDOR_ST:
8482                                 tp->nvram_jedecnum = JEDEC_ST;
8483                                 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
8484                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8485                                 break;
8486                         case FLASH_VENDOR_SAIFUN:
8487                                 tp->nvram_jedecnum = JEDEC_SAIFUN;
8488                                 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
8489                                 break;
8490                         case FLASH_VENDOR_SST_SMALL:
8491                         case FLASH_VENDOR_SST_LARGE:
8492                                 tp->nvram_jedecnum = JEDEC_SST;
8493                                 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
8494                                 break;
8495                 }
8496         }
8497         else {
8498                 tp->nvram_jedecnum = JEDEC_ATMEL;
8499                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
8500                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8501         }
8502 }
8503
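     /* 5752-specific NVRAM decode: bit 27 of NVRAM_CFG1 marks the part as
      * TPM-protected, the vendor field selects Atmel eeprom/flash or ST
      * M45PEx flash, and for flash parts the page size field is decoded.
      * Plain eeproms get the full chip size as their page size and have
      * compat bypass turned off.
      */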
8504 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
8505 {
8506         u32 nvcfg1;
8507
8508         nvcfg1 = tr32(NVRAM_CFG1);
8509
8510         /* NVRAM protection for TPM */
8511         if (nvcfg1 & (1 << 27))
8512                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
8513
8514         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
8515                 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
8516                 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
8517                         tp->nvram_jedecnum = JEDEC_ATMEL;
8518                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8519                         break;
8520                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
8521                         tp->nvram_jedecnum = JEDEC_ATMEL;
8522                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8523                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
8524                         break;
8525                 case FLASH_5752VENDOR_ST_M45PE10:
8526                 case FLASH_5752VENDOR_ST_M45PE20:
8527                 case FLASH_5752VENDOR_ST_M45PE40:
8528                         tp->nvram_jedecnum = JEDEC_ST;
8529                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8530                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
8531                         break;
8532         }
8533
8534         if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
8535                 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
8536                         case FLASH_5752PAGE_SIZE_256:
8537                                 tp->nvram_pagesize = 256;
8538                                 break;
8539                         case FLASH_5752PAGE_SIZE_512:
8540                                 tp->nvram_pagesize = 512;
8541                                 break;
8542                         case FLASH_5752PAGE_SIZE_1K:
8543                                 tp->nvram_pagesize = 1024;
8544                                 break;
8545                         case FLASH_5752PAGE_SIZE_2K:
8546                                 tp->nvram_pagesize = 2048;
8547                                 break;
8548                         case FLASH_5752PAGE_SIZE_4K:
8549                                 tp->nvram_pagesize = 4096;
8550                                 break;
8551                         case FLASH_5752PAGE_SIZE_264:
8552                                 tp->nvram_pagesize = 264;
8553                                 break;
8554                 }
8555         }
8556         else {
8557                 /* For eeprom, set pagesize to maximum eeprom size */
8558                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8559
8560                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8561                 tw32(NVRAM_CFG1, nvcfg1);
8562         }
8563 }
8564
8565 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
8566 static void __devinit tg3_nvram_init(struct tg3 *tp)
8567 {
8568         int j;
8569
8570         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X)
8571                 return;
8572
8573         tw32_f(GRC_EEPROM_ADDR,
8574              (EEPROM_ADDR_FSM_RESET |
8575               (EEPROM_DEFAULT_CLOCK_PERIOD <<
8576                EEPROM_ADDR_CLKPERD_SHIFT)));
8577
8578         /* XXX schedule_timeout() ... */
8579         for (j = 0; j < 100; j++)
8580                 udelay(10);
8581
8582         /* Enable seeprom accesses. */
8583         tw32_f(GRC_LOCAL_CTRL,
8584              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
8585         udelay(100);
8586
8587         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
8588             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
8589                 tp->tg3_flags |= TG3_FLAG_NVRAM;
8590
8591                 tg3_nvram_lock(tp);
8592                 tg3_enable_nvram_access(tp);
8593
8594                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8595                         tg3_get_5752_nvram_info(tp);
8596                 else
8597                         tg3_get_nvram_info(tp);
8598
8599                 tg3_get_nvram_size(tp);
8600
8601                 tg3_disable_nvram_access(tp);
8602                 tg3_nvram_unlock(tp);
8603
8604         } else {
8605                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
8606
8607                 tg3_get_eeprom_size(tp);
8608         }
8609 }
8610
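     /* Legacy read path for boards without an NVRAM controller: program
      * the dword-aligned offset into GRC_EEPROM_ADDR with the READ and
      * START bits, poll (for up to roughly a second) for ADDR_COMPLETE,
      * then pick the word up from GRC_EEPROM_DATA.
      */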
8611 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
8612                                         u32 offset, u32 *val)
8613 {
8614         u32 tmp;
8615         int i;
8616
8617         if (offset > EEPROM_ADDR_ADDR_MASK ||
8618             (offset % 4) != 0)
8619                 return -EINVAL;
8620
8621         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
8622                                         EEPROM_ADDR_DEVID_MASK |
8623                                         EEPROM_ADDR_READ);
8624         tw32(GRC_EEPROM_ADDR,
8625              tmp |
8626              (0 << EEPROM_ADDR_DEVID_SHIFT) |
8627              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
8628               EEPROM_ADDR_ADDR_MASK) |
8629              EEPROM_ADDR_READ | EEPROM_ADDR_START);
8630
8631         for (i = 0; i < 10000; i++) {
8632                 tmp = tr32(GRC_EEPROM_ADDR);
8633
8634                 if (tmp & EEPROM_ADDR_COMPLETE)
8635                         break;
8636                 udelay(100);
8637         }
8638         if (!(tmp & EEPROM_ADDR_COMPLETE))
8639                 return -EBUSY;
8640
8641         *val = tr32(GRC_EEPROM_DATA);
8642         return 0;
8643 }
8644
8645 #define NVRAM_CMD_TIMEOUT 10000
8646
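     /* Kick a command into the NVRAM controller and busy-wait for
      * NVRAM_CMD_DONE, polling every 10us for up to NVRAM_CMD_TIMEOUT
      * iterations; returns -EBUSY if the controller never signals
      * completion.
      */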
8647 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
8648 {
8649         int i;
8650
8651         tw32(NVRAM_CMD, nvram_cmd);
8652         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
8653                 udelay(10);
8654                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
8655                         udelay(10);
8656                         break;
8657                 }
8658         }
8659         if (i == NVRAM_CMD_TIMEOUT) {
8660                 return -EBUSY;
8661         }
8662         return 0;
8663 }
8664
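     /* Read one 32-bit word of NVRAM.  Sun 570X on-board parts are
      * rejected, chips without an NVRAM controller use the legacy eeprom
      * path, and buffered Atmel flash needs its linear offset converted
      * to the AT45DB0x1B page/byte-in-page addressing before the read
      * command is issued.
      */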
8665 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
8666 {
8667         int ret;
8668
8669         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
8670                 printk(KERN_ERR PFX "Attempt to do nvram_read on Sun 570X\n");
8671                 return -EINVAL;
8672         }
8673
8674         if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
8675                 return tg3_nvram_read_using_eeprom(tp, offset, val);
8676
8677         if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
8678                 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
8679                 (tp->nvram_jedecnum == JEDEC_ATMEL)) {
8680
8681                 offset = ((offset / tp->nvram_pagesize) <<
8682                           ATMEL_AT45DB0X1B_PAGE_POS) +
8683                         (offset % tp->nvram_pagesize);
8684         }
8685
8686         if (offset > NVRAM_ADDR_MSK)
8687                 return -EINVAL;
8688
8689         tg3_nvram_lock(tp);
8690
8691         tg3_enable_nvram_access(tp);
8692
8693         tw32(NVRAM_ADDR, offset);
8694         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
8695                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
8696
8697         if (ret == 0)
8698                 *val = swab32(tr32(NVRAM_RDDATA));
8699
8700         tg3_disable_nvram_access(tp);
8701
8702         tg3_nvram_unlock(tp);
8703
8704         return ret;
8705 }
8706
8707 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
8708                                     u32 offset, u32 len, u8 *buf)
8709 {
8710         int i, j, rc = 0;
8711         u32 val;
8712
8713         for (i = 0; i < len; i += 4) {
8714                 u32 addr, data;
8715
8716                 addr = offset + i;
8717
8718                 memcpy(&data, buf + i, 4);
8719
8720                 tw32(GRC_EEPROM_DATA, cpu_to_le32(data));
8721
8722                 val = tr32(GRC_EEPROM_ADDR);
8723                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
8724
8725                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
8726                         EEPROM_ADDR_READ);
8727                 tw32(GRC_EEPROM_ADDR, val |
8728                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
8729                         (addr & EEPROM_ADDR_ADDR_MASK) |
8730                         EEPROM_ADDR_START |
8731                         EEPROM_ADDR_WRITE);
8732                 
8733                 for (j = 0; j < 10000; j++) {
8734                         val = tr32(GRC_EEPROM_ADDR);
8735
8736                         if (val & EEPROM_ADDR_COMPLETE)
8737                                 break;
8738                         udelay(100);
8739                 }
8740                 if (!(val & EEPROM_ADDR_COMPLETE)) {
8741                         rc = -EBUSY;
8742                         break;
8743                 }
8744         }
8745
8746         return rc;
8747 }
8748
8749 /* offset and length are dword aligned */
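     /* Unbuffered flash is written with a read-modify-write per page:
      * read the whole page into a bounce buffer, merge in the caller's
      * data, issue a write-enable followed by a page erase, write-enable
      * again, then program the page word by word with FIRST/LAST framing.
      * A final WRDI clears the write-enable latch.
      */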
8750 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
8751                 u8 *buf)
8752 {
8753         int ret = 0;
8754         u32 pagesize = tp->nvram_pagesize;
8755         u32 pagemask = pagesize - 1;
8756         u32 nvram_cmd;
8757         u8 *tmp;
8758
8759         tmp = kmalloc(pagesize, GFP_KERNEL);
8760         if (tmp == NULL)
8761                 return -ENOMEM;
8762
8763         while (len) {
8764                 int j;
8765                 u32 phy_addr, page_off, size;
8766
8767                 phy_addr = offset & ~pagemask;
8768         
8769                 for (j = 0; j < pagesize; j += 4) {
8770                         if ((ret = tg3_nvram_read(tp, phy_addr + j,
8771                                                 (u32 *) (tmp + j))))
8772                                 break;
8773                 }
8774                 if (ret)
8775                         break;
8776
8777                 page_off = offset & pagemask;
8778                 size = pagesize;
8779                 if (len < size)
8780                         size = len;
8781
8782                 len -= size;
8783
8784                 memcpy(tmp + page_off, buf, size);
8785
8786                 offset = offset + (pagesize - page_off);
8787
8788                 /* Nvram lock released by tg3_nvram_read() above,
8789                  * so need to get it again.
8790                  */
8791                 tg3_nvram_lock(tp);
8792                 tg3_enable_nvram_access(tp);
8793
8794                 /*
8795                  * Before we can erase the flash page, we need
8796                  * to issue a special "write enable" command.
8797                  */
8798                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
8799
8800                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
8801                         break;
8802
8803                 /* Erase the target page */
8804                 tw32(NVRAM_ADDR, phy_addr);
8805
8806                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
8807                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
8808
8809                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
8810                         break;
8811
8812                 /* Issue another write enable to start the write. */
8813                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
8814
8815                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
8816                         break;
8817
8818                 for (j = 0; j < pagesize; j += 4) {
8819                         u32 data;
8820
8821                         data = *((u32 *) (tmp + j));
8822                         tw32(NVRAM_WRDATA, cpu_to_be32(data));
8823
8824                         tw32(NVRAM_ADDR, phy_addr + j);
8825
8826                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
8827                                 NVRAM_CMD_WR;
8828
8829                         if (j == 0)
8830                                 nvram_cmd |= NVRAM_CMD_FIRST;
8831                         else if (j == (pagesize - 4))
8832                                 nvram_cmd |= NVRAM_CMD_LAST;
8833
8834                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
8835                                 break;
8836                 }
8837                 if (ret)
8838                         break;
8839         }
8840
8841         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
8842         tg3_nvram_exec_cmd(tp, nvram_cmd);
8843
8844         kfree(tmp);
8845
8846         return ret;
8847 }
8848
8849 /* offset and length are dword aligned */
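     /* Buffered flash and eeprom parts are programmed one 32-bit word at
      * a time: FIRST is set at the start of a page (or of the transfer)
      * and LAST at the end of a page or of the transfer, ST parts other
      * than on the 5752 get a write-enable command before each FIRST
      * word, and plain eeproms always use complete FIRST|LAST single-word
      * writes.
      */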
8850 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
8851                 u8 *buf)
8852 {
8853         int i, ret = 0;
8854
8855         for (i = 0; i < len; i += 4, offset += 4) {
8856                 u32 data, page_off, phy_addr, nvram_cmd;
8857
8858                 memcpy(&data, buf + i, 4);
8859                 tw32(NVRAM_WRDATA, cpu_to_be32(data));
8860
8861                 page_off = offset % tp->nvram_pagesize;
8862
8863                 if ((tp->tg3_flags2 & TG3_FLG2_FLASH) &&
8864                         (tp->nvram_jedecnum == JEDEC_ATMEL)) {
8865
8866                         phy_addr = ((offset / tp->nvram_pagesize) <<
8867                                     ATMEL_AT45DB0X1B_PAGE_POS) + page_off;
8868                 }
8869                 else {
8870                         phy_addr = offset;
8871                 }
8872
8873                 tw32(NVRAM_ADDR, phy_addr);
8874
8875                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
8876
8877                 if ((page_off == 0) || (i == 0))
8878                         nvram_cmd |= NVRAM_CMD_FIRST;
8879                 else if (page_off == (tp->nvram_pagesize - 4))
8880                         nvram_cmd |= NVRAM_CMD_LAST;
8881
8882                 if (i == (len - 4))
8883                         nvram_cmd |= NVRAM_CMD_LAST;
8884
8885                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
8886                     (tp->nvram_jedecnum == JEDEC_ST) &&
8887                     (nvram_cmd & NVRAM_CMD_FIRST)) {
8888
8889                         if ((ret = tg3_nvram_exec_cmd(tp,
8890                                 NVRAM_CMD_WREN | NVRAM_CMD_GO |
8891                                 NVRAM_CMD_DONE)))
8892
8893                                 break;
8894                 }
8895                 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
8896                         /* We always do complete word writes to eeprom. */
8897                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
8898                 }
8899
8900                 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
8901                         break;
8902         }
8903         return ret;
8904 }
8905
8906 /* offset and length are dword aligned */
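     /* Top-level NVRAM write: drop the eeprom write-protect GPIO for the
      * duration if the board uses it, then either use the legacy eeprom
      * helper or, with NVRAM writes enabled in GRC_MODE, the buffered or
      * unbuffered flash helper, and restore the protection afterwards.
      */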
8907 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
8908 {
8909         int ret;
8910
8911         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
8912                 printk(KERN_ERR PFX "Attempt to do nvram_write on Sun 570X\n");
8913                 return -EINVAL;
8914         }
8915
8916         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
8917                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
8918                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
8919                 udelay(40);
8920         }
8921
8922         if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
8923                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
8924         }
8925         else {
8926                 u32 grc_mode;
8927
8928                 tg3_nvram_lock(tp);
8929
8930                 tg3_enable_nvram_access(tp);
8931                 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
8932                     !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
8933                         tw32(NVRAM_WRITE1, 0x406);
8934
8935                 grc_mode = tr32(GRC_MODE);
8936                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
8937
8938                 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
8939                         !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
8940
8941                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
8942                                 buf);
8943                 }
8944                 else {
8945                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
8946                                 buf);
8947                 }
8948
8949                 grc_mode = tr32(GRC_MODE);
8950                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
8951
8952                 tg3_disable_nvram_access(tp);
8953                 tg3_nvram_unlock(tp);
8954         }
8955
8956         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
8957                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8958                 udelay(40);
8959         }
8960
8961         return ret;
8962 }
8963
8964 struct subsys_tbl_ent {
8965         u16 subsys_vendor, subsys_devid;
8966         u32 phy_id;
8967 };
8968
8969 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
8970         /* Broadcom boards. */
8971         { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
8972         { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
8973         { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
8974         { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
8975         { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
8976         { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
8977         { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
8978         { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
8979         { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
8980         { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
8981         { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
8982
8983         /* 3com boards. */
8984         { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
8985         { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
8986         { PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
8987         { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
8988         { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
8989
8990         /* DELL boards. */
8991         { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
8992         { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
8993         { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
8994         { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
8995
8996         /* Compaq boards. */
8997         { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
8998         { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
8999         { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
9000         { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
9001         { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
9002
9003         /* IBM boards. */
9004         { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
9005 };
9006
9007 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
9008 {
9009         int i;
9010
9011         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
9012                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
9013                      tp->pdev->subsystem_vendor) &&
9014                     (subsys_id_to_phy_id[i].subsys_devid ==
9015                      tp->pdev->subsystem_device))
9016                         return &subsys_id_to_phy_id[i];
9017         }
9018         return NULL;
9019 }
9020
9021 /* Since this function may be called in D3-hot power state during
9022  * tg3_init_one(), only config cycles are allowed.
9023  */
9024 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
9025 {
9026         u32 val;
9027
9028         /* Make sure register accesses (indirect or otherwise)
9029          * will function correctly.
9030          */
9031         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9032                                tp->misc_host_ctrl);
9033
9034         tp->phy_id = PHY_ID_INVALID;
9035         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9036
9037         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9038         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9039                 u32 nic_cfg, led_cfg;
9040                 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
9041                 int eeprom_phy_serdes = 0;
9042
9043                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9044                 tp->nic_sram_data_cfg = nic_cfg;
9045
9046                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
9047                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
9048                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
9049                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
9050                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
9051                     (ver > 0) && (ver < 0x100))
9052                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
9053
9054                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
9055                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
9056                         eeprom_phy_serdes = 1;
9057
9058                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
9059                 if (nic_phy_id != 0) {
9060                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
9061                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
9062
9063                         eeprom_phy_id  = (id1 >> 16) << 10;
9064                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
9065                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
9066                 } else
9067                         eeprom_phy_id = 0;
9068
9069                 tp->phy_id = eeprom_phy_id;
9070                 if (eeprom_phy_serdes) {
9071                         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
9072                                 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
9073                         else
9074                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9075                 }
9076
9077                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9078                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
9079                                     SHASTA_EXT_LED_MODE_MASK);
9080                 else
9081                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
9082
9083                 switch (led_cfg) {
9084                 default:
9085                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
9086                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9087                         break;
9088
9089                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
9090                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
9091                         break;
9092
9093                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
9094                         tp->led_ctrl = LED_CTRL_MODE_MAC;
9095
9096                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
9097                          * returned by some older 5700/5701 bootcode.
9098                          */
9099                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
9100                             ASIC_REV_5700 ||
9101                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
9102                             ASIC_REV_5701)
9103                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9104
9105                         break;
9106
9107                 case SHASTA_EXT_LED_SHARED:
9108                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
9109                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
9110                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
9111                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
9112                                                  LED_CTRL_MODE_PHY_2);
9113                         break;
9114
9115                 case SHASTA_EXT_LED_MAC:
9116                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
9117                         break;
9118
9119                 case SHASTA_EXT_LED_COMBO:
9120                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
9121                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
9122                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
9123                                                  LED_CTRL_MODE_PHY_2);
9124                         break;
9125
9126                 }
9127
9128                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9129                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
9130                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
9131                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
9132
9133                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
9134                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
9135                     (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP))
9136                         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
9137
9138                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9139                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
9140                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9141                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
9142                 }
9143                 if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
9144                         tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
9145
9146                 if (cfg2 & (1 << 17))
9147                         tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
9148
9149                 /* serdes signal pre-emphasis in register 0x590 is set by
9150                  * the bootcode if bit 18 is set. */
9151                 if (cfg2 & (1 << 18))
9152                         tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
9153         }
9154 }
9155
9156 static int __devinit tg3_phy_probe(struct tg3 *tp)
9157 {
9158         u32 hw_phy_id_1, hw_phy_id_2;
9159         u32 hw_phy_id, hw_phy_id_masked;
9160         int err;
9161
9162         /* Reading the PHY ID register can conflict with ASF
9163          * firmware access to the PHY hardware.
9164          */
9165         err = 0;
9166         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
9167                 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
9168         } else {
9169                 /* Now read the physical PHY_ID from the chip and verify
9170                  * that it is sane.  If it doesn't look good, we fall back
9171                  * to the hard-coded table based PHY_ID or, failing that,
9172                  * to the value found in the eeprom area.
9173                  */
9174                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
9175                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
9176
9177                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
9178                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
9179                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
9180
9181                 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
9182         }
9183
9184         if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
9185                 tp->phy_id = hw_phy_id;
9186                 if (hw_phy_id_masked == PHY_ID_BCM8002)
9187                         tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9188                 else
9189                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
9190         } else {
9191                 if (tp->phy_id != PHY_ID_INVALID) {
9192                         /* Do nothing, phy ID already set up in
9193                          * tg3_get_eeprom_hw_cfg().
9194                          */
9195                 } else {
9196                         struct subsys_tbl_ent *p;
9197
9198                         /* No eeprom signature?  Try the hardcoded
9199                          * subsys device table.
9200                          */
9201                         p = lookup_by_subsys(tp);
9202                         if (!p)
9203                                 return -ENODEV;
9204
9205                         tp->phy_id = p->phy_id;
9206                         if (!tp->phy_id ||
9207                             tp->phy_id == PHY_ID_BCM8002)
9208                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9209                 }
9210         }
9211
9212         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
9213             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
9214                 u32 bmsr, adv_reg, tg3_ctrl;
9215
9216                 tg3_readphy(tp, MII_BMSR, &bmsr);
9217                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
9218                     (bmsr & BMSR_LSTATUS))
9219                         goto skip_phy_reset;
9220
9221                 err = tg3_phy_reset(tp);
9222                 if (err)
9223                         return err;
9224
9225                 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
9226                            ADVERTISE_100HALF | ADVERTISE_100FULL |
9227                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
9228                 tg3_ctrl = 0;
9229                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
9230                         tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
9231                                     MII_TG3_CTRL_ADV_1000_FULL);
9232                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
9233                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
9234                                 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
9235                                              MII_TG3_CTRL_ENABLE_AS_MASTER);
9236                 }
9237
9238                 if (!tg3_copper_is_advertising_all(tp)) {
9239                         tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9240
9241                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9242                                 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9243
9244                         tg3_writephy(tp, MII_BMCR,
9245                                      BMCR_ANENABLE | BMCR_ANRESTART);
9246                 }
9247                 tg3_phy_set_wirespeed(tp);
9248
9249                 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9250                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9251                         tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9252         }
9253
9254 skip_phy_reset:
9255         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
9256                 err = tg3_init_5401phy_dsp(tp);
9257                 if (err)
9258                         return err;
9259         }
9260
9261         if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
9262                 err = tg3_init_5401phy_dsp(tp);
9263         }
9264
9265         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
9266                 tp->link_config.advertising =
9267                         (ADVERTISED_1000baseT_Half |
9268                          ADVERTISED_1000baseT_Full |
9269                          ADVERTISED_Autoneg |
9270                          ADVERTISED_FIBRE);
9271         if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
9272                 tp->link_config.advertising &=
9273                         ~(ADVERTISED_1000baseT_Half |
9274                           ADVERTISED_1000baseT_Full);
9275
9276         return err;
9277 }
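
/* Illustrative sketch only (kept out of the build): the same PHY id packing
 * that tg3_phy_probe() does inline from MII_PHYSID1/MII_PHYSID2.  The helper
 * name is made up for this example; the shifts mirror the code above,
 * folding the OUI bits from both registers around the model/revision bits so
 * the result can be compared against the PHY_ID_* constants with PHY_ID_MASK.
 */
#if 0
static u32 tg3_example_pack_phy_id(u32 physid1, u32 physid2)
{
	u32 phy_id;

	phy_id  = (physid1 & 0xffff) << 10;	/* OUI bits from PHYSID1 */
	phy_id |= (physid2 & 0xfc00) << 16;	/* remaining OUI bits */
	phy_id |= (physid2 & 0x03ff) <<  0;	/* model and revision */

	return phy_id;
}
#endif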
9278
9279 static void __devinit tg3_read_partno(struct tg3 *tp)
9280 {
9281         unsigned char vpd_data[256];
9282         int i;
9283
9284         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
9285                 /* Sun decided not to put the necessary bits in the
9286                  * NVRAM of their onboard tg3 parts :(
9287                  */
9288                 strcpy(tp->board_part_number, "Sun 570X");
9289                 return;
9290         }
9291
9292         for (i = 0; i < 256; i += 4) {
9293                 u32 tmp;
9294
9295                 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
9296                         goto out_not_found;
9297
9298                 vpd_data[i + 0] = ((tmp >>  0) & 0xff);
9299                 vpd_data[i + 1] = ((tmp >>  8) & 0xff);
9300                 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
9301                 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
9302         }
9303
9304         /* Now parse and find the part number. */
9305         for (i = 0; i < 256; ) {
9306                 unsigned char val = vpd_data[i];
9307                 int block_end;
9308
9309                 if (val == 0x82 || val == 0x91) {
9310                         i = (i + 3 +
9311                              (vpd_data[i + 1] +
9312                               (vpd_data[i + 2] << 8)));
9313                         continue;
9314                 }
9315
9316                 if (val != 0x90)
9317                         goto out_not_found;
9318
9319                 block_end = (i + 3 +
9320                              (vpd_data[i + 1] +
9321                               (vpd_data[i + 2] << 8)));
9322                 i += 3;
9323                 while (i < block_end) {
9324                         if (vpd_data[i + 0] == 'P' &&
9325                             vpd_data[i + 1] == 'N') {
9326                                 int partno_len = vpd_data[i + 2];
9327
9328                                 if (partno_len > 24)
9329                                         goto out_not_found;
9330
9331                                 memcpy(tp->board_part_number,
9332                                        &vpd_data[i + 3],
9333                                        partno_len);
9334
9335                                 /* Success. */
9336                                 return;
9337                         }
                        /* skip this keyword: 2-byte name + length byte + data */
                        i += 3 + vpd_data[i + 2];
9338                 }
9339
9340                 /* Part number not found. */
9341                 goto out_not_found;
9342         }
9343
9344 out_not_found:
9345         strcpy(tp->board_part_number, "none");
9346 }
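
/* Illustrative sketch only: how the VPD walk above advances through the
 * buffer.  Large-resource tags (0x82 identifier string, 0x90 read-only
 * fields, 0x91 read-write fields) are followed by a two byte, little
 * endian length, so the next tag sits at i + 3 + length.  The helper
 * below is hypothetical and exists only to spell out that arithmetic.
 */
#if 0
static int tg3_example_vpd_next_tag(const unsigned char *vpd, int i)
{
	int len = vpd[i + 1] | (vpd[i + 2] << 8);

	return i + 3 + len;	/* tag byte + 2 length bytes + payload */
}
#endif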
9347
9348 #ifdef CONFIG_SPARC64
9349 static int __devinit tg3_is_sun_570X(struct tg3 *tp)
9350 {
9351         struct pci_dev *pdev = tp->pdev;
9352         struct pcidev_cookie *pcp = pdev->sysdata;
9353
9354         if (pcp != NULL) {
9355                 int node = pcp->prom_node;
9356                 u32 venid;
9357                 int err;
9358
9359                 err = prom_getproperty(node, "subsystem-vendor-id",
9360                                        (char *) &venid, sizeof(venid));
9361                 if (err == 0 || err == -1)
9362                         return 0;
9363                 if (venid == PCI_VENDOR_ID_SUN)
9364                         return 1;
9365         }
9366         return 0;
9367 }
9368 #endif
9369
9370 static int __devinit tg3_get_invariants(struct tg3 *tp)
9371 {
9372         static struct pci_device_id write_reorder_chipsets[] = {
9373                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
9374                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
9375                 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
9376                              PCI_DEVICE_ID_VIA_8385_0) },
9377                 { },
9378         };
9379         u32 misc_ctrl_reg;
9380         u32 cacheline_sz_reg;
9381         u32 pci_state_reg, grc_misc_cfg;
9382         u32 val;
9383         u16 pci_cmd;
9384         int err;
9385
9386 #ifdef CONFIG_SPARC64
9387         if (tg3_is_sun_570X(tp))
9388                 tp->tg3_flags2 |= TG3_FLG2_SUN_570X;
9389 #endif
9390
9391         /* Force memory write invalidate off.  If we leave it on,
9392          * then on 5700_BX chips we have to enable a workaround.
9393          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
9394          * to match the cacheline size.  The Broadcom driver has this
9395          * workaround but turns MWI off all the time, so it never uses
9396          * it.  This seems to suggest that the workaround is insufficient.
9397          */
9398         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9399         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
9400         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9401
9402         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
9403          * has the register indirect write enable bit set before
9404          * we try to access any of the MMIO registers.  It is also
9405          * critical that the PCI-X hw workaround situation is decided
9406          * before that point.
9407          */
9408         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9409                               &misc_ctrl_reg);
9410
9411         tp->pci_chip_rev_id = (misc_ctrl_reg >>
9412                                MISC_HOST_CTRL_CHIPREV_SHIFT);
9413
9414         /* Wrong chip ID in 5752 A0. This code can be removed later
9415          * as A0 is not in production.
9416          */
9417         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
9418                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
9419
9420         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
9421          * we need to disable memory and use config. cycles
9422          * only to access all registers. The 5702/03 chips
9423          * can mistakenly decode the special cycles from the
9424          * ICH chipsets as memory write cycles, causing corruption
9425          * of register and memory space. Only certain ICH bridges
9426          * will drive special cycles with non-zero data during the
9427          * address phase which can fall within the 5703's address
9428          * range. This is not an ICH bug as the PCI spec allows
9429          * non-zero address during special cycles. However, only
9430          * these ICH bridges are known to drive non-zero addresses
9431          * during special cycles.
9432          *
9433          * Since special cycles do not cross PCI bridges, we only
9434          * enable this workaround if the 5703 is on the secondary
9435          * bus of these ICH bridges.
9436          */
9437         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
9438             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
9439                 static struct tg3_dev_id {
9440                         u32     vendor;
9441                         u32     device;
9442                         u32     rev;
9443                 } ich_chipsets[] = {
9444                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
9445                           PCI_ANY_ID },
9446                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
9447                           PCI_ANY_ID },
9448                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
9449                           0xa },
9450                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
9451                           PCI_ANY_ID },
9452                         { },
9453                 };
9454                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
9455                 struct pci_dev *bridge = NULL;
9456
9457                 while (pci_id->vendor != 0) {
9458                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
9459                                                 bridge);
9460                         if (!bridge) {
9461                                 pci_id++;
9462                                 continue;
9463                         }
9464                         if (pci_id->rev != PCI_ANY_ID) {
9465                                 u8 rev;
9466
9467                                 pci_read_config_byte(bridge, PCI_REVISION_ID,
9468                                                      &rev);
9469                                 if (rev > pci_id->rev)
9470                                         continue;
9471                         }
9472                         if (bridge->subordinate &&
9473                             (bridge->subordinate->number ==
9474                              tp->pdev->bus->number)) {
9475
9476                                 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
9477                                 pci_dev_put(bridge);
9478                                 break;
9479                         }
9480                 }
9481         }
9482
9483         /* Find msi capability. */
9484         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
9485             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9486                 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
9487                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
9488         }
9489
9490         /* Initialize misc host control in PCI block. */
9491         tp->misc_host_ctrl |= (misc_ctrl_reg &
9492                                MISC_HOST_CTRL_CHIPREV);
9493         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9494                                tp->misc_host_ctrl);
9495
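        /* TG3PCI_CACHELINESZ maps the standard PCI config dword that holds
         * cache line size, latency timer, header type and BIST, one byte
         * each, which is how the four fields below are unpacked.
         */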
9496         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
9497                               &cacheline_sz_reg);
9498
9499         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
9500         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
9501         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
9502         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
9503
9504         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
9505             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
9506             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
9507                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
9508
9509         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
9510             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
9511                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
9512
9513         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9514                 tp->tg3_flags2 |= TG3_FLG2_HW_TSO;
9515
9516         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
9517             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
9518             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752)
9519                 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
9520
9521         if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
9522                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
9523
9524         /* If we have an AMD 762 or VIA K8T800 chipset, write
9525          * reordering to the mailbox registers done by the host
9526          * controller can cause major trouble.  We read back from
9527          * every mailbox register write to force the writes to be
9528          * posted to the chip in order.
9529          */
9530         if (pci_dev_present(write_reorder_chipsets) &&
9531             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
9532                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
9533
9534         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
9535             tp->pci_lat_timer < 64) {
9536                 tp->pci_lat_timer = 64;
9537
9538                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
9539                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
9540                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
9541                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
9542
9543                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
9544                                        cacheline_sz_reg);
9545         }
9546
9547         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
9548                               &pci_state_reg);
9549
9550         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
9551                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
9552
9553                 /* If this is a 5700 BX chipset, and we are in PCI-X
9554                  * mode, enable register write workaround.
9555                  *
9556                  * The workaround is to use indirect register accesses
9557                  * for all chip writes not to mailbox registers.
9558                  */
9559                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
9560                         u32 pm_reg;
9561                         u16 pci_cmd;
9562
9563                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
9564
9565                         /* The chip can have its power management PCI config
9566                          * space registers clobbered due to this bug.
9567                          * So explicitly force the chip into D0 here.
9568                          */
9569                         pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
9570                                               &pm_reg);
9571                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
9572                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
9573                         pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
9574                                                pm_reg);
9575
9576                         /* Also, force SERR#/PERR# in PCI command. */
9577                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9578                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
9579                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9580                 }
9581         }
9582
9583         /* 5700 BX chips need to have their TX producer index mailboxes
9584          * written twice to work around a bug.
9585          */
9586         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
9587                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
9588
9589         /* Back to back register writes can cause problems on this chip,
9590          * the workaround is to read back all reg writes except those to
9591          * mailbox regs.  See tg3_write_indirect_reg32().
9592          *
9593          * PCI Express 5750_A0 rev chips need this workaround too.
9594          */
9595         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
9596             ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
9597              tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
9598                 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
9599
9600         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
9601                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
9602         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
9603                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
9604
9605         /* Chip-specific fixup from Broadcom driver */
9606         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
9607             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
9608                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
9609                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
9610         }
9611
9612         /* Default fast path register access methods */
9613         tp->read32 = tg3_read32;
9614         tp->write32 = tg3_write32;
9615         tp->read32_mbox = tg3_read32;
9616         tp->write32_mbox = tg3_write32;
9617         tp->write32_tx_mbox = tg3_write32;
9618         tp->write32_rx_mbox = tg3_write32;
9619
9620         /* Various workaround register access methods */
9621         if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
9622                 tp->write32 = tg3_write_indirect_reg32;
9623         else if (tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG)
9624                 tp->write32 = tg3_write_flush_reg32;
9625
9626         if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
9627             (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
9628                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
9629                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
9630                         tp->write32_rx_mbox = tg3_write_flush_reg32;
9631         }
9632
9633         if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
9634                 tp->read32 = tg3_read_indirect_reg32;
9635                 tp->write32 = tg3_write_indirect_reg32;
9636                 tp->read32_mbox = tg3_read_indirect_mbox;
9637                 tp->write32_mbox = tg3_write_indirect_mbox;
9638                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
9639                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
9640
9641                 iounmap(tp->regs);
9642                 tp->regs = NULL;
9643
9644                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9645                 pci_cmd &= ~PCI_COMMAND_MEMORY;
9646                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9647         }
9648
9649         /* Get eeprom hw config before calling tg3_set_power_state().
9650          * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
9651          * determined before calling tg3_set_power_state() so that
9652          * we know whether or not to switch out of Vaux power.
9653          * When the flag is set, it means that GPIO1 is used for eeprom
9654          * write protect and also implies that it is a LOM where GPIOs
9655          * are not used to switch power.
9656          */ 
9657         tg3_get_eeprom_hw_cfg(tp);
9658
9659         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
9660          * GPIO1 driven high will bring 5700's external PHY out of reset.
9661          * It is also used as eeprom write protect on LOMs.
9662          */
9663         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
9664         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
9665             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
9666                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9667                                        GRC_LCLCTRL_GPIO_OUTPUT1);
9668         /* Unused GPIO3 must be driven as output on 5752 because there
9669          * are no pull-up resistors on unused GPIO pins.
9670          */
9671         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9672                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
9673
9674         /* Force the chip into D0. */
9675         err = tg3_set_power_state(tp, 0);
9676         if (err) {
9677                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
9678                        pci_name(tp->pdev));
9679                 return err;
9680         }
9681
9682         /* 5700 B0 chips do not support checksumming correctly due
9683          * to hardware bugs.
9684          */
9685         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
9686                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
9687
9688         /* Pseudo-header checksum is done by hardware logic and not
9689          * the offload processors, so make the chip do the pseudo-
9690          * header checksums on receive.  For transmit it is more
9691          * convenient to do the pseudo-header checksum in software
9692          * as Linux does that on transmit for us in all cases.
9693          */
9694         tp->tg3_flags |= TG3_FLAG_NO_TX_PSEUDO_CSUM;
9695         tp->tg3_flags &= ~TG3_FLAG_NO_RX_PSEUDO_CSUM;
9696
9697         /* Derive initial jumbo mode from MTU assigned in
9698          * ether_setup() via the alloc_etherdev() call
9699          */
9700         if (tp->dev->mtu > ETH_DATA_LEN &&
9701             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
9702                 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
9703
9704         /* Determine WakeOnLan speed to use. */
9705         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9706             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
9707             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
9708             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
9709                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
9710         } else {
9711                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
9712         }
9713
9714         /* A few boards don't want the Ethernet@WireSpeed phy feature */
9715         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
9716             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
9717              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
9718              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
9719             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
9720                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
9721
9722         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
9723             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
9724                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
9725         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
9726                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
9727
9728         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
9729                 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
9730
9731         tp->coalesce_mode = 0;
9732         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
9733             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
9734                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
9735
9736         /* Initialize MAC MI mode, polling disabled. */
9737         tw32_f(MAC_MI_MODE, tp->mi_mode);
9738         udelay(80);
9739
9740         /* Initialize data/descriptor byte/word swapping. */
9741         val = tr32(GRC_MODE);
9742         val &= GRC_MODE_HOST_STACKUP;
9743         tw32(GRC_MODE, val | tp->grc_mode);
9744
9745         tg3_switch_clocks(tp);
9746
9747         /* Clear this out for sanity. */
9748         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9749
9750         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
9751                               &pci_state_reg);
9752         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
9753             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
9754                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
9755
9756                 if (chiprevid == CHIPREV_ID_5701_A0 ||
9757                     chiprevid == CHIPREV_ID_5701_B0 ||
9758                     chiprevid == CHIPREV_ID_5701_B2 ||
9759                     chiprevid == CHIPREV_ID_5701_B5) {
9760                         void __iomem *sram_base;
9761
9762                         /* Write some dummy words into the SRAM status block
9763                          * area, see if it reads back correctly.  If the return
9764                          * value is bad, force enable the PCIX workaround.
9765                          */
9766                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
9767
9768                         writel(0x00000000, sram_base);
9769                         writel(0x00000000, sram_base + 4);
9770                         writel(0xffffffff, sram_base + 4);
9771                         if (readl(sram_base) != 0x00000000)
9772                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
9773                 }
9774         }
9775
9776         udelay(50);
9777         tg3_nvram_init(tp);
9778
9779         grc_misc_cfg = tr32(GRC_MISC_CFG);
9780         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
9781
9782         /* Broadcom's driver says that CIOBE multisplit has a bug */
9783 #if 0
9784         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9785             grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
9786                 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
9787                 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
9788         }
9789 #endif
9790         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9791             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
9792              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
9793                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
9794
9795         if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
9796             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
9797                 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
9798         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
9799                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
9800                                       HOSTCC_MODE_CLRTICK_TXBD);
9801
9802                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
9803                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9804                                        tp->misc_host_ctrl);
9805         }
9806
9807         /* these are limited to 10/100 only */
9808         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
9809              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
9810             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9811              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
9812              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
9813               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
9814               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
9815             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
9816              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
9817               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)))
9818                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
9819
9820         err = tg3_phy_probe(tp);
9821         if (err) {
9822                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
9823                        pci_name(tp->pdev), err);
9824                 /* ... but do not return immediately ... */
9825         }
9826
9827         tg3_read_partno(tp);
9828
9829         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
9830                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
9831         } else {
9832                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
9833                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
9834                 else
9835                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
9836         }
9837
9838         /* 5700 {AX,BX} chips have a broken status block link
9839          * change bit implementation, so we must use the
9840          * status register in those cases.
9841          */
9842         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
9843                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
9844         else
9845                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
9846
9847         /* The led_ctrl is set during tg3_phy_probe; here we might
9848          * have to force the link status polling mechanism based
9849          * upon subsystem IDs.
9850          */
9851         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
9852             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
9853                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
9854                                   TG3_FLAG_USE_LINKCHG_REG);
9855         }
9856
9857         /* For all SERDES we poll the MAC status register. */
9858         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9859                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
9860         else
9861                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
9862
9863         /* It seems all chips can get confused if TX buffers
9864          * straddle the 4GB address boundary in some cases.
9865          */
9866         tp->dev->hard_start_xmit = tg3_start_xmit;
9867
9868         tp->rx_offset = 2;
9869         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
9870             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
9871                 tp->rx_offset = 0;
9872
9873         /* By default, disable wake-on-lan.  User can change this
9874          * using ETHTOOL_SWOL.
9875          */
9876         tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
9877
9878         return err;
9879 }
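
/* Minimal sketch (illustrative only, kept out of the build) of the
 * posted-write flush idea behind the tg3_write_flush_reg32() style
 * accessors selected in tg3_get_invariants() and behind the mailbox
 * write-reorder workaround: reading the register back right after the
 * write forces the posted PCI write out to the chip before we go on.
 */
#if 0
static void tg3_example_write_flush(void __iomem *regs, u32 off, u32 val)
{
	writel(val, regs + off);
	readl(regs + off);	/* flush the posted write */
}
#endif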
9880
9881 #ifdef CONFIG_SPARC64
9882 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
9883 {
9884         struct net_device *dev = tp->dev;
9885         struct pci_dev *pdev = tp->pdev;
9886         struct pcidev_cookie *pcp = pdev->sysdata;
9887
9888         if (pcp != NULL) {
9889                 int node = pcp->prom_node;
9890
9891                 if (prom_getproplen(node, "local-mac-address") == 6) {
9892                         prom_getproperty(node, "local-mac-address",
9893                                          dev->dev_addr, 6);
9894                         memcpy(dev->perm_addr, dev->dev_addr, 6);
9895                         return 0;
9896                 }
9897         }
9898         return -ENODEV;
9899 }
9900
9901 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
9902 {
9903         struct net_device *dev = tp->dev;
9904
9905         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
9906         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
9907         return 0;
9908 }
9909 #endif
9910
9911 static int __devinit tg3_get_device_address(struct tg3 *tp)
9912 {
9913         struct net_device *dev = tp->dev;
9914         u32 hi, lo, mac_offset;
9915
9916 #ifdef CONFIG_SPARC64
9917         if (!tg3_get_macaddr_sparc(tp))
9918                 return 0;
9919 #endif
9920
9921         mac_offset = 0x7c;
9922         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9923              !(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) ||
9924             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
9925                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
9926                         mac_offset = 0xcc;
9927                 if (tg3_nvram_lock(tp))
9928                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
9929                 else
9930                         tg3_nvram_unlock(tp);
9931         }
9932
9933         /* First try to get it from MAC address mailbox. */
9934         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
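        /* 0x484b is ASCII 'H','K'; the driver treats it as the signature
         * marking a valid MAC address in the mailbox.
         */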
9935         if ((hi >> 16) == 0x484b) {
9936                 dev->dev_addr[0] = (hi >>  8) & 0xff;
9937                 dev->dev_addr[1] = (hi >>  0) & 0xff;
9938
9939                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
9940                 dev->dev_addr[2] = (lo >> 24) & 0xff;
9941                 dev->dev_addr[3] = (lo >> 16) & 0xff;
9942                 dev->dev_addr[4] = (lo >>  8) & 0xff;
9943                 dev->dev_addr[5] = (lo >>  0) & 0xff;
9944         }
9945         /* Next, try NVRAM. */
9946         else if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X) &&
9947                  !tg3_nvram_read(tp, mac_offset + 0, &hi) &&
9948                  !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
9949                 dev->dev_addr[0] = ((hi >> 16) & 0xff);
9950                 dev->dev_addr[1] = ((hi >> 24) & 0xff);
9951                 dev->dev_addr[2] = ((lo >>  0) & 0xff);
9952                 dev->dev_addr[3] = ((lo >>  8) & 0xff);
9953                 dev->dev_addr[4] = ((lo >> 16) & 0xff);
9954                 dev->dev_addr[5] = ((lo >> 24) & 0xff);
9955         }
9956         /* Finally just fetch it out of the MAC control regs. */
9957         else {
9958                 hi = tr32(MAC_ADDR_0_HIGH);
9959                 lo = tr32(MAC_ADDR_0_LOW);
9960
9961                 dev->dev_addr[5] = lo & 0xff;
9962                 dev->dev_addr[4] = (lo >> 8) & 0xff;
9963                 dev->dev_addr[3] = (lo >> 16) & 0xff;
9964                 dev->dev_addr[2] = (lo >> 24) & 0xff;
9965                 dev->dev_addr[1] = hi & 0xff;
9966                 dev->dev_addr[0] = (hi >> 8) & 0xff;
9967         }
9968
9969         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
9970 #ifdef CONFIG_SPARC64
9971                 if (!tg3_get_default_macaddr_sparc(tp))
9972                         return 0;
9973 #endif
9974                 return -EINVAL;
9975         }
9976         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
9977         return 0;
9978 }
9979
9980 #define BOUNDARY_SINGLE_CACHELINE       1
9981 #define BOUNDARY_MULTI_CACHELINE        2
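
/* BOUNDARY_SINGLE_CACHELINE asks tg3_calc_dma_bndry() to stop DMA bursts
 * at every cache line; BOUNDARY_MULTI_CACHELINE allows bursts spanning a
 * few cache lines before the boundary bits kick in.
 */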
9982
9983 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
9984 {
9985         int cacheline_size;
9986         u8 byte;
9987         int goal;
9988
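        /* PCI_CACHE_LINE_SIZE is in units of 32-bit words, hence the *4
         * below; a value of zero means the cache line size was never set,
         * so assume the worst case boundary of 1024 bytes.
         */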
9989         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
9990         if (byte == 0)
9991                 cacheline_size = 1024;
9992         else
9993                 cacheline_size = (int) byte * 4;
9994
9995         /* On 5703 and later chips, the boundary bits have no
9996          * effect.
9997          */
9998         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
9999             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
10000             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
10001                 goto out;
10002
10003 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
10004         goal = BOUNDARY_MULTI_CACHELINE;
10005 #elif defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
10006         goal = BOUNDARY_SINGLE_CACHELINE;
10007 #else
10008         goal = 0;
10009 #endif
10012
10013         if (!goal)
10014                 goto out;
10015
10016         /* PCI controllers on most RISC systems tend to disconnect
10017          * when a device tries to burst across a cache-line boundary.
10018          * Therefore, letting tg3 do so just wastes PCI bandwidth.
10019          *
10020          * Unfortunately, for PCI-E there are only limited
10021          * write-side controls for this, and thus for reads
10022          * we will still get the disconnects.  We'll also waste
10023          * these PCI cycles for both read and write for chips
10024          * other than 5700 and 5701 which do not implement the
10025          * boundary bits.
10026          */
10027         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
10028             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
10029                 switch (cacheline_size) {
10030                 case 16:
10031                 case 32:
10032                 case 64:
10033                 case 128:
10034                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10035                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
10036                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
10037                         } else {
10038                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
10039                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
10040                         }
10041                         break;
10042
10043                 case 256:
10044                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
10045                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
10046                         break;
10047
10048                 default:
10049                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
10050                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
10051                         break;
10052                 }
10053         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10054                 switch (cacheline_size) {
10055                 case 16:
10056                 case 32:
10057                 case 64:
10058                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10059                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
10060                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
10061                                 break;
10062                         }
10063                         /* fallthrough */
10064                 case 128:
10065                 default:
10066                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
10067                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
10068                         break;
10069                 }
10070         } else {
10071                 switch (cacheline_size) {
10072                 case 16:
10073                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10074                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
10075                                         DMA_RWCTRL_WRITE_BNDRY_16);
10076                                 break;
10077                         }
10078                         /* fallthrough */
10079                 case 32:
10080                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10081                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
10082                                         DMA_RWCTRL_WRITE_BNDRY_32);
10083                                 break;
10084                         }
10085                         /* fallthrough */
10086                 case 64:
10087                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10088                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
10089                                         DMA_RWCTRL_WRITE_BNDRY_64);
10090                                 break;
10091                         }
10092                         /* fallthrough */
10093                 case 128:
10094                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10095                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
10096                                         DMA_RWCTRL_WRITE_BNDRY_128);
10097                                 break;
10098                         }
10099                         /* fallthrough */
10100                 case 256:
10101                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
10102                                 DMA_RWCTRL_WRITE_BNDRY_256);
10103                         break;
10104                 case 512:
10105                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
10106                                 DMA_RWCTRL_WRITE_BNDRY_512);
10107                         break;
10108                 case 1024:
10109                 default:
10110                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
10111                                 DMA_RWCTRL_WRITE_BNDRY_1024);
10112                         break;
10113                 }
10114         }
10115
10116 out:
10117         return val;
10118 }
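
/* Illustrative use of tg3_calc_dma_bndry(), mirroring what tg3_test_dma()
 * does below: seed DMA_RW_CTRL with the PCI read/write command values and
 * let the helper fold in the boundary bits for this system.  On a plain
 * PCI bus with a 64 byte cache line and goal == BOUNDARY_SINGLE_CACHELINE
 * that adds DMA_RWCTRL_READ_BNDRY_64 | DMA_RWCTRL_WRITE_BNDRY_64.  The
 * wrapper name is made up for this sketch.
 */
#if 0
static u32 tg3_example_dma_rwctrl(struct tg3 *tp)
{
	u32 val = (0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
		  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT);

	return tg3_calc_dma_bndry(tp, val);
}
#endif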
10119
10120 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
10121 {
10122         struct tg3_internal_buffer_desc test_desc;
10123         u32 sram_dma_descs;
10124         int i, ret;
10125
10126         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
10127
10128         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
10129         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
10130         tw32(RDMAC_STATUS, 0);
10131         tw32(WDMAC_STATUS, 0);
10132
10133         tw32(BUFMGR_MODE, 0);
10134         tw32(FTQ_RESET, 0);
10135
10136         test_desc.addr_hi = ((u64) buf_dma) >> 32;
10137         test_desc.addr_lo = buf_dma & 0xffffffff;
10138         test_desc.nic_mbuf = 0x00002100;
10139         test_desc.len = size;
10140
10141         /*
10142          * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
10143          * the *second* time the tg3 driver was getting loaded after an
10144          * initial scan.
10145          *
10146          * Broadcom tells me:
10147          *   ...the DMA engine is connected to the GRC block and a DMA
10148          *   reset may affect the GRC block in some unpredictable way...
10149          *   The behavior of resets to individual blocks has not been tested.
10150          *
10151          * Broadcom noted the GRC reset will also reset all sub-components.
10152          */
10153         if (to_device) {
10154                 test_desc.cqid_sqid = (13 << 8) | 2;
10155
10156                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
10157                 udelay(40);
10158         } else {
10159                 test_desc.cqid_sqid = (16 << 8) | 7;
10160
10161                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
10162                 udelay(40);
10163         }
10164         test_desc.flags = 0x00000005;
10165
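        /* Copy the descriptor into NIC SRAM one 32-bit word at a time by
         * pointing the PCI memory window at the descriptor slot and then
         * writing through the window's data register.
         */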
10166         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
10167                 u32 val;
10168
10169                 val = *(((u32 *)&test_desc) + i);
10170                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
10171                                        sram_dma_descs + (i * sizeof(u32)));
10172                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
10173         }
10174         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
10175
10176         if (to_device) {
10177                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
10178         } else {
10179                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
10180         }
10181
10182         ret = -ENODEV;
10183         for (i = 0; i < 40; i++) {
10184                 u32 val;
10185
10186                 if (to_device)
10187                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
10188                 else
10189                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
10190                 if ((val & 0xffff) == sram_dma_descs) {
10191                         ret = 0;
10192                         break;
10193                 }
10194
10195                 udelay(100);
10196         }
10197
10198         return ret;
10199 }
10200
10201 #define TEST_BUFFER_SIZE        0x2000
10202
10203 static int __devinit tg3_test_dma(struct tg3 *tp)
10204 {
10205         dma_addr_t buf_dma;
10206         u32 *buf, saved_dma_rwctrl;
10207         int ret;
10208
10209         buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
10210         if (!buf) {
10211                 ret = -ENOMEM;
10212                 goto out_nofree;
10213         }
10214
10215         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
10216                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
10217
10218         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
10219
10220         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10221                 /* DMA read watermark not used on PCIE */
10222                 tp->dma_rwctrl |= 0x00180000;
10223         } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
10224                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
10225                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
10226                         tp->dma_rwctrl |= 0x003f0000;
10227                 else
10228                         tp->dma_rwctrl |= 0x003f000f;
10229         } else {
10230                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
10231                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
10232                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
10233
10234                         if (ccval == 0x6 || ccval == 0x7)
10235                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
10236
10237                         /* Set bit 23 to enable PCIX hw bug fix */
10238                         tp->dma_rwctrl |= 0x009f0000;
10239                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
10240                         /* 5780 always in PCIX mode */
10241                         tp->dma_rwctrl |= 0x00144000;
10242                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
10243                         /* 5714 always in PCIX mode */
10244                         tp->dma_rwctrl |= 0x00148000;
10245                 } else {
10246                         tp->dma_rwctrl |= 0x001b000f;
10247                 }
10248         }
10249
10250         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
10251             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
10252                 tp->dma_rwctrl &= 0xfffffff0;
10253
10254         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10255             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
10256                 /* Remove this if it causes problems for some boards. */
10257                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
10258
10259                 /* On 5700/5701 chips, we need to set this bit.
10260                  * Otherwise the chip will issue cacheline transactions
10261                  * to streamable DMA memory with not all the byte
10262                  * enables turned on.  This is an error on several
10263                  * RISC PCI controllers, in particular sparc64.
10264                  *
10265                  * On 5703/5704 chips, this bit has been reassigned
10266                  * a different meaning.  In particular, it is used
10267                  * on those chips to enable a PCI-X workaround.
10268                  */
10269                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
10270         }
10271
10272         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10273
10274 #if 0
10275         /* Unneeded, already done by tg3_get_invariants.  */
10276         tg3_switch_clocks(tp);
10277 #endif
10278
10279         ret = 0;
10280         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10281             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
10282                 goto out;
10283
10284         /* It is best to perform the DMA test with maximum write burst size
10285          * to expose the 5700/5701 write DMA bug.
10286          */
10287         saved_dma_rwctrl = tp->dma_rwctrl;
10288         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
10289         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10290
10291         while (1) {
10292                 u32 *p = buf, i;
10293
10294                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
10295                         p[i] = i;
10296
10297                 /* Send the buffer to the chip. */
10298                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
10299                 if (ret) {
10300                         printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
10301                         break;
10302                 }
10303
10304 #if 0
10305                 /* validate data reached card RAM correctly. */
10306                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
10307                         u32 val;
10308                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
10309                         if (le32_to_cpu(val) != p[i]) {
10310                                 printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
10311                                 /* ret = -ENODEV here? */
10312                         }
10313                         p[i] = 0;
10314                 }
10315 #endif
10316                 /* Now read it back. */
10317                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
10318                 if (ret) {
10319                         printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
10320
10321                         break;
10322                 }
10323
10324                 /* Verify it. */
10325                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
10326                         if (p[i] == i)
10327                                 continue;
10328
10329                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
10330                             DMA_RWCTRL_WRITE_BNDRY_16) {
10331                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
10332                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
10333                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10334                                 break;
10335                         } else {
10336                                 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
10337                                 ret = -ENODEV;
10338                                 goto out;
10339                         }
10340                 }
10341
10342                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
10343                         /* Success. */
10344                         ret = 0;
10345                         break;
10346                 }
10347         }
10348         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
10349             DMA_RWCTRL_WRITE_BNDRY_16) {
10350                 static struct pci_device_id dma_wait_state_chipsets[] = {
10351                         { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
10352                                      PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
10353                         { },
10354                 };
10355
10356                 /* DMA test passed without adjusting DMA boundary,
10357                  * now look for chipsets that are known to expose the
10358                  * DMA bug without failing the test.
10359                  */
10360                 if (pci_dev_present(dma_wait_state_chipsets)) {
10361                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
10362                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
10363                 }
10364                 else
10365                         /* Safe to use the calculated DMA boundary. */
10366                         tp->dma_rwctrl = saved_dma_rwctrl;
10367
10368                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10369         }
10370
10371 out:
10372         pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
10373 out_nofree:
10374         return ret;
10375 }
10376
10377 static void __devinit tg3_init_link_config(struct tg3 *tp)
10378 {
10379         tp->link_config.advertising =
10380                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
10381                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
10382                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
10383                  ADVERTISED_Autoneg | ADVERTISED_MII);
10384         tp->link_config.speed = SPEED_INVALID;
10385         tp->link_config.duplex = DUPLEX_INVALID;
10386         tp->link_config.autoneg = AUTONEG_ENABLE;
10387         netif_carrier_off(tp->dev);
10388         tp->link_config.active_speed = SPEED_INVALID;
10389         tp->link_config.active_duplex = DUPLEX_INVALID;
10390         tp->link_config.phy_is_low_power = 0;
10391         tp->link_config.orig_speed = SPEED_INVALID;
10392         tp->link_config.orig_duplex = DUPLEX_INVALID;
10393         tp->link_config.orig_autoneg = AUTONEG_INVALID;
10394 }
10395
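/* Default buffer manager watermarks.  5705 and later chips get their own
 * set of MBUF thresholds (with the 5780 values for jumbo frames); older
 * chips use the original defaults.  The DMA low/high water marks are
 * common to both families.
 */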
10396 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
10397 {
10398         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10399                 tp->bufmgr_config.mbuf_read_dma_low_water =
10400                         DEFAULT_MB_RDMA_LOW_WATER_5705;
10401                 tp->bufmgr_config.mbuf_mac_rx_low_water =
10402                         DEFAULT_MB_MACRX_LOW_WATER_5705;
10403                 tp->bufmgr_config.mbuf_high_water =
10404                         DEFAULT_MB_HIGH_WATER_5705;
10405
10406                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
10407                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
10408                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
10409                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
10410                 tp->bufmgr_config.mbuf_high_water_jumbo =
10411                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
10412         } else {
10413                 tp->bufmgr_config.mbuf_read_dma_low_water =
10414                         DEFAULT_MB_RDMA_LOW_WATER;
10415                 tp->bufmgr_config.mbuf_mac_rx_low_water =
10416                         DEFAULT_MB_MACRX_LOW_WATER;
10417                 tp->bufmgr_config.mbuf_high_water =
10418                         DEFAULT_MB_HIGH_WATER;
10419
10420                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
10421                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
10422                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
10423                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
10424                 tp->bufmgr_config.mbuf_high_water_jumbo =
10425                         DEFAULT_MB_HIGH_WATER_JUMBO;
10426         }
10427
10428         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
10429         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
10430 }
10431
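/* Map the masked PHY ID to a printable name for the probe banner. */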
10432 static char * __devinit tg3_phy_string(struct tg3 *tp)
10433 {
10434         switch (tp->phy_id & PHY_ID_MASK) {
10435         case PHY_ID_BCM5400:    return "5400";
10436         case PHY_ID_BCM5401:    return "5401";
10437         case PHY_ID_BCM5411:    return "5411";
10438         case PHY_ID_BCM5701:    return "5701";
10439         case PHY_ID_BCM5703:    return "5703";
10440         case PHY_ID_BCM5704:    return "5704";
10441         case PHY_ID_BCM5705:    return "5705";
10442         case PHY_ID_BCM5750:    return "5750";
10443         case PHY_ID_BCM5752:    return "5752";
10444         case PHY_ID_BCM5714:    return "5714";
10445         case PHY_ID_BCM5780:    return "5780";
10446         case PHY_ID_BCM8002:    return "8002/serdes";
10447         case 0:                 return "serdes";
10448         default:                return "unknown";
10449         }
10450 }
10451
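/* Build a human-readable description of the bus the NIC sits on for the
 * probe banner: PCI Express, PCI-X with the clock rate decoded from
 * TG3PCI_CLOCK_CTRL, or plain PCI at 33/66MHz, plus the bus width.
 */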
10452 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
10453 {
10454         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10455                 strcpy(str, "PCI Express");
10456                 return str;
10457         } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
10458                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
10459
10460                 strcpy(str, "PCIX:");
10461
10462                 if ((clock_ctrl == 7) ||
10463                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
10464                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
10465                         strcat(str, "133MHz");
10466                 else if (clock_ctrl == 0)
10467                         strcat(str, "33MHz");
10468                 else if (clock_ctrl == 2)
10469                         strcat(str, "50MHz");
10470                 else if (clock_ctrl == 4)
10471                         strcat(str, "66MHz");
10472                 else if (clock_ctrl == 6)
10473                         strcat(str, "100MHz");
10476         } else {
10477                 strcpy(str, "PCI:");
10478                 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
10479                         strcat(str, "66MHz");
10480                 else
10481                         strcat(str, "33MHz");
10482         }
10483         if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
10484                 strcat(str, ":32-bit");
10485         else
10486                 strcat(str, ":64-bit");
10487         return str;
10488 }
10489
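/* On dual-port chips (5704/5714) the two ports appear as two PCI
 * functions in the same slot; locate the other function so the two
 * driver instances can find each other.  A single-port configuration
 * returns tp->pdev itself.
 */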
10490 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
10491 {
10492         struct pci_dev *peer;
10493         unsigned int func, devnr = tp->pdev->devfn & ~7;
10494
10495         for (func = 0; func < 8; func++) {
10496                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
10497                 if (peer && peer != tp->pdev)
10498                         break;
10499                 pci_dev_put(peer);
10500         }
10501         /* The 5704 can be configured in single-port mode; set peer to
10502          * tp->pdev in that case.
10503          */
10504         if (!peer) {
10505                 peer = tp->pdev;
10506                 return peer;
10507         }
10508
10509         /*
10510          * We don't need to keep the refcount elevated; there's no way
10511          * to remove one half of this device without removing the other.
10512          */
10513         pci_dev_put(peer);
10514
10515         return peer;
10516 }
10517
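/* Default interrupt coalescing parameters; these are what the
 * ETHTOOL_GCOALESCE query (ethtool -c) reports until the user changes
 * them.  Chips whose host coalescing engine clears ticks on BD events
 * use the *_CLRTCKS variants, and 5705 and later chips have the per-IRQ
 * and statistics-block coalescing values cleared.
 */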
10518 static void __devinit tg3_init_coal(struct tg3 *tp)
10519 {
10520         struct ethtool_coalesce *ec = &tp->coal;
10521
10522         memset(ec, 0, sizeof(*ec));
10523         ec->cmd = ETHTOOL_GCOALESCE;
10524         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
10525         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
10526         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
10527         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
10528         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
10529         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
10530         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
10531         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
10532         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
10533
10534         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
10535                                  HOSTCC_MODE_CLRTICK_TXBD)) {
10536                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
10537                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
10538                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
10539                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
10540         }
10541
10542         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10543                 ec->rx_coalesce_usecs_irq = 0;
10544                 ec->tx_coalesce_usecs_irq = 0;
10545                 ec->stats_block_coalesce_usecs = 0;
10546         }
10547 }
10548
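/* PCI probe entry point: enable the device, claim its register BAR,
 * configure the DMA masks, map the registers, read the chip invariants
 * and MAC address, run the DMA test, and finally register the net
 * device.  Failures unwind through the err_out_* labels below.
 */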
10549 static int __devinit tg3_init_one(struct pci_dev *pdev,
10550                                   const struct pci_device_id *ent)
10551 {
10552         static int tg3_version_printed;
10553         unsigned long tg3reg_base, tg3reg_len;
10554         struct net_device *dev;
10555         struct tg3 *tp;
10556         int i, err, pci_using_dac, pm_cap;
10557         char str[40];
10558
10559         if (tg3_version_printed++ == 0)
10560                 printk(KERN_INFO "%s", version);
10561
10562         err = pci_enable_device(pdev);
10563         if (err) {
10564                 printk(KERN_ERR PFX "Cannot enable PCI device, "
10565                        "aborting.\n");
10566                 return err;
10567         }
10568
10569         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10570                 printk(KERN_ERR PFX "Cannot find proper PCI device "
10571                        "base address, aborting.\n");
10572                 err = -ENODEV;
10573                 goto err_out_disable_pdev;
10574         }
10575
10576         err = pci_request_regions(pdev, DRV_MODULE_NAME);
10577         if (err) {
10578                 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
10579                        "aborting.\n");
10580                 goto err_out_disable_pdev;
10581         }
10582
10583         pci_set_master(pdev);
10584
10585         /* Find power-management capability. */
10586         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10587         if (pm_cap == 0) {
10588                 printk(KERN_ERR PFX "Cannot find Power Management capability, "
10589                        "aborting.\n");
10590                 err = -EIO;
10591                 goto err_out_free_res;
10592         }
10593
10594         /* Configure DMA attributes. */
10595         err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
10596         if (!err) {
10597                 pci_using_dac = 1;
10598                 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
10599                 if (err < 0) {
10600                         printk(KERN_ERR PFX "Unable to obtain 64 bit DMA "
10601                                "for consistent allocations\n");
10602                         goto err_out_free_res;
10603                 }
10604         } else {
10605                 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
10606                 if (err) {
10607                         printk(KERN_ERR PFX "No usable DMA configuration, "
10608                                "aborting.\n");
10609                         goto err_out_free_res;
10610                 }
10611                 pci_using_dac = 0;
10612         }
10613
10614         tg3reg_base = pci_resource_start(pdev, 0);
10615         tg3reg_len = pci_resource_len(pdev, 0);
10616
10617         dev = alloc_etherdev(sizeof(*tp));
10618         if (!dev) {
10619                 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
10620                 err = -ENOMEM;
10621                 goto err_out_free_res;
10622         }
10623
10624         SET_MODULE_OWNER(dev);
10625         SET_NETDEV_DEV(dev, &pdev->dev);
10626
10627         if (pci_using_dac)
10628                 dev->features |= NETIF_F_HIGHDMA;
10629         dev->features |= NETIF_F_LLTX;
10630 #if TG3_VLAN_TAG_USED
10631         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
10632         dev->vlan_rx_register = tg3_vlan_rx_register;
10633         dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
10634 #endif
10635
10636         tp = netdev_priv(dev);
10637         tp->pdev = pdev;
10638         tp->dev = dev;
10639         tp->pm_cap = pm_cap;
10640         tp->mac_mode = TG3_DEF_MAC_MODE;
10641         tp->rx_mode = TG3_DEF_RX_MODE;
10642         tp->tx_mode = TG3_DEF_TX_MODE;
10643         tp->mi_mode = MAC_MI_MODE_BASE;
10644         if (tg3_debug > 0)
10645                 tp->msg_enable = tg3_debug;
10646         else
10647                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
10648
10649         /* The word/byte swap controls here control register access byte
10650          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
10651          * setting below.
10652          */
10653         tp->misc_host_ctrl =
10654                 MISC_HOST_CTRL_MASK_PCI_INT |
10655                 MISC_HOST_CTRL_WORD_SWAP |
10656                 MISC_HOST_CTRL_INDIR_ACCESS |
10657                 MISC_HOST_CTRL_PCISTATE_RW;
10658
10659         /* The NONFRM (non-frame) byte/word swap controls take effect
10660          * on descriptor entries, anything which isn't packet data.
10661          *
10662          * The StrongARM chips on the board (one for tx, one for rx)
10663          * are running in big-endian mode.
10664          */
10665         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
10666                         GRC_MODE_WSWAP_NONFRM_DATA);
10667 #ifdef __BIG_ENDIAN
10668         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
10669 #endif
10670         spin_lock_init(&tp->lock);
10671         spin_lock_init(&tp->tx_lock);
10672         spin_lock_init(&tp->indirect_lock);
10673         INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
10674
10675         tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
10676         if (!tp->regs) {
10677                 printk(KERN_ERR PFX "Cannot map device registers, "
10678                        "aborting.\n");
10679                 err = -ENOMEM;
10680                 goto err_out_free_dev;
10681         }
10682
10683         tg3_init_link_config(tp);
10684
10685         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
10686         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
10687         tp->tx_pending = TG3_DEF_TX_RING_PENDING;
10688
10689         dev->open = tg3_open;
10690         dev->stop = tg3_close;
10691         dev->get_stats = tg3_get_stats;
10692         dev->set_multicast_list = tg3_set_rx_mode;
10693         dev->set_mac_address = tg3_set_mac_addr;
10694         dev->do_ioctl = tg3_ioctl;
10695         dev->tx_timeout = tg3_tx_timeout;
10696         dev->poll = tg3_poll;
10697         dev->ethtool_ops = &tg3_ethtool_ops;
10698         dev->weight = 64;
10699         dev->watchdog_timeo = TG3_TX_TIMEOUT;
10700         dev->change_mtu = tg3_change_mtu;
10701         dev->irq = pdev->irq;
10702 #ifdef CONFIG_NET_POLL_CONTROLLER
10703         dev->poll_controller = tg3_poll_controller;
10704 #endif
10705
10706         err = tg3_get_invariants(tp);
10707         if (err) {
10708                 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
10709                        "aborting.\n");
10710                 goto err_out_iounmap;
10711         }
10712
10713         tg3_init_bufmgr_config(tp);
10714
10715 #if TG3_TSO_SUPPORT != 0
10716         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
10717                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
10718         }
10719         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10720             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
10721             tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
10722             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
10723                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
10724         } else {
10725                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
10726         }
10727
10728         /* TSO is off by default; the user can enable it with ethtool. */
10729 #if 0
10730         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)
10731                 dev->features |= NETIF_F_TSO;
10732 #endif
10733
10734 #endif
10735
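        /* 5705 A1 parts that lack TSO capability and are not on a
         * high-speed bus have the standard RX ring capped at 64 pending
         * descriptors.
         */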
10736         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
10737             !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
10738             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
10739                 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
10740                 tp->rx_pending = 63;
10741         }
10742
10743         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
10744             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
10745                 tp->pdev_peer = tg3_find_peer(tp);
10746
10747         err = tg3_get_device_address(tp);
10748         if (err) {
10749                 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
10750                        "aborting.\n");
10751                 goto err_out_iounmap;
10752         }
10753
10754         /*
10755          * Reset the chip in case a UNDI or EFI driver did not shut it
10756          * down; otherwise the DMA self test will enable WDMAC and we'll
10757          * see (spurious) pending DMA on the PCI bus at that point.
10758          */
10759         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
10760             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10761                 pci_save_state(tp->pdev);
10762                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
10763                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10764         }
10765
10766         err = tg3_test_dma(tp);
10767         if (err) {
10768                 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
10769                 goto err_out_iounmap;
10770         }
10771
10772         /* Tigon3 can only checksum IPv4 packets... and some chips have
10773          * buggy checksumming.
10774          */
10775         if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
10776                 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
10777                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
10778         } else
10779                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
10780
10781         if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
10782                 dev->features &= ~NETIF_F_HIGHDMA;
10783
10784         /* Flow control autonegotiation is the default behavior. */
10785         tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
10786
10787         tg3_init_coal(tp);
10788
10789         /* Now that we have fully set up the chip, save away a snapshot
10790          * of the PCI config space.  We need to restore this after
10791          * GRC_MISC_CFG core clock resets and some resume events.
10792          */
10793         pci_save_state(tp->pdev);
10794
10795         err = register_netdev(dev);
10796         if (err) {
10797                 printk(KERN_ERR PFX "Cannot register net device, "
10798                        "aborting.\n");
10799                 goto err_out_iounmap;
10800         }
10801
10802         pci_set_drvdata(pdev, dev);
10803
10804         printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %sBaseT Ethernet ",
10805                dev->name,
10806                tp->board_part_number,
10807                tp->pci_chip_rev_id,
10808                tg3_phy_string(tp),
10809                tg3_bus_string(tp, str),
10810                (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");
10811
10812         for (i = 0; i < 6; i++)
10813                 printk("%2.2x%c", dev->dev_addr[i],
10814                        i == 5 ? '\n' : ':');
10815
10816         printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
10817                "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
10818                "TSOcap[%d]\n",
10819                dev->name,
10820                (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
10821                (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
10822                (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
10823                (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
10824                (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
10825                (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
10826                (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
10827         printk(KERN_INFO "%s: dma_rwctrl[%08x]\n",
10828                dev->name, tp->dma_rwctrl);
10829
10830         return 0;
10831
10832 err_out_iounmap:
10833         if (tp->regs) {
10834                 iounmap(tp->regs);
10835                 tp->regs = NULL;
10836         }
10837
10838 err_out_free_dev:
10839         free_netdev(dev);
10840
10841 err_out_free_res:
10842         pci_release_regions(pdev);
10843
10844 err_out_disable_pdev:
10845         pci_disable_device(pdev);
10846         pci_set_drvdata(pdev, NULL);
10847         return err;
10848 }
10849
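/* Tear down in the reverse order of tg3_init_one(): unregister the net
 * device, unmap the registers, free the netdev and release the PCI
 * resources.
 */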
10850 static void __devexit tg3_remove_one(struct pci_dev *pdev)
10851 {
10852         struct net_device *dev = pci_get_drvdata(pdev);
10853
10854         if (dev) {
10855                 struct tg3 *tp = netdev_priv(dev);
10856
10857                 unregister_netdev(dev);
10858                 if (tp->regs) {
10859                         iounmap(tp->regs);
10860                         tp->regs = NULL;
10861                 }
10862                 free_netdev(dev);
10863                 pci_release_regions(pdev);
10864                 pci_disable_device(pdev);
10865                 pci_set_drvdata(pdev, NULL);
10866         }
10867 }
10868
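/* Suspend: stop the transmit path and the driver timer, disable
 * interrupts, halt the chip and drop it into the requested low-power
 * state.  If the power transition fails, the hardware is brought back
 * up so the interface keeps working.
 */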
10869 static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
10870 {
10871         struct net_device *dev = pci_get_drvdata(pdev);
10872         struct tg3 *tp = netdev_priv(dev);
10873         int err;
10874
10875         if (!netif_running(dev))
10876                 return 0;
10877
10878         tg3_netif_stop(tp);
10879
10880         del_timer_sync(&tp->timer);
10881
10882         tg3_full_lock(tp, 1);
10883         tg3_disable_ints(tp);
10884         tg3_full_unlock(tp);
10885
10886         netif_device_detach(dev);
10887
10888         tg3_full_lock(tp, 0);
10889         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10890         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
10891         tg3_full_unlock(tp);
10892
10893         err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
10894         if (err) {
10895                 tg3_full_lock(tp, 0);
10896
10897                 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
10898                 tg3_init_hw(tp);
10899
10900                 tp->timer.expires = jiffies + tp->timer_offset;
10901                 add_timer(&tp->timer);
10902
10903                 netif_device_attach(dev);
10904                 tg3_netif_start(tp);
10905
10906                 tg3_full_unlock(tp);
10907         }
10908
10909         return err;
10910 }
10911
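/* Resume: restore the saved PCI config space, bring the chip back to
 * full power, reinitialize the hardware and restart the timer and the
 * transmit path.
 */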
10912 static int tg3_resume(struct pci_dev *pdev)
10913 {
10914         struct net_device *dev = pci_get_drvdata(pdev);
10915         struct tg3 *tp = netdev_priv(dev);
10916         int err;
10917
10918         if (!netif_running(dev))
10919                 return 0;
10920
10921         pci_restore_state(tp->pdev);
10922
10923         err = tg3_set_power_state(tp, 0);
10924         if (err)
10925                 return err;
10926
10927         netif_device_attach(dev);
10928
10929         tg3_full_lock(tp, 0);
10930
10931         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
10932         tg3_init_hw(tp);
10933
10934         tp->timer.expires = jiffies + tp->timer_offset;
10935         add_timer(&tp->timer);
10936
10937         tg3_netif_start(tp);
10938
10939         tg3_full_unlock(tp);
10940
10941         return 0;
10942 }
10943
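/* PCI driver glue: hook the probe/remove and power-management entry
 * points above to the device IDs listed in tg3_pci_tbl.
 */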
10944 static struct pci_driver tg3_driver = {
10945         .name           = DRV_MODULE_NAME,
10946         .id_table       = tg3_pci_tbl,
10947         .probe          = tg3_init_one,
10948         .remove         = __devexit_p(tg3_remove_one),
10949         .suspend        = tg3_suspend,
10950         .resume         = tg3_resume
10951 };
10952
10953 static int __init tg3_init(void)
10954 {
10955         return pci_module_init(&tg3_driver);
10956 }
10957
10958 static void __exit tg3_cleanup(void)
10959 {
10960         pci_unregister_driver(&tg3_driver);
10961 }
10962
10963 module_init(tg3_init);
10964 module_exit(tg3_cleanup);