[linux-2.6] / drivers / net / tg3.c
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/kernel.h>
22 #include <linux/types.h>
23 #include <linux/compiler.h>
24 #include <linux/slab.h>
25 #include <linux/delay.h>
26 #include <linux/in.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/if_vlan.h>
36 #include <linux/ip.h>
37 #include <linux/tcp.h>
38 #include <linux/workqueue.h>
39 #include <linux/prefetch.h>
40 #include <linux/dma-mapping.h>
41
42 #include <net/checksum.h>
43
44 #include <asm/system.h>
45 #include <asm/io.h>
46 #include <asm/byteorder.h>
47 #include <asm/uaccess.h>
48
49 #ifdef CONFIG_SPARC64
50 #include <asm/idprom.h>
51 #include <asm/oplib.h>
52 #include <asm/pbm.h>
53 #endif
54
55 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
56 #define TG3_VLAN_TAG_USED 1
57 #else
58 #define TG3_VLAN_TAG_USED 0
59 #endif
60
61 #ifdef NETIF_F_TSO
62 #define TG3_TSO_SUPPORT 1
63 #else
64 #define TG3_TSO_SUPPORT 0
65 #endif
66
67 #include "tg3.h"
68
69 #define DRV_MODULE_NAME         "tg3"
70 #define PFX DRV_MODULE_NAME     ": "
71 #define DRV_MODULE_VERSION      "3.65"
72 #define DRV_MODULE_RELDATE      "August 07, 2006"
73
74 #define TG3_DEF_MAC_MODE        0
75 #define TG3_DEF_RX_MODE         0
76 #define TG3_DEF_TX_MODE         0
77 #define TG3_DEF_MSG_ENABLE        \
78         (NETIF_MSG_DRV          | \
79          NETIF_MSG_PROBE        | \
80          NETIF_MSG_LINK         | \
81          NETIF_MSG_TIMER        | \
82          NETIF_MSG_IFDOWN       | \
83          NETIF_MSG_IFUP         | \
84          NETIF_MSG_RX_ERR       | \
85          NETIF_MSG_TX_ERR)
86
87 /* length of time before we decide the hardware is borked,
88  * and dev->tx_timeout() should be called to fix the problem
89  */
90 #define TG3_TX_TIMEOUT                  (5 * HZ)
91
92 /* hardware minimum and maximum for a single frame's data payload */
93 #define TG3_MIN_MTU                     60
94 #define TG3_MAX_MTU(tp) \
95         ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
96
97 /* These numbers seem to be hard coded in the NIC firmware somehow.
98  * You can't change the ring sizes, but you can change where you place
99  * them in the NIC onboard memory.
100  */
101 #define TG3_RX_RING_SIZE                512
102 #define TG3_DEF_RX_RING_PENDING         200
103 #define TG3_RX_JUMBO_RING_SIZE          256
104 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
105
106 /* Do not place this n-ring entries value into the tp struct itself,
107  * we really want to expose these constants to GCC so that modulo et
108  * al.  operations are done with shifts and masks instead of with
109  * hw multiply/modulo instructions.  Another solution would be to
110  * replace things like '% foo' with '& (foo - 1)'.
111  */
112 #define TG3_RX_RCB_RING_SIZE(tp)        \
113         ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)
114
115 #define TG3_TX_RING_SIZE                512
116 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
117
118 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
119                                  TG3_RX_RING_SIZE)
120 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
121                                  TG3_RX_JUMBO_RING_SIZE)
122 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
123                                    TG3_RX_RCB_RING_SIZE(tp))
124 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
125                                  TG3_TX_RING_SIZE)
126 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
127
128 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
129 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
130
131 /* minimum number of free TX descriptors required to wake up TX process */
132 #define TG3_TX_WAKEUP_THRESH            (TG3_TX_RING_SIZE / 4)
133
134 /* number of ETHTOOL_GSTATS u64's */
135 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
136
137 #define TG3_NUM_TEST            6
138
139 static char version[] __devinitdata =
140         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
141
142 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
143 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
144 MODULE_LICENSE("GPL");
145 MODULE_VERSION(DRV_MODULE_VERSION);
146
147 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
148 module_param(tg3_debug, int, 0);
149 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
150
151 static struct pci_device_id tg3_pci_tbl[] = {
152         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
153           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
154         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
155           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
156         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
157           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
158         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
159           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
160         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
161           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
162         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
163           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
164         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
165           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
166         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
167           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
168         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
169           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
170         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
171           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
172         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
173           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
174         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
175           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
176         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
177           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
178         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
179           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
180         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
181           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
182         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
183           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
184         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
185           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
186         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
187           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
188         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
189           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
190         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
191           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
192         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
193           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
194         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
195           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
196         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
197           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
198         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
199           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
200         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
201           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
202         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
203           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
204         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
205           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
206         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
207           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
208         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
209           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
210         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752,
211           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
212         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M,
213           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
214         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
215           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
216         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
217           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
218         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
219           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
220         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754,
221           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
222         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M,
223           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
224         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755,
225           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
226         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M,
227           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
228         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786,
229           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
230         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787,
231           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
232         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M,
233           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
234         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714,
235           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
236         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S,
237           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
238         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715,
239           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
240         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S,
241           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
242         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780,
243           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
244         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S,
245           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
246         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781,
247           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
248         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
249           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
250         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
251           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
252         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
253           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
254         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
255           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
256         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
257           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
258         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
259           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
260         { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
261           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
262         { 0, }
263 };
264
265 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
266
/* String names reported for ETHTOOL_GSTRINGS/ETHTOOL_GSTATS.
 * NOTE: the order here is positional — ethtool pairs string N with the
 * N-th u64 of the stats dump, so the layout must stay in sync with
 * struct tg3_ethtool_stats (TG3_NUM_STATS is derived from its size).
 * Do not reorder or insert entries without updating that struct.
 */
static struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
	/* RX MAC statistics */
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	/* TX MAC statistics */
	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	/* Receive list placement statistics */
	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	/* Send data initiator statistics */
	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	/* Host coalescing statistics */
	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" }
};
347
/* Result labels for the ETHTOOL_TEST self-test, in execution order.
 * Tests marked (offline) require taking the interface down to run.
 */
static struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
	{ "nvram test     (online) " },
	{ "link test      (online) " },
	{ "register test  (offline)" },
	{ "memory test    (offline)" },
	{ "loopback test  (offline)" },
	{ "interrupt test (offline)" },
};
358
/* Plain (posted) 32-bit register write through the memory-mapped BAR.
 * No read-back flush is performed; use the flush variants when the
 * write must reach the chip before the caller proceeds.
 */
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}
363
364 static u32 tg3_read32(struct tg3 *tp, u32 off)
365 {
366         return (readl(tp->regs + off)); 
367 }
368
/* Write a chip register indirectly via PCI config space: point the
 * register-base window at @off, then write the data word.  The
 * indirect_lock serializes the base/data pair so concurrent indirect
 * accesses cannot interleave.
 */
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
378
/* MMIO register write followed by an immediate read-back of the same
 * register, forcing the posted write out to the chip before returning.
 */
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}
384
/* Read a chip register indirectly via PCI config space (counterpart of
 * tg3_write_indirect_reg32): set the register-base window to @off, then
 * read the data word, all under indirect_lock.
 */
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
396
/* Write a mailbox register without using the MMIO BAR.  Two mailboxes
 * (RX return-ring consumer index, standard-ring producer index) have
 * dedicated aliases in PCI config space and are written there directly;
 * every other mailbox goes through the indirect register window, where
 * mailbox space sits at offset 0x5600 in the register address map.
 */
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}
426
/* Read a mailbox register via the indirect config-space window
 * (mailbox space begins at 0x5600 in the register address map),
 * serialized by indirect_lock like the other indirect accessors.
 */
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
438
439 /* usec_wait specifies the wait time in usec when writing to certain registers
440  * where it is unsafe to read back the register without some delay.
441  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
442  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
443  */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
	    (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		/* Non-posted methods: tp->write32 is the indirect
		 * config-space path here, so no flush read is needed.
		 */
		tp->write32(tp, off, val);
	else {
		/* Posted method: MMIO write, optional settle delay,
		 * then a read-back to flush the write to the chip.
		 */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
463
/* Mailbox write with a flushing read-back, skipped on chips where a
 * mailbox read is unsafe or unnecessary (write-reorder flag set, or
 * the ICH workaround is active).
 */
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
	    !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}
471
/* Write a TX mailbox via MMIO, applying two chip-specific workarounds:
 * write the value twice when the TXD mailbox hardware bug flag is set,
 * and read back to flush when the chipset may reorder mailbox writes.
 */
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
		writel(val, mbox);	/* second write works around the HW bug */
	if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
		readl(mbox);		/* flush past the reordering bridge */
}
481
482 #define tw32_mailbox(reg, val)  tp->write32_mbox(tp, reg, val)
483 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
484 #define tw32_rx_mbox(reg, val)  tp->write32_rx_mbox(tp, reg, val)
485 #define tw32_tx_mbox(reg, val)  tp->write32_tx_mbox(tp, reg, val)
486 #define tr32_mailbox(reg)       tp->read32_mbox(tp, reg)
487
488 #define tw32(reg,val)           tp->write32(tp, reg, val)
489 #define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val), 0)
490 #define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
491 #define tr32(reg)               tp->read32(tp, reg)
492
/* Write a word of on-chip SRAM at offset @off through the memory
 * window, either via PCI config space (TG3_FLAG_SRAM_USE_CONFIG) or
 * via flushed MMIO writes.  The base-address register is restored to
 * zero afterwards in both paths, and the whole sequence is serialized
 * by indirect_lock.
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
513
/* Read a word of on-chip SRAM at offset @off into *@val — the read
 * counterpart of tg3_write_mem, using the same memory window, the same
 * config-space/MMIO split, and the same restore-base-to-zero rule,
 * all under indirect_lock.
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
534
/* Disable chip interrupts: mask the PCI interrupt in MISC_HOST_CTRL,
 * then write 1 to the interrupt mailbox (with flush) to deassert.
 */
static void tg3_disable_ints(struct tg3 *tp)
{
	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
}
541
/* If not using tagged status and the status block says an update is
 * pending, force an interrupt via GRC local control so the pending
 * work gets serviced.
 */
static inline void tg3_cond_int(struct tg3 *tp)
{
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    (tp->hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
}
548
/* Re-enable chip interrupts: clear irq_sync (with a write barrier so
 * the flag is visible before the unmask), unmask the PCI interrupt,
 * and ack/unmask via the interrupt mailbox with the last seen status
 * tag.  One-shot MSI chips need the mailbox write issued twice.
 * Finally force an interrupt if work was already pending.
 */
static void tg3_enable_ints(struct tg3 *tp)
{
	tp->irq_sync = 0;
	wmb();	/* irq_sync must be visible before interrupts can fire */

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		       (tp->last_tag << 24));
	if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       (tp->last_tag << 24));
	tg3_cond_int(tp);
}
563
564 static inline unsigned int tg3_has_work(struct tg3 *tp)
565 {
566         struct tg3_hw_status *sblk = tp->hw_status;
567         unsigned int work_exists = 0;
568
569         /* check for phy events */
570         if (!(tp->tg3_flags &
571               (TG3_FLAG_USE_LINKCHG_REG |
572                TG3_FLAG_POLL_SERDES))) {
573                 if (sblk->status & SD_STATUS_LINK_CHG)
574                         work_exists = 1;
575         }
576         /* check for RX/TX work to do */
577         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
578             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
579                 work_exists = 1;
580
581         return work_exists;
582 }
583
584 /* tg3_restart_ints
585  *  similar to tg3_enable_ints, but it accurately determines whether there
586  *  is new work pending and can return without flushing the PIO write
587  *  which reenables interrupts 
588  */
/* tg3_restart_ints
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_restart_ints(struct tg3 *tp)
{
	/* Unmask by acking with the last status tag; no flushing read. */
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		     tp->last_tag << 24);
	mmiowb();	/* order the mailbox write before any later MMIO */

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    tg3_has_work(tp))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
604
/* Quiesce the network interface: refresh trans_start so the watchdog
 * does not fire while we are stopped, then disable NAPI polling and
 * the TX queue.
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
	netif_poll_disable(tp->dev);
	netif_tx_disable(tp->dev);
}
611
/* Restart the network interface after tg3_netif_stop: wake the TX
 * queue, re-enable polling, mark the status block updated so pending
 * events are noticed, and re-enable interrupts.
 */
static inline void tg3_netif_start(struct tg3 *tp)
{
	netif_wake_queue(tp->dev);
	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (such as after tg3_init_hw)
	 */
	netif_poll_enable(tp->dev);
	tp->hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
623
/* Switch the chip core clock configuration.  Reads the current clock
 * control, keeps only the CLKRUN-related bits plus the low divider
 * field, caches that in tp->pci_clock_ctrl, and then steps the clock
 * source down via intermediate writes (each with a 40 usec settle
 * wait).  5780-class chips are left alone entirely.
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
	u32 orig_clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
		return;

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		/* Step down via the ALTCLK source before removing the
		 * 44MHz core bit, then drop ALTCLK as well.
		 */
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
654
655 #define PHY_BUSY_LOOPS  5000
656
/* Read PHY register @reg over the MII management interface.
 * If the MAC is auto-polling the PHY, polling is paused for the
 * duration of the transaction and restored afterwards.
 * Returns 0 with *val filled in on success, or -EBUSY if the MI
 * interface never went idle within PHY_BUSY_LOOPS polls.
 */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	/* Build the MI command frame: PHY address, register, READ, START. */
	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	/* Poll for the BUSY bit to clear; re-read once more after it
	 * does so the data field is stable.
	 */
	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	/* Restore auto-polling if we turned it off above. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
705
/* Write @val to PHY register @reg over the MII management interface.
 * Mirrors tg3_readphy: auto-polling is paused around the transaction,
 * and -EBUSY is returned if the MI interface never goes idle.
 */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	/* Build the MI command frame: PHY address, register, data,
	 * WRITE, START.
	 */
	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	/* Poll for the BUSY bit to clear. */
	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	/* Restore auto-polling if we turned it off above. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
750
/* Enable the PHY's ethernet@wirespeed feature (unless the chip is
 * flagged as not supporting it) via a read-modify-write of an AUX
 * control shadow register: 0x7007 selects the shadow for reading,
 * then bits 15 and 4 are set on the value read back.
 * NOTE(review): the exact shadow-register semantics of 0x7007 and
 * bits 15/4 are per Broadcom PHY documentation — not verifiable here.
 */
static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	u32 val;

	if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
		return;

	if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
	    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
		tg3_writephy(tp, MII_TG3_AUX_CTRL,
			     (val | (1 << 15) | (1 << 4)));
}
763
764 static int tg3_bmcr_reset(struct tg3 *tp)
765 {
766         u32 phy_control;
767         int limit, err;
768
769         /* OK, reset it, and poll the BMCR_RESET bit until it
770          * clears or we time out.
771          */
772         phy_control = BMCR_RESET;
773         err = tg3_writephy(tp, MII_BMCR, phy_control);
774         if (err != 0)
775                 return -EBUSY;
776
777         limit = 5000;
778         while (limit--) {
779                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
780                 if (err != 0)
781                         return -EBUSY;
782
783                 if ((phy_control & BMCR_RESET) == 0) {
784                         udelay(40);
785                         break;
786                 }
787                 udelay(10);
788         }
789         if (limit <= 0)
790                 return -EBUSY;
791
792         return 0;
793 }
794
795 static int tg3_wait_macro_done(struct tg3 *tp)
796 {
797         int limit = 100;
798
799         while (limit--) {
800                 u32 tmp32;
801
802                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
803                         if ((tmp32 & 0x1000) == 0)
804                                 break;
805                 }
806         }
807         if (limit <= 0)
808                 return -EBUSY;
809
810         return 0;
811 }
812
/* Load the DSP test patterns into each of the four PHY channels and
 * read them back to verify the DSP paths.
 *
 * Per channel: select the channel's DSP block, write the six pattern
 * words, trigger the write macro, then run the read-back macros and
 * compare what comes back (low word masked to 15 bits, high word to
 * 4 bits) against the expected pattern.
 *
 * On a macro timeout, *resetp is set to 1 so the caller knows a fresh
 * PHY reset is required before retrying.  On a data mismatch, a DSP
 * sequence is written at address 0x000b (values 0x4001/0x4005 --
 * chip-internal recovery, exact meaning not visible here) and -EBUSY
 * is returned WITHOUT requesting a reset.
 *
 * Returns 0 when all four channels verify clean, -EBUSY otherwise.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	/* One six-word test pattern per channel. */
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Channels are spaced 0x2000 apart in DSP address space. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		/* Kick the write macro and wait for it to complete. */
		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Re-select the channel and run the read-back macros. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, 0x16, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Read back the pattern two words (low/high) at a time. */
		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			/* Only 15 bits of the low word and 4 bits of the
			 * high word are significant for comparison.
			 */
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
878
879 static int tg3_phy_reset_chanpat(struct tg3 *tp)
880 {
881         int chan;
882
883         for (chan = 0; chan < 4; chan++) {
884                 int i;
885
886                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
887                              (chan * 0x2000) | 0x0200);
888                 tg3_writephy(tp, 0x16, 0x0002);
889                 for (i = 0; i < 6; i++)
890                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
891                 tg3_writephy(tp, 0x16, 0x0202);
892                 if (tg3_wait_macro_done(tp))
893                         return -EBUSY;
894         }
895
896         return 0;
897 }
898
/* PHY reset workaround sequence for 5703/5704/5705 chips.
 *
 * Repeatedly (up to 10 tries): optionally BMCR-reset the PHY, disable
 * the transmitter and interrupt via MII_TG3_EXT_CTRL, force 1000/full
 * master mode, enable the SM DSP clock, block PHY control access, and
 * run the DSP test-pattern check.  A macro timeout in the check sets
 * do_phy_reset so the next retry starts with a fresh BMCR reset.
 *
 * Afterwards the channel patterns are cleared, DSP access is restored,
 * the extended-packet-length bit is set or cleared per chip, and the
 * saved MII_TG3_CTRL value is put back.
 *
 * Returns 0 on success or a negative errno.
 *
 * NOTE(review): if every retry takes a `continue` before MII_TG3_CTRL
 * is successfully read, phy9_orig is written back below while still
 * uninitialized -- verify the readphy cannot fail on all 10 passes.
 * NOTE(review): if the retries are exhausted with err != 0 from the
 * test-pattern check, execution still falls through to the cleanup
 * sequence rather than returning that error directly.
 */
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt.  */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps.  */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

		/* Set to master mode.  */
		if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_TG3_CTRL,
			     (MII_TG3_CTRL_AS_MASTER |
			      MII_TG3_CTRL_ENABLE_AS_MASTER));

		/* Enable SM_DSP_CLOCK and 6dB.  */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);

		/* Block the PHY control access.  */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	/* Clear the test patterns written above. */
	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	/* Unblock PHY control access. */
	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, 0x16, 0x0000);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		/* Set Extended packet length bit for jumbo frames */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
	}
	else {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}

	/* Restore the MII_TG3_CTRL value saved before forcing master mode. */
	tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

	/* Re-enable transmitter and interrupt (undo the 0x3000 set above). */
	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}
974
975 static void tg3_link_report(struct tg3 *);
976
977 /* This will reset the tigon3 PHY if there is no valid
978  * link unless the FORCE argument is non-zero.
979  */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 phy_status;
	int err;

	/* Read BMSR twice: status bits in BMSR are latched, so the first
	 * read returns stale (latched) state and the second the current one.
	 */
	err  = tg3_readphy(tp, MII_BMSR, &phy_status);
	err |= tg3_readphy(tp, MII_BMSR, &phy_status);
	if (err != 0)
		return -EBUSY;

	/* A PHY reset drops the link; report the loss of carrier now. */
	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	/* 5703/5704/5705 need the full DSP test-pattern reset sequence
	 * instead of a plain BMCR reset.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

out:
	/* Post-reset, per-chip DSP workarounds.  Each block writes a
	 * fixed register sequence whose meaning is chip-internal.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
		/* Written twice on purpose -- presumably required by the
		 * 5704 A0 erratum; confirm against Broadcom errata.
		 */
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8d68);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
	} else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		/* Set bit 14 with read-modify-write to preserve other bits */
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
		    tg3_writephy(tp, MII_TG3_EXT_CTRL,
				 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	/* Re-apply the ethernet@wirespeed setting lost by the reset. */
	tg3_phy_set_wirespeed(tp);
	return 0;
}
1065
/* Drive the GRC GPIO pins that control auxiliary (Vaux) power.
 *
 * On dual-port devices (5704/5714) the decision also consults the peer
 * port's WOL/ASF flags, since both ports share the aux power source.
 * If either port needs power while down (WOL or ASF enabled), the GPIOs
 * are programmed to keep aux power on; otherwise they are sequenced to
 * switch it off.  No-op on LOM designs (EEPROM_WRITE_PROT set).
 */
static void tg3_frob_aux_power(struct tg3 *tp)
{
	struct tg3 *tp_peer = tp;

	/* LOMs manage aux power themselves; leave the GPIOs alone. */
	if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
		return;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);
		/* remove_one() may have been run on the peer. */
		if (!dev_peer)
			tp_peer = tp;
		else
			tp_peer = netdev_priv(dev_peer);
	}

	/* Keep aux power up if either port wants WOL or runs ASF. */
	if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1),
				    100);
		} else {
			u32 no_gpio2;
			u32 grc_local_ctrl = 0;

			/* If the peer port already programmed the GPIOs,
			 * don't touch them again.
			 */
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			/* Workaround to prevent overdrawing Amps. */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}

			/* On 5753 and variants, GPIO2 cannot be used. */
			no_gpio2 = tp->nic_sram_data_cfg &
				    NIC_SRAM_DATA_CFG_NO_GPIO2;

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					 GRC_LCLCTRL_GPIO_OE1 |
					 GRC_LCLCTRL_GPIO_OE2 |
					 GRC_LCLCTRL_GPIO_OUTPUT1 |
					 GRC_LCLCTRL_GPIO_OUTPUT2;
			if (no_gpio2) {
				grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
						    GRC_LCLCTRL_GPIO_OUTPUT2);
			}
			/* Three-step GPIO sequence; the intermediate writes
			 * and 100us settle times appear to be required by
			 * the hardware -- do not reorder.
			 */
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);

			if (!no_gpio2) {
				grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}
		}
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
			/* Defer to the peer if it is already initialized. */
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			/* Pulse GPIO1 to switch aux power off. */
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    GRC_LCLCTRL_GPIO_OE1, 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
		}
	}
}
1161
1162 static int tg3_setup_phy(struct tg3 *, int);
1163
1164 #define RESET_KIND_SHUTDOWN     0
1165 #define RESET_KIND_INIT         1
1166 #define RESET_KIND_SUSPEND      2
1167
1168 static void tg3_write_sig_post_reset(struct tg3 *, int);
1169 static int tg3_halt_cpu(struct tg3 *, u32);
1170 static int tg3_nvram_lock(struct tg3 *);
1171 static void tg3_nvram_unlock(struct tg3 *);
1172
1173 static void tg3_power_down_phy(struct tg3 *tp)
1174 {
1175         /* The PHY should not be powered down on some chips because
1176          * of bugs.
1177          */
1178         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1179             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1180             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
1181              (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
1182                 return;
1183         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
1184 }
1185
/* Transition the device into PCI power state @state.
 *
 * D0 is handled early: the PM control register is written, aux power
 * is switched out of Vaux on non-LOM boards, and 0 is returned.
 *
 * For D1/D2/D3hot the function masks PCI interrupts, saves the current
 * link configuration, renegotiates a low-power 10/half link on copper,
 * posts the WOL mailbox for the firmware, programs MAC mode and clock
 * registers according to the WOL/ASF flags, optionally powers down the
 * PHY, flips the aux-power GPIOs, applies a 5750 PLL workaround, and
 * finally writes the new PM state.
 *
 * Returns 0 on success, -EINVAL for an unrecognized power state.
 */
static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
{
	u32 misc_host_ctrl;
	u16 power_control, power_caps;
	int pm = tp->pm_cap;

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	pci_read_config_word(tp->pdev,
			     pm + PCI_PM_CTRL,
			     &power_control);
	/* Clear any pending PME and the current state bits. */
	power_control |= PCI_PM_CTRL_PME_STATUS;
	power_control &= ~(PCI_PM_CTRL_STATE_MASK);
	switch (state) {
	case PCI_D0:
		power_control |= 0;
		pci_write_config_word(tp->pdev,
				      pm + PCI_PM_CTRL,
				      power_control);
		udelay(100);	/* Delay after power state change */

		/* Switch out of Vaux if it is not a LOM */
		if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);

		return 0;

	case PCI_D1:
		power_control |= 1;
		break;

	case PCI_D2:
		power_control |= 2;
		break;

	case PCI_D3hot:
		power_control |= 3;
		break;

	default:
		printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
		       "requested.\n",
		       tp->dev->name, state);
		return -EINVAL;
	};

	power_control |= PCI_PM_CTRL_PME_ENABLE;

	/* Mask PCI interrupts while the chip is being reconfigured. */
	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	/* Save the link config so it can be restored on resume. */
	if (tp->link_config.phy_is_low_power == 0) {
		tp->link_config.phy_is_low_power = 1;
		tp->link_config.orig_speed = tp->link_config.speed;
		tp->link_config.orig_duplex = tp->link_config.duplex;
		tp->link_config.orig_autoneg = tp->link_config.autoneg;
	}

	/* On copper, drop to 10/half to minimize power while asleep. */
	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
		tp->link_config.speed = SPEED_10;
		tp->link_config.duplex = DUPLEX_HALF;
		tp->link_config.autoneg = AUTONEG_ENABLE;
		tg3_setup_phy(tp, 0);
	}

	/* NOTE(review): this polls the firmware status mailbox only when
	 * ASF is NOT enabled -- presumably waiting for the boot code to
	 * finish before posting the WOL mailbox; confirm intent.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		int i;
		u32 val;

		for (i = 0; i < 200; i++) {
			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			msleep(1);
		}
	}
	tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
					     WOL_DRV_STATE_SHUTDOWN |
					     WOL_DRV_WOL | WOL_SET_MAGIC_PKT);

	pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);

	/* Configure the MAC to receive the wake-up packet. */
	if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
		u32 mac_mode;

		if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
			tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
			udelay(40);

			mac_mode = MAC_MODE_PORT_MODE_MII;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
			    !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
				mac_mode |= MAC_MODE_LINK_POLARITY;
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
		     (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
			mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	/* Gate clocks for additional power savings, per chip family. */
	if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
	} else if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
		/* do nothing */
	} else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
		     (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		/* Two-stage clock switch with 40us settle each. */
		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
			    40);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
			    40);

		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
			u32 newbits3;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    tp->pci_clock_ctrl | newbits3, 40);
		}
	}

	/* Nothing needs the PHY awake: force LEDs off and power it down. */
	if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		/* Turn off the PHY */
		if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
			tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
			tg3_power_down_phy(tp);
		}
	}

	tg3_frob_aux_power(tp);

	/* Workaround for unstable PLL clock */
	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
			int err;

			/* Halt the RX CPU under the NVRAM lock; only unlock
			 * if the lock was actually acquired.
			 */
			err = tg3_nvram_lock(tp);
			tg3_halt_cpu(tp, RX_CPU_BASE);
			if (!err)
				tg3_nvram_unlock(tp);
		}
	}

	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	/* Finally, set the new power state. */
	pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
	udelay(100);	/* Delay after power state change */

	return 0;
}
1396
1397 static void tg3_link_report(struct tg3 *tp)
1398 {
1399         if (!netif_carrier_ok(tp->dev)) {
1400                 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1401         } else {
1402                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1403                        tp->dev->name,
1404                        (tp->link_config.active_speed == SPEED_1000 ?
1405                         1000 :
1406                         (tp->link_config.active_speed == SPEED_100 ?
1407                          100 : 10)),
1408                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1409                         "full" : "half"));
1410
1411                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1412                        "%s for RX.\n",
1413                        tp->dev->name,
1414                        (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1415                        (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1416         }
1417 }
1418
/* Resolve RX/TX pause (flow control) from the local and link-partner
 * autoneg advertisements and program the MAC accordingly.
 *
 * When PAUSE_AUTONEG is set, the standard 802.3 pause resolution is
 * applied to (local_adv, remote_adv); 1000BaseX pause bits are first
 * translated to their 1000BaseT equivalents on MII-serdes.  Otherwise
 * the previously configured pause flags are kept.  The MAC RX/TX mode
 * registers are rewritten only if their value actually changed.
 */
static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
{
	u32 new_tg3_flags = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {

		/* Convert 1000BaseX flow control bits to 1000BaseT
		 * bits before resolving flow control.
		 */
		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
			local_adv &= ~(ADVERTISE_PAUSE_CAP |
				       ADVERTISE_PAUSE_ASYM);
			remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);

			if (local_adv & ADVERTISE_1000XPAUSE)
				local_adv |= ADVERTISE_PAUSE_CAP;
			if (local_adv & ADVERTISE_1000XPSE_ASYM)
				local_adv |= ADVERTISE_PAUSE_ASYM;
			if (remote_adv & LPA_1000XPAUSE)
				remote_adv |= LPA_PAUSE_CAP;
			if (remote_adv & LPA_1000XPAUSE_ASYM)
				remote_adv |= LPA_PAUSE_ASYM;
		}

		/* Standard pause resolution: symmetric pause on both
		 * sides enables both directions; asymmetric combinations
		 * enable only one direction.
		 */
		if (local_adv & ADVERTISE_PAUSE_CAP) {
			if (local_adv & ADVERTISE_PAUSE_ASYM) {
				if (remote_adv & LPA_PAUSE_CAP)
					new_tg3_flags |=
						(TG3_FLAG_RX_PAUSE |
						TG3_FLAG_TX_PAUSE);
				else if (remote_adv & LPA_PAUSE_ASYM)
					new_tg3_flags |=
						(TG3_FLAG_RX_PAUSE);
			} else {
				if (remote_adv & LPA_PAUSE_CAP)
					new_tg3_flags |=
						(TG3_FLAG_RX_PAUSE |
						TG3_FLAG_TX_PAUSE);
			}
		} else if (local_adv & ADVERTISE_PAUSE_ASYM) {
			if ((remote_adv & LPA_PAUSE_CAP) &&
			(remote_adv & LPA_PAUSE_ASYM))
				new_tg3_flags |= TG3_FLAG_TX_PAUSE;
		}

		tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
		tp->tg3_flags |= new_tg3_flags;
	} else {
		/* Autoneg of pause disabled: keep the configured flags. */
		new_tg3_flags = tp->tg3_flags;
	}

	if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	/* Only touch the MAC registers when the value changed. */
	if (old_rx_mode != tp->rx_mode) {
		tw32_f(MAC_RX_MODE, tp->rx_mode);
	}

	if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode) {
		tw32_f(MAC_TX_MODE, tp->tx_mode);
	}
}
1490
1491 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1492 {
1493         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1494         case MII_TG3_AUX_STAT_10HALF:
1495                 *speed = SPEED_10;
1496                 *duplex = DUPLEX_HALF;
1497                 break;
1498
1499         case MII_TG3_AUX_STAT_10FULL:
1500                 *speed = SPEED_10;
1501                 *duplex = DUPLEX_FULL;
1502                 break;
1503
1504         case MII_TG3_AUX_STAT_100HALF:
1505                 *speed = SPEED_100;
1506                 *duplex = DUPLEX_HALF;
1507                 break;
1508
1509         case MII_TG3_AUX_STAT_100FULL:
1510                 *speed = SPEED_100;
1511                 *duplex = DUPLEX_FULL;
1512                 break;
1513
1514         case MII_TG3_AUX_STAT_1000HALF:
1515                 *speed = SPEED_1000;
1516                 *duplex = DUPLEX_HALF;
1517                 break;
1518
1519         case MII_TG3_AUX_STAT_1000FULL:
1520                 *speed = SPEED_1000;
1521                 *duplex = DUPLEX_FULL;
1522                 break;
1523
1524         default:
1525                 *speed = SPEED_INVALID;
1526                 *duplex = DUPLEX_INVALID;
1527                 break;
1528         };
1529 }
1530
/* Program the copper PHY's advertisement registers according to
 * tp->link_config and begin link bring-up: either force the exact
 * speed/duplex the user requested (autoneg disabled) or (re)start
 * autonegotiation.
 */
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	u32 new_adv;
	int i;

	if (tp->link_config.phy_is_low_power) {
		/* Entering low power mode.  Disable gigabit and
		 * 100baseT advertisements.
		 */
		tg3_writephy(tp, MII_TG3_CTRL, 0);

		new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		/* Keep 100baseT advertised only when Wake-on-LAN needs
		 * to link at 100 Mb/s.
		 */
		if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
			new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);

		tg3_writephy(tp, MII_ADVERTISE, new_adv);
	} else if (tp->link_config.speed == SPEED_INVALID) {
		/* No specific speed requested: advertise everything,
		 * masking out gigabit modes on 10/100-only devices.
		 */
		tp->link_config.advertising =
			(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
			 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
			 ADVERTISED_Autoneg | ADVERTISED_MII);

		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
			tp->link_config.advertising &=
				~(ADVERTISED_1000baseT_Half |
				  ADVERTISED_1000baseT_Full);

		/* Translate ethtool ADVERTISED_* bits into the MII
		 * advertisement register layout.
		 */
		new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
			new_adv |= ADVERTISE_10HALF;
		if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
			new_adv |= ADVERTISE_10FULL;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
			new_adv |= ADVERTISE_100HALF;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
			new_adv |= ADVERTISE_100FULL;
		tg3_writephy(tp, MII_ADVERTISE, new_adv);

		if (tp->link_config.advertising &
		    (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
			new_adv = 0;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
				new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
				new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
			/* 5701 A0/B0 parts must advertise as master
			 * (chip-revision-specific workaround).
			 */
			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
			    (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
			tg3_writephy(tp, MII_TG3_CTRL, new_adv);
		} else {
			tg3_writephy(tp, MII_TG3_CTRL, 0);
		}
	} else {
		/* Asking for a specific link mode. */
		if (tp->link_config.speed == SPEED_1000) {
			new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
			tg3_writephy(tp, MII_ADVERTISE, new_adv);

			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = MII_TG3_CTRL_ADV_1000_FULL;
			else
				new_adv = MII_TG3_CTRL_ADV_1000_HALF;
			/* Same 5701 A0/B0 master-mode workaround as above. */
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
			tg3_writephy(tp, MII_TG3_CTRL, new_adv);
		} else {
			tg3_writephy(tp, MII_TG3_CTRL, 0);

			new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
			if (tp->link_config.speed == SPEED_100) {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_100FULL;
				else
					new_adv |= ADVERTISE_100HALF;
			} else {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_10FULL;
				else
					new_adv |= ADVERTISE_10HALF;
			}
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
		}
	}

	if (tp->link_config.autoneg == AUTONEG_DISABLE &&
	    tp->link_config.speed != SPEED_INVALID) {
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		/* Build the forced-mode BMCR value. */
		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= TG3_BMCR_SPEED1000;
			break;
		};

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			/* Drop the link via loopback first, and wait for
			 * it to actually go down before writing the new
			 * forced mode.
			 */
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				/* BMSR latches link-down; read it twice to
				 * get the current link state.
				 */
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	} else {
		/* Autonegotiation requested: (re)start it. */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	}
}
1669
1670 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1671 {
1672         int err;
1673
1674         /* Turn off tap power management. */
1675         /* Set Extended packet length bit */
1676         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1677
1678         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1679         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1680
1681         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1682         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1683
1684         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1685         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1686
1687         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1688         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1689
1690         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1691         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1692
1693         udelay(40);
1694
1695         return err;
1696 }
1697
1698 static int tg3_copper_is_advertising_all(struct tg3 *tp)
1699 {
1700         u32 adv_reg, all_mask;
1701
1702         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1703                 return 0;
1704
1705         all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1706                     ADVERTISE_100HALF | ADVERTISE_100FULL);
1707         if ((adv_reg & all_mask) != all_mask)
1708                 return 0;
1709         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1710                 u32 tg3_ctrl;
1711
1712                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1713                         return 0;
1714
1715                 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1716                             MII_TG3_CTRL_ADV_1000_FULL);
1717                 if ((tg3_ctrl & all_mask) != all_mask)
1718                         return 0;
1719         }
1720         return 1;
1721 }
1722
/* Bring up (or re-check) the link on a copper PHY: reset the PHY if
 * needed, apply chip-specific workarounds, read the negotiated
 * speed/duplex, program MAC_MODE to match, and update the carrier
 * state.  Always returns 0.
 */
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up;
	u32 bmsr, dummy;
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	/* Mask MAC events and clear any latched status bits first. */
	tw32(MAC_EVENT, 0);

	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	tp->mi_mode = MAC_MI_MODE_BASE;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	    netif_carrier_ok(tp->dev)) {
		/* BMSR latches link-down; read twice for current state. */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = 1;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			/* Link is down: reload the 5401 DSP tuning and
			 * poll briefly for the link to return.
			 */
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			/* 5401 rev B0 at gigabit may need a full PHY
			 * reset plus DSP reload if the link stayed down.
			 */
			if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, 0x1c, 0x8c68);
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);

	/* Unmask only the link-change interrupt if MI interrupts are in
	 * use; otherwise mask everything.
	 */
	if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
		u32 val;

		/* Make sure bit 10 of AUX_CTRL shadow 0x4007 is set;
		 * if we had to set it, skip straight to re-link.
		 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
		tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
		if (!(val & (1 << 10))) {
			val |= (1 << 10);
			tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
			goto relink;
		}
	}

	/* Poll BMSR (double read for the latched bit) waiting for link. */
	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		/* Wait for a non-zero aux status, then decode it into
		 * the negotiated speed/duplex.
		 */
		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		/* Wait for a stable BMCR value (not 0 and not all-ones). */
		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			if (bmcr & BMCR_ANENABLE) {
				current_link_up = 1;

				/* Force autoneg restart if we are exiting
				 * low power mode.
				 */
				if (!tg3_copper_is_advertising_all(tp))
					current_link_up = 0;
			} else {
				current_link_up = 0;
			}
		} else {
			/* Forced mode: link only counts if the PHY agrees
			 * with the requested speed/duplex.
			 */
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex) {
				current_link_up = 1;
			} else {
				current_link_up = 0;
			}
		}

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;
	}

	if (current_link_up == 1 &&
	    (tp->link_config.active_duplex == DUPLEX_FULL) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 local_adv, remote_adv;

		if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
			local_adv = 0;
		local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);

		if (tg3_readphy(tp, MII_LPA, &remote_adv))
			remote_adv = 0;

		remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);

		/* If we are not advertising full pause capability,
		 * something is wrong.  Bring the link down and reconfigure.
		 */
		if (local_adv != ADVERTISE_PAUSE_CAP) {
			current_link_up = 0;
		} else {
			tg3_setup_flow_control(tp, local_adv, remote_adv);
		}
	}
relink:
	if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
		u32 tmp;

		/* Reprogram advertisements / restart autoneg, then take
		 * one more look at the link state.
		 */
		tg3_phy_copper_begin(tp);

		tg3_readphy(tp, MII_BMSR, &tmp);
		if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
		    (tmp & BMSR_LSTATUS))
			current_link_up = 1;
	}

	/* Program the MAC port mode to match the negotiated speed. */
	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up == 1) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	/* Link polarity handling differs on the 5700. */
	tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
		    (current_link_up == 1 &&
		     tp->link_config.active_speed == SPEED_10))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
	} else {
		if (current_link_up == 1)
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	/* Notify firmware of a gigabit link on 5700 PCI-X / fast PCI
	 * configurations via the firmware mailbox.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
	    current_link_up == 1 &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
	     (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		     (MAC_STATUS_SYNC_CHANGED |
		      MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	/* Report carrier transitions to the network stack. */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	return 0;
}
2001
/* Software autonegotiation state for 1000baseX (fiber) links, driven
 * by tg3_fiber_aneg_smachine().  State names and MR_* flag bits follow
 * IEEE 802.3 clause 37 terminology.
 */
struct tg3_fiber_aneginfo {
	int state;		/* current ANEG_STATE_* value */
#define ANEG_STATE_UNKNOWN              0
#define ANEG_STATE_AN_ENABLE            1
#define ANEG_STATE_RESTART_INIT         2
#define ANEG_STATE_RESTART              3
#define ANEG_STATE_DISABLE_LINK_OK      4
#define ANEG_STATE_ABILITY_DETECT_INIT  5
#define ANEG_STATE_ABILITY_DETECT       6
#define ANEG_STATE_ACK_DETECT_INIT      7
#define ANEG_STATE_ACK_DETECT           8
#define ANEG_STATE_COMPLETE_ACK_INIT    9
#define ANEG_STATE_COMPLETE_ACK         10
#define ANEG_STATE_IDLE_DETECT_INIT     11
#define ANEG_STATE_IDLE_DETECT          12
#define ANEG_STATE_LINK_OK              13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
#define ANEG_STATE_NEXT_PAGE_WAIT       15

	u32 flags;		/* MR_* status/control bits below */
#define MR_AN_ENABLE            0x00000001
#define MR_RESTART_AN           0x00000002
#define MR_AN_COMPLETE          0x00000004
#define MR_PAGE_RX              0x00000008
#define MR_NP_LOADED            0x00000010
#define MR_TOGGLE_TX            0x00000020
#define MR_LP_ADV_FULL_DUPLEX   0x00000040
#define MR_LP_ADV_HALF_DUPLEX   0x00000080
#define MR_LP_ADV_SYM_PAUSE     0x00000100
#define MR_LP_ADV_ASYM_PAUSE    0x00000200
#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
#define MR_LP_ADV_NEXT_PAGE     0x00001000
#define MR_TOGGLE_RX            0x00002000
#define MR_NP_RX                0x00004000

#define MR_LINK_OK              0x80000000

	/* Tick counters (incremented once per state-machine step). */
	unsigned long link_time, cur_time;

	/* Last received config word and how many times it repeated. */
	u32 ability_match_cfg;
	int ability_match_count;

	char ability_match, idle_match, ack_match;

	u32 txconfig, rxconfig;	/* tx/rx config words (ANEG_CFG_* bits) */
#define ANEG_CFG_NP             0x00000080
#define ANEG_CFG_ACK            0x00000040
#define ANEG_CFG_RF2            0x00000020
#define ANEG_CFG_RF1            0x00000010
#define ANEG_CFG_PS2            0x00000001
#define ANEG_CFG_PS1            0x00008000
#define ANEG_CFG_HD             0x00004000
#define ANEG_CFG_FD             0x00002000
#define ANEG_CFG_INVAL          0x00001f06

};
/* Return codes for tg3_fiber_aneg_smachine(). */
#define ANEG_OK         0
#define ANEG_DONE       1
#define ANEG_TIMER_ENAB 2
#define ANEG_FAILED     -1

/* Ticks to wait for the link partner's config to settle. */
#define ANEG_STATE_SETTLE_TIME  10000
2065
/* Run one step of the software 1000baseX autonegotiation state
 * machine (IEEE 802.3 clause 37 style).  Called repeatedly by
 * fiber_autoneg(); returns ANEG_OK to keep stepping, ANEG_TIMER_ENAB
 * when a settle timer is running, ANEG_DONE on completion, or
 * ANEG_FAILED on error.
 */
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
				   struct tg3_fiber_aneginfo *ap)
{
	unsigned long delta;
	u32 rx_cfg_reg;
	int ret;

	if (ap->state == ANEG_STATE_UNKNOWN) {
		/* First invocation: clear all tracked state. */
		ap->rxconfig = 0;
		ap->link_time = 0;
		ap->cur_time = 0;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->idle_match = 0;
		ap->ack_match = 0;
	}
	ap->cur_time++;

	/* Sample the received config word and update the ability/ack/idle
	 * match trackers.  "Ability match" means the same non-idle config
	 * word has been seen more than once in a row.
	 */
	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

		if (rx_cfg_reg != ap->ability_match_cfg) {
			ap->ability_match_cfg = rx_cfg_reg;
			ap->ability_match = 0;
			ap->ability_match_count = 0;
		} else {
			if (++ap->ability_match_count > 1) {
				ap->ability_match = 1;
				ap->ability_match_cfg = rx_cfg_reg;
			}
		}
		if (rx_cfg_reg & ANEG_CFG_ACK)
			ap->ack_match = 1;
		else
			ap->ack_match = 0;

		ap->idle_match = 0;
	} else {
		/* No config received: treat the line as idle. */
		ap->idle_match = 1;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->ack_match = 0;

		rx_cfg_reg = 0;
	}

	ap->rxconfig = rx_cfg_reg;
	ret = ANEG_OK;

	switch(ap->state) {
	case ANEG_STATE_UNKNOWN:
		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
			ap->state = ANEG_STATE_AN_ENABLE;

		/* fallthru */
	case ANEG_STATE_AN_ENABLE:
		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
		if (ap->flags & MR_AN_ENABLE) {
			ap->link_time = 0;
			ap->cur_time = 0;
			ap->ability_match_cfg = 0;
			ap->ability_match_count = 0;
			ap->ability_match = 0;
			ap->idle_match = 0;
			ap->ack_match = 0;

			ap->state = ANEG_STATE_RESTART_INIT;
		} else {
			ap->state = ANEG_STATE_DISABLE_LINK_OK;
		}
		break;

	case ANEG_STATE_RESTART_INIT:
		/* Send all-zero config words for a settle period. */
		ap->link_time = ap->cur_time;
		ap->flags &= ~(MR_NP_LOADED);
		ap->txconfig = 0;
		tw32(MAC_TX_AUTO_NEG, 0);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ret = ANEG_TIMER_ENAB;
		ap->state = ANEG_STATE_RESTART;

		/* fallthru */
	case ANEG_STATE_RESTART:
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
		} else {
			ret = ANEG_TIMER_ENAB;
		}
		break;

	case ANEG_STATE_DISABLE_LINK_OK:
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_ABILITY_DETECT_INIT:
		/* Start transmitting our abilities: full duplex plus
		 * symmetric pause.
		 */
		ap->flags &= ~(MR_TOGGLE_TX);
		ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ABILITY_DETECT;
		break;

	case ANEG_STATE_ABILITY_DETECT:
		/* Wait for a stable non-zero config from the partner. */
		if (ap->ability_match != 0 && ap->rxconfig != 0) {
			ap->state = ANEG_STATE_ACK_DETECT_INIT;
		}
		break;

	case ANEG_STATE_ACK_DETECT_INIT:
		/* Acknowledge the partner's config word. */
		ap->txconfig |= ANEG_CFG_ACK;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ACK_DETECT;

		/* fallthru */
	case ANEG_STATE_ACK_DETECT:
		if (ap->ack_match != 0) {
			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
			} else {
				/* Partner's word changed: start over. */
				ap->state = ANEG_STATE_AN_ENABLE;
			}
		} else if (ap->ability_match != 0 &&
			   ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
		}
		break;

	case ANEG_STATE_COMPLETE_ACK_INIT:
		if (ap->rxconfig & ANEG_CFG_INVAL) {
			ret = ANEG_FAILED;
			break;
		}
		/* Decode the partner's advertised abilities into MR_* flags. */
		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
			       MR_LP_ADV_HALF_DUPLEX |
			       MR_LP_ADV_SYM_PAUSE |
			       MR_LP_ADV_ASYM_PAUSE |
			       MR_LP_ADV_REMOTE_FAULT1 |
			       MR_LP_ADV_REMOTE_FAULT2 |
			       MR_LP_ADV_NEXT_PAGE |
			       MR_TOGGLE_RX |
			       MR_NP_RX);
		if (ap->rxconfig & ANEG_CFG_FD)
			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_HD)
			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_PS1)
			ap->flags |= MR_LP_ADV_SYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_PS2)
			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_RF1)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
		if (ap->rxconfig & ANEG_CFG_RF2)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_LP_ADV_NEXT_PAGE;

		ap->link_time = ap->cur_time;

		ap->flags ^= (MR_TOGGLE_TX);
		if (ap->rxconfig & 0x0008)
			ap->flags |= MR_TOGGLE_RX;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_NP_RX;
		ap->flags |= MR_PAGE_RX;

		ap->state = ANEG_STATE_COMPLETE_ACK;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_COMPLETE_ACK:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
			} else {
				/* Next-page exchange is not implemented;
				 * fail unless neither side wants one.
				 */
				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
				    !(ap->flags & MR_NP_RX)) {
					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
				} else {
					ret = ANEG_FAILED;
				}
			}
		}
		break;

	case ANEG_STATE_IDLE_DETECT_INIT:
		/* Stop sending config words and let the link settle. */
		ap->link_time = ap->cur_time;
		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_IDLE_DETECT;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_IDLE_DETECT:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			/* XXX another gem from the Broadcom driver :( */
			ap->state = ANEG_STATE_LINK_OK;
		}
		break;

	case ANEG_STATE_LINK_OK:
		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
		/* ??? unimplemented */
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT:
		/* ??? unimplemented */
		break;

	default:
		ret = ANEG_FAILED;
		break;
	};

	return ret;
}
2313
/* Run the software 1000BASE-X autonegotiation state machine.
 *
 * Puts the MAC port into GMII mode, starts transmitting config code
 * words, then single-steps tg3_fiber_aneg_smachine() with a 1us delay
 * per iteration for up to ~195ms, or until the state machine reports
 * ANEG_DONE or ANEG_FAILED.  The aninfo flags accumulated by the state
 * machine are returned through @flags for the caller to inspect.
 *
 * Returns 1 when autoneg finished (ANEG_DONE) with the completion,
 * link-ok and LP full-duplex flags present; 0 otherwise.
 */
static int fiber_autoneg(struct tg3 *tp, u32 *flags)
{
	int res = 0;
	struct tg3_fiber_aneginfo aninfo;
	int status = ANEG_FAILED;
	unsigned int tick;
	u32 tmp;

	/* Clear any previously programmed TX autoneg code word. */
	tw32_f(MAC_TX_AUTO_NEG, 0);

	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
	udelay(40);

	/* Begin sending config code words to the link partner. */
	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
	udelay(40);

	memset(&aninfo, 0, sizeof(aninfo));
	aninfo.flags |= MR_AN_ENABLE;
	aninfo.state = ANEG_STATE_UNKNOWN;
	aninfo.cur_time = 0;
	tick = 0;
	/* ~195000 iterations at 1us per step bounds the negotiation time. */
	while (++tick < 195000) {
		status = tg3_fiber_aneg_smachine(tp, &aninfo);
		if (status == ANEG_DONE || status == ANEG_FAILED)
			break;

		udelay(1);
	}

	/* Stop sending config code words regardless of outcome. */
	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	*flags = aninfo.flags;

	if (status == ANEG_DONE &&
	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
			     MR_LP_ADV_FULL_DUPLEX)))
		res = 1;

	return res;
}
2357
/* One-time initialization sequence for the BCM8002 SerDes PHY.
 *
 * Skipped when init has already completed and there is no PCS sync
 * (nothing to bring up).  The register/value pairs below come from the
 * vendor-prescribed bring-up sequence; their order matters, so do not
 * reorder them.  Busy-waits are used because this can be called from
 * non-sleepable context (see the XXX notes).
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete (~5ms busy-wait). */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize (~150ms busy-wait). */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
2407
/* Bring up the fiber link using the chip's hardware SG_DIG autoneg block.
 *
 * @mac_status: a snapshot of MAC_STATUS taken by the caller.
 *
 * On 5704 chips other than revisions A0/A1 a serdes-config workaround is
 * applied: selected MAC_SERDES_CFG bits (pre-emphasis, voltage regulator)
 * are preserved and rewritten around SG_DIG_CTRL changes, with different
 * magic values for port A vs port B (DUAL_MAC_CTRL_ID).
 *
 * Returns 1 if the link is considered up, 0 otherwise.
 */
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;
	int current_link_up;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = 0;

	if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		/* Forced mode: if hardware autoneg (bit 31) is currently
		 * enabled, turn it off and restore the base SG_DIG setup.
		 */
		if (sg_dig_ctrl & (1 << 31)) {
			if (workaround) {
				u32 val = serdes_cfg;

				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}
			tw32_f(SG_DIG_CTRL, 0x01388400);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = 1;
		}
		goto out;
	}

	/* Want auto-negotiation.  */
	expected_sg_dig_ctrl = 0x81388400;

	/* Pause capability */
	expected_sg_dig_ctrl |= (1 << 11);

	/* Asymettric pause */
	expected_sg_dig_ctrl |= (1 << 12);

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		/* (Re)start hardware autoneg: pulse the restart bit (30). */
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		int i;

		/* Giver time to negotiate (~200ms) */
		for (i = 0; i < 40000; i++) {
			sg_dig_status = tr32(SG_DIG_STATUS);
			if (sg_dig_status & (0x3))
				break;
			udelay(5);
		}
		mac_status = tr32(MAC_STATUS);

		if ((sg_dig_status & (1 << 1)) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			/* Autoneg completed: extract link partner pause
			 * capabilities from SG_DIG_STATUS bits 19/20.
			 */
			u32 local_adv, remote_adv;

			local_adv = ADVERTISE_PAUSE_CAP;
			remote_adv = 0;
			if (sg_dig_status & (1 << 19))
				remote_adv |= LPA_PAUSE_CAP;
			if (sg_dig_status & (1 << 20))
				remote_adv |= LPA_PAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = 1;
			tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
		} else if (!(sg_dig_status & (1 << 1))) {
			/* Autoneg did not complete.  Give it one more pass
			 * after a fresh init; otherwise fall back to
			 * parallel detection.
			 */
			if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
				tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
			else {
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, 0x01388400);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = 1;
				}
			}
		}
	}

out:
	return current_link_up;
}
2532
/* Bring up the fiber link without the hardware SG_DIG autoneg block.
 *
 * @mac_status: a snapshot of MAC_STATUS taken by the caller.
 *
 * When autoneg is enabled, runs the software state machine via
 * fiber_autoneg() and programs flow control from the link partner's
 * advertised pause bits; otherwise forces a 1000FD link.  Either way
 * the MAC's sync/config-changed status bits are cleared before we
 * decide on the final link state.
 *
 * Returns 1 if the link is considered up, 0 otherwise.
 */
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	int current_link_up = 0;

	/* Without PCS sync there is nothing to negotiate with. */
	if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
		tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
		goto out;
	}

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 flags;
		int i;

		if (fiber_autoneg(tp, &flags)) {
			u32 local_adv, remote_adv;

			local_adv = ADVERTISE_PAUSE_CAP;
			remote_adv = 0;
			if (flags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_PAUSE_CAP;
			if (flags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_PAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
			current_link_up = 1;
		}
		/* Ack the latched sync/config-changed bits until they
		 * stay clear (bounded to 30 attempts).
		 */
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		/* Parallel detection: PCS sync with no incoming config
		 * code words means the partner is up without autoneg.
		 */
		mac_status = tr32(MAC_STATUS);
		if (current_link_up == 0 &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = 1;
	} else {
		/* Forcing 1000FD link up. */
		current_link_up = 1;
		tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;

		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);
	}

out:
	return current_link_up;
}
2590
/* Top-level link setup for TBI (fiber) ports.
 *
 * Records the current pause/speed/duplex so we can tell whether anything
 * changed, short-circuits when the link is already stable, then delegates
 * to the hardware (SG_DIG) or by-hand autoneg path.  Finally updates
 * carrier state, the link LED, and reports link changes.
 *
 * @force_reset is unused on this path (fiber has no MII PHY to reset).
 * Always returns 0.
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	orig_pause_cfg =
		(tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
				  TG3_FLAG_TX_PAUSE));
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	/* Fast path: software autoneg, carrier already up, init done and
	 * MAC_STATUS shows a clean synced link with no pending changes --
	 * just ack the latched change bits and leave the link alone.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
	    netif_carrier_ok(tp->dev) &&
	    (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	/* Switch the MAC port into TBI mode. */
	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling.  */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	mac_status = tr32(MAC_STATUS);

	if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	/* Clear the stale link-change bit from the status block. */
	tp->hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Ack the latched sync/config-changed bits until they stay clear
	 * (bounded to 100 attempts).
	 */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			/* Pulse SEND_CONFIGS to provoke the partner. */
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	/* Fiber is always 1000FD when up; drive the link LED to match. */
	if (current_link_up == 1) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_INVALID;
		tp->link_config.active_duplex = DUPLEX_INVALID;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	/* Propagate carrier changes and report anything that changed. */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	} else {
		u32 now_pause_cfg =
			tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
					 TG3_FLAG_TX_PAUSE);
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
2704
/* Link setup for SerDes ports that are managed through MII registers
 * (TG3_FLG2_MII_SERDES, e.g. 5714/5715-class parts).
 *
 * Programs 1000BASE-X advertisement / forced mode via MII, reads back
 * link state, derives duplex and flow control from the negotiated
 * ability bits, and updates MAC mode and carrier accordingly.
 *
 * Returns the OR of all tg3_readphy() error codes (0 on success).
 */
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up, err = 0;
	u32 bmsr, bmcr;
	u16 current_speed;
	u8 current_duplex;

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32(MAC_EVENT, 0);

	/* Ack any latched MAC status change bits. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if (force_reset)
		tg3_phy_reset(tp);

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	/* BMSR link status is latched-low; read twice for current state. */
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		/* 5714: trust the MAC's TX status for link, not BMSR. */
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, new_adv;

		/* Rebuild the 1000BASE-X advertisement from scratch. */
		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				  ADVERTISE_1000XPAUSE |
				  ADVERTISE_1000XPSE_ASYM |
				  ADVERTISE_SLCT);

		/* Always advertise symmetric PAUSE just like copper */
		new_adv |= ADVERTISE_1000XPAUSE;

		if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
			new_adv |= ADVERTISE_1000XHALF;
		if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
			new_adv |= ADVERTISE_1000XFULL;

		if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
			/* Advertisement changed (or autoneg off): restart
			 * autoneg and return; the next poll will pick up
			 * the result.
			 */
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

			return err;
		}
	} else {
		/* Forced mode: only duplex is configurable (speed is
		 * fixed at 1000 on SerDes).
		 */
		u32 new_bmcr;

		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (netif_carrier_ok(tp->dev)) {
				u32 adv;

				/* Drop all 1000X ability bits and restart
				 * autoneg so the partner drops the link.
				 */
				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
							   BMCR_ANRESTART |
							   BMCR_ANENABLE);
				udelay(10);
				netif_carrier_off(tp->dev);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			/* Latched-low: read BMSR twice (see above). */
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = 1;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		if (bmcr & BMCR_ANENABLE) {
			u32 local_adv, remote_adv, common;

			/* Resolve duplex and flow control from the common
			 * subset of local and partner abilities.
			 */
			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;

				tg3_setup_flow_control(tp, local_adv,
						       remote_adv);
			}
			else
				current_link_up = 0;
		}
	}

	/* NOTE(review): this tests the PREVIOUS active_duplex;
	 * current_duplex is only copied into link_config a few lines
	 * below.  Upstream kept this ordering -- confirm it is intended
	 * before changing it.
	 */
	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else {
			netif_carrier_off(tp->dev);
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
		tg3_link_report(tp);
	}
	return err;
}
2871
/* Periodic helper for MII-SerDes ports: fall back to parallel detection
 * when autoneg stalls, and re-enable autoneg once the partner starts
 * sending config code words again.
 *
 * Called from the driver's timer path; uses BCM shadow register 0x1c
 * and the expansion interrupt status register (0x17/0x15) to see
 * signal-detect and received-config state.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED) {
		/* Give autoneg time to complete. */
		tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
		return;
	}
	if (!netif_carrier_ok(tp->dev) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, 0x1c, 0x7c00);
			tg3_readphy(tp, 0x1c, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, 0x17, 0x0f01);
			/* Read twice: the register is latched. */
			tg3_readphy(tp, 0x15, &phy2);
			tg3_readphy(tp, 0x15, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
			}
		}
	}
	else if (netif_carrier_ok(tp->dev) &&
		 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, 0x17, 0x0f01);
		tg3_readphy(tp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

		}
	}
}
2929
2930 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2931 {
2932         int err;
2933
2934         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2935                 err = tg3_setup_fiber_phy(tp, force_reset);
2936         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
2937                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
2938         } else {
2939                 err = tg3_setup_copper_phy(tp, force_reset);
2940         }
2941
2942         if (tp->link_config.active_speed == SPEED_1000 &&
2943             tp->link_config.active_duplex == DUPLEX_HALF)
2944                 tw32(MAC_TX_LENGTHS,
2945                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2946                       (6 << TX_LENGTHS_IPG_SHIFT) |
2947                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2948         else
2949                 tw32(MAC_TX_LENGTHS,
2950                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2951                       (6 << TX_LENGTHS_IPG_SHIFT) |
2952                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2953
2954         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2955                 if (netif_carrier_ok(tp->dev)) {
2956                         tw32(HOSTCC_STAT_COAL_TICKS,
2957                              tp->coal.stats_block_coalesce_usecs);
2958                 } else {
2959                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
2960                 }
2961         }
2962
2963         return err;
2964 }
2965
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	/* If the reorder workaround is already active (flag set, or the
	 * indirect mailbox write method is in use) this path should be
	 * unreachable -- reaching it anyway means a different bug.
	 */
	BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
	       "mapped I/O cycles to the network device, attempting to "
	       "recover. Please report the problem to the driver maintainer "
	       "and include system chipset information.\n", tp->dev->name);

	/* Flag the pending recovery under tp->lock; the workqueue will
	 * perform the actual chip reset.
	 */
	spin_lock(&tp->lock);
	tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
	spin_unlock(&tp->lock);
}
2986
2987 static inline u32 tg3_tx_avail(struct tg3 *tp)
2988 {
2989         smp_mb();
2990         return (tp->tx_pending -
2991                 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
2992 }
2993
/* Tigon3 never reports partial packet sends.  So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 */
static void tg3_tx(struct tg3 *tp)
{
	u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tp->tx_cons;

	/* Reclaim every descriptor the hardware has consumed. */
	while (sw_idx != hw_idx) {
		struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		/* A consumed slot with no skb means the hardware reported
		 * a completion we never posted -- MMIO reordering.
		 */
		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		/* First descriptor maps the linear head of the skb. */
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		sw_idx = NEXT_TX(sw_idx);

		/* Following descriptors map the skb's page fragments. */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tp->tx_buffers[sw_idx];
			/* Frag slots must be empty and must not run past
			 * the hardware index; either is a bogus completion.
			 */
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       pci_unmap_addr(ri, mapping),
				       skb_shinfo(skb)->frags[i].size,
				       PCI_DMA_TODEVICE);

			sw_idx = NEXT_TX(sw_idx);
		}

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	tp->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Wake the queue if enough space freed up; re-check under the tx
	 * lock to avoid racing with tg3_start_xmit() stopping it again.
	 */
	if (unlikely(netif_queue_stopped(tp->dev) &&
		     (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH))) {
		netif_tx_lock(tp->dev);
		if (netif_queue_stopped(tp->dev) &&
		    (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH))
			netif_wake_queue(tp->dev);
		netif_tx_unlock(tp->dev);
	}
}
3061
3062 /* Returns size of skb allocated or < 0 on error.
3063  *
3064  * We only need to fill in the address because the other members
3065  * of the RX descriptor are invariant, see tg3_init_rings.
3066  *
3067  * Note the purposeful assymetry of cpu vs. chip accesses.  For
3068  * posting buffers we only dirty the first cache line of the RX
3069  * descriptor (containing the address).  Whereas for the RX status
3070  * buffers the cpu only reads the last cacheline of the RX descriptor
3071  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3072  */
3073 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3074                             int src_idx, u32 dest_idx_unmasked)
3075 {
3076         struct tg3_rx_buffer_desc *desc;
3077         struct ring_info *map, *src_map;
3078         struct sk_buff *skb;
3079         dma_addr_t mapping;
3080         int skb_size, dest_idx;
3081
3082         src_map = NULL;
3083         switch (opaque_key) {
3084         case RXD_OPAQUE_RING_STD:
3085                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3086                 desc = &tp->rx_std[dest_idx];
3087                 map = &tp->rx_std_buffers[dest_idx];
3088                 if (src_idx >= 0)
3089                         src_map = &tp->rx_std_buffers[src_idx];
3090                 skb_size = tp->rx_pkt_buf_sz;
3091                 break;
3092
3093         case RXD_OPAQUE_RING_JUMBO:
3094                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3095                 desc = &tp->rx_jumbo[dest_idx];
3096                 map = &tp->rx_jumbo_buffers[dest_idx];
3097                 if (src_idx >= 0)
3098                         src_map = &tp->rx_jumbo_buffers[src_idx];
3099                 skb_size = RX_JUMBO_PKT_BUF_SZ;
3100                 break;
3101
3102         default:
3103                 return -EINVAL;
3104         };
3105
3106         /* Do not overwrite any of the map or rp information
3107          * until we are sure we can commit to a new buffer.
3108          *
3109          * Callers depend upon this behavior and assume that
3110          * we leave everything unchanged if we fail.
3111          */
3112         skb = netdev_alloc_skb(tp->dev, skb_size);
3113         if (skb == NULL)
3114                 return -ENOMEM;
3115
3116         skb_reserve(skb, tp->rx_offset);
3117
3118         mapping = pci_map_single(tp->pdev, skb->data,
3119                                  skb_size - tp->rx_offset,
3120                                  PCI_DMA_FROMDEVICE);
3121
3122         map->skb = skb;
3123         pci_unmap_addr_set(map, mapping, mapping);
3124
3125         if (src_map != NULL)
3126                 src_map->skb = NULL;
3127
3128         desc->addr_hi = ((u64)mapping >> 32);
3129         desc->addr_lo = ((u64)mapping & 0xffffffff);
3130
3131         return skb_size;
3132 }
3133
3134 /* We only need to move over in the address because the other
3135  * members of the RX descriptor are invariant.  See notes above
3136  * tg3_alloc_rx_skb for full details.
3137  */
3138 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3139                            int src_idx, u32 dest_idx_unmasked)
3140 {
3141         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3142         struct ring_info *src_map, *dest_map;
3143         int dest_idx;
3144
3145         switch (opaque_key) {
3146         case RXD_OPAQUE_RING_STD:
3147                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3148                 dest_desc = &tp->rx_std[dest_idx];
3149                 dest_map = &tp->rx_std_buffers[dest_idx];
3150                 src_desc = &tp->rx_std[src_idx];
3151                 src_map = &tp->rx_std_buffers[src_idx];
3152                 break;
3153
3154         case RXD_OPAQUE_RING_JUMBO:
3155                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3156                 dest_desc = &tp->rx_jumbo[dest_idx];
3157                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3158                 src_desc = &tp->rx_jumbo[src_idx];
3159                 src_map = &tp->rx_jumbo_buffers[src_idx];
3160                 break;
3161
3162         default:
3163                 return;
3164         };
3165
3166         dest_map->skb = src_map->skb;
3167         pci_unmap_addr_set(dest_map, mapping,
3168                            pci_unmap_addr(src_map, mapping));
3169         dest_desc->addr_hi = src_desc->addr_hi;
3170         dest_desc->addr_lo = src_desc->addr_lo;
3171
3172         src_map->skb = NULL;
3173 }
3174
#if TG3_VLAN_TAG_USED
/* Deliver a received frame to the stack via the VLAN acceleration
 * path, attaching the hardware-extracted 802.1Q tag.
 */
static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
{
	return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
}
#endif
3181
/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host.  The chip does not write into the original descriptor the
 * RX buffer was obtained from.  The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * field, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
 * it is first placed into the on-chip ram.  When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound queer, but it makes
 * sense from a cache coherency perspective.  If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 */
/* Service the RX return ring within the NAPI @budget.  For each
 * completed packet we either hand the mapped buffer to the stack and
 * post a replacement, or (for small packets) copy into a fresh skb and
 * recycle the original buffer.  Returns the number of packets passed
 * up.  Called only from tg3_poll(); no extra locking (see comment in
 * tg3_poll about dev->poll synchronization).
 */
static int tg3_rx(struct tg3 *tp, int budget)
{
	u32 work_mask, rx_std_posted = 0;
	u32 sw_idx = tp->rx_rcb_ptr;
	u16 hw_idx;
	int received;

	hw_idx = tp->hw_status->idx[0].rx_producer;
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	while (sw_idx != hw_idx && budget > 0) {
		struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;

		/* The opaque cookie tells us which producer ring (and
		 * which slot in it) this completion came from.
		 */
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
						  mapping);
			skb = tp->rx_std_buffers[desc_idx].skb;
			post_ptr = &tp->rx_std_ptr;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
						  mapping);
			skb = tp->rx_jumbo_buffers[desc_idx].skb;
			post_ptr = &tp->rx_jumbo_ptr;
		}
		else {
			goto next_pkt_nopost;
		}

		work_mask |= opaque_key;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->net_stats.rx_dropped++;
			goto next_pkt;
		}

		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */

		if (len > RX_COPY_THRESHOLD
			&& tp->rx_offset == 2
			/* rx_offset != 2 iff this is a 5701 card running
			 * in PCI-X mode [see tg3_get_invariants()] */
		) {
			int skb_size;

			/* Big packet: post a replacement buffer first,
			 * then give the original to the stack.  On
			 * allocation failure the packet is dropped and
			 * the buffer recycled.
			 */
			skb_size = tg3_alloc_rx_skb(tp, opaque_key,
						    desc_idx, *post_ptr);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr,
					 skb_size - tp->rx_offset,
					 PCI_DMA_FROMDEVICE);

			skb_put(skb, len);
		} else {
			struct sk_buff *copy_skb;

			/* Small packet: copy into a fresh skb and keep
			 * the original buffer on the ring.
			 */
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);

			copy_skb = netdev_alloc_skb(tp->dev, len + 2);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(copy_skb, 2);
			skb_put(copy_skb, len);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			memcpy(copy_skb->data, skb->data, len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		/* Trust the hardware checksum only when the chip says
		 * it verified to 0xffff.
		 */
		if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		skb->protocol = eth_type_trans(skb, tp->dev);
#if TG3_VLAN_TAG_USED
		if (tp->vlgrp != NULL &&
		    desc->type_flags & RXD_FLAG_VLAN) {
			tg3_vlan_rx(tp, skb,
				    desc->err_vlan & RXD_VLAN_MASK);
		} else
#endif
			netif_receive_skb(skb);

		tp->dev->last_rx = jiffies;
		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		/* Periodically publish the std producer index so the
		 * chip sees fresh buffers during a long service loop.
		 */
		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			u32 idx = *post_ptr % TG3_RX_RING_SIZE;

			tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
				     TG3_64BIT_REG_LOW, idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx %= TG3_RX_RCB_RING_SIZE(tp);

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = tp->hw_status->idx[0].rx_producer;
			rmb();
		}
	}

	/* ACK the status ring. */
	tp->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);

	/* Refill RX ring(s). */
	if (work_mask & RXD_OPAQUE_RING_STD) {
		sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	if (work_mask & RXD_OPAQUE_RING_JUMBO) {
		sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	mmiowb();

	return received;
}
3361
/* NAPI poll handler: service link-change events, TX completions, and
 * up to *budget RX packets.  Returns 0 (and re-enables chip
 * interrupts) when all work is done, 1 to remain on the poll list.
 */
static int tg3_poll(struct net_device *netdev, int *budget)
{
	struct tg3 *tp = netdev_priv(netdev);
	struct tg3_hw_status *sblk = tp->hw_status;
	int done;

	/* handle link change and other phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG) {
			sblk->status = SD_STATUS_UPDATED |
				(sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}

	/* run TX completion thread */
	if (sblk->idx[0].tx_consumer != tp->tx_cons) {
		tg3_tx(tp);
		/* tg3_tx() flags an inconsistent TX ring by setting
		 * TX_RECOVERY_PENDING; bail out of NAPI and let the
		 * reset task rebuild the hardware state.
		 */
		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) {
			netif_rx_complete(netdev);
			schedule_work(&tp->reset_task);
			return 0;
		}
	}

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with dev->poll()
	 */
	if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
		int orig_budget = *budget;
		int work_done;

		if (orig_budget > netdev->quota)
			orig_budget = netdev->quota;

		work_done = tg3_rx(tp, orig_budget);

		*budget -= work_done;
		netdev->quota -= work_done;
	}

	/* Record the status tag we have consumed (tagged mode) or clear
	 * the updated bit (non-tagged) before re-checking for work.
	 */
	if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
		tp->last_tag = sblk->status_tag;
		rmb();
	} else
		sblk->status &= ~SD_STATUS_UPDATED;

	/* if no more work, tell net stack and NIC we're done */
	done = !tg3_has_work(tp);
	if (done) {
		netif_rx_complete(netdev);
		tg3_restart_ints(tp);
	}

	return (done ? 0 : 1);
}
3423
/* Stop the IRQ handlers from scheduling new NAPI work and wait for a
 * currently running handler to finish.  The smp_mb() publishes the
 * irq_sync store before we wait on synchronize_irq().  Undone by the
 * caller clearing tp->irq_sync.
 */
static void tg3_irq_quiesce(struct tg3 *tp)
{
	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	smp_mb();

	synchronize_irq(tp->pdev->irq);
}
3433
/* Non-zero while tg3_irq_quiesce() has interrupt-driven work
 * suspended; the IRQ handlers test this before scheduling NAPI.
 */
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}
3438
/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 *
 * Pairs with tg3_full_unlock().  Quiescing (when requested) happens
 * before taking the lock, since synchronize_irq() can sleep/spin.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	if (irq_sync)
		tg3_irq_quiesce(tp);
	spin_lock_bh(&tp->lock);
}
3450
/* Release the lock taken by tg3_full_lock().  Note this does NOT
 * clear tp->irq_sync; callers that quiesced reset it themselves.
 */
static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
3455
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);

	/* Warm the cache lines NAPI will touch first. */
	prefetch(tp->hw_status);
	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);

	/* Skip scheduling while tg3_irq_quiesce() holds us off. */
	if (likely(!tg3_irq_sync(tp)))
		netif_rx_schedule(dev);		/* schedule NAPI poll */

	return IRQ_HANDLED;
}
3472
/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox. PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);

	/* Warm the cache lines NAPI will touch first. */
	prefetch(tp->hw_status);
	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (likely(!tg3_irq_sync(tp)))
		netif_rx_schedule(dev);		/* schedule NAPI poll */

	return IRQ_RETVAL(1);
}
3497
/* INTx handler (non-tagged status mode).  Returns IRQ_NONE when the
 * interrupt was raised by another device sharing the line.
 */
static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		/*
		 * Writing any value to intr-mbox-0 clears PCI INTA# and
		 * chip-internal interrupt pending events.
		 * Writing non-zero to intr-mbox-0 additional tells the
		 * NIC to stop sending us irqs, engaging "in-intr-handler"
		 * event coalescing.
		 */
		tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			     0x00000001);
		if (tg3_irq_sync(tp))
			goto out;
		sblk->status &= ~SD_STATUS_UPDATED;
		if (likely(tg3_has_work(tp))) {
			prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
			netif_rx_schedule(dev);		/* schedule NAPI poll */
		} else {
			/* No work, shared interrupt perhaps?  re-enable
			 * interrupts, and flush that PCI write
			 */
			tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
				0x00000000);
		}
	} else {	/* shared interrupt */
		handled = 0;
	}
out:
	return IRQ_RETVAL(handled);
}
3540
/* INTx handler for chips using tagged status blocks: new work is
 * detected by comparing the status tag against the last one consumed
 * instead of the SD_STATUS_UPDATED bit.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if ((sblk->status_tag != tp->last_tag) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		/*
		 * writing any value to intr-mbox-0 clears PCI INTA# and
		 * chip-internal interrupt pending events.
		 * writing non-zero to intr-mbox-0 additional tells the
		 * NIC to stop sending us irqs, engaging "in-intr-handler"
		 * event coalescing.
		 */
		tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			     0x00000001);
		if (tg3_irq_sync(tp))
			goto out;
		if (netif_rx_schedule_prep(dev)) {
			prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
			/* Update last_tag to mark that this status has been
			 * seen. Because interrupt may be shared, we may be
			 * racing with tg3_poll(), so only update last_tag
			 * if tg3_poll() is not scheduled.
			 */
			tp->last_tag = sblk->status_tag;
			__netif_rx_schedule(dev);
		}
	} else {	/* shared interrupt */
		handled = 0;
	}
out:
	return IRQ_RETVAL(handled);
}
3582
3583 /* ISR for interrupt test */
3584 static irqreturn_t tg3_test_isr(int irq, void *dev_id,
3585                 struct pt_regs *regs)
3586 {
3587         struct net_device *dev = dev_id;
3588         struct tg3 *tp = netdev_priv(dev);
3589         struct tg3_hw_status *sblk = tp->hw_status;
3590
3591         if ((sblk->status & SD_STATUS_UPDATED) ||
3592             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3593                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3594                              0x00000001);
3595                 return IRQ_RETVAL(1);
3596         }
3597         return IRQ_RETVAL(0);
3598 }
3599
3600 static int tg3_init_hw(struct tg3 *, int);
3601 static int tg3_halt(struct tg3 *, int, int);
3602
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 *
 * On init failure the chip is halted and the device closed; note the
 * lock is dropped around dev_close() and re-acquired before return,
 * so the caller's lock state is preserved either way.  Returns the
 * tg3_init_hw() result.
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
		       "aborting.\n", tp->dev->name);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
		del_timer_sync(&tp->timer);
		tp->irq_sync = 0;
		netif_poll_enable(tp->dev);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
3624
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling 'interrupt' - used when normal interrupts are disabled
 * (e.g. netpoll); simply invokes the INTx handler by hand.
 */
static void tg3_poll_controller(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_interrupt(tp->pdev->irq, dev, NULL);
}
#endif
3633
/* Process-context reset worker, scheduled via tp->reset_task (from
 * TX timeout or TX-ring recovery): quiesce the device, reset and
 * re-initialize the hardware, then restart the data path.
 */
static void tg3_reset_task(void *_data)
{
	struct tg3 *tp = _data;
	unsigned int restart_timer;

	tg3_full_lock(tp, 0);
	tp->tg3_flags |= TG3_FLAG_IN_RESET_TASK;

	/* Device went down since the work was scheduled; nothing to do. */
	if (!netif_running(tp->dev)) {
		tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
		tg3_full_unlock(tp);
		return;
	}

	tg3_full_unlock(tp);

	tg3_netif_stop(tp);

	/* Re-take the lock with IRQ quiesce before touching the chip. */
	tg3_full_lock(tp, 1);

	restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
	tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;

	/* Recovering from a TX hang: fall back to flushed mailbox
	 * writes before re-initializing.
	 */
	if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
		tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	if (tg3_init_hw(tp, 1))
		goto out;

	tg3_netif_start(tp);

	if (restart_timer)
		mod_timer(&tp->timer, jiffies + 1);

out:
	tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;

	tg3_full_unlock(tp);
}
3678
/* TX watchdog callback from the networking core; defer the actual
 * reset to process context via the reset task.
 */
static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
	       dev->name);

	schedule_work(&tp->reset_task);
}
3688
3689 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
3690 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3691 {
3692         u32 base = (u32) mapping & 0xffffffff;
3693
3694         return ((base > 0xffffdcc0) &&
3695                 (base + len + 8 < base));
3696 }
3697
3698 /* Test for DMA addresses > 40-bit */
3699 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
3700                                           int len)
3701 {
3702 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
3703         if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
3704                 return (((u64) mapping + len) > DMA_40BIT_MASK);
3705         return 0;
3706 #else
3707         return 0;
3708 #endif
3709 }
3710
3711 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3712
/* Workaround 4GB and 40-bit hardware DMA bugs.
 *
 * Linearize @skb into a fresh copy mapped as a single descriptor at
 * *start, then unmap and release the original fragments occupying
 * [*start, @last_plus_one).  On success *start is advanced past the
 * new descriptor and 0 is returned; returns -1 (packet dropped) when
 * the copy cannot be allocated or itself crosses a 4GB boundary.
 * The original @skb is always freed.
 */
static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
				       u32 last_plus_one, u32 *start,
				       u32 base_flags, u32 mss)
{
	struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
	dma_addr_t new_addr = 0;
	u32 entry = *start;
	int i, ret = 0;

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		entry = *start;
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure new skb does not cross any 4G boundaries.
		 * Drop the packet if it does.
		 */
		if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
			ret = -1;
			dev_kfree_skb(new_skb);
			new_skb = NULL;
		} else {
			tg3_set_txd(tp, entry, new_addr, new_skb->len,
				    base_flags, 1 | (mss << 1));
			*start = NEXT_TX(entry);
		}
	}

	/* Now clean up the sw ring entries.  'entry' still indexes the
	 * slot where the original head fragment lived; the first pass
	 * unmaps the head, subsequent passes the page fragments.
	 */
	i = 0;
	while (entry != last_plus_one) {
		int len;

		if (i == 0)
			len = skb_headlen(skb);
		else
			len = skb_shinfo(skb)->frags[i-1].size;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(&tp->tx_buffers[entry], mapping),
				 len, PCI_DMA_TODEVICE);
		if (i == 0) {
			/* The first slot now owns the linear copy. */
			tp->tx_buffers[entry].skb = new_skb;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
		} else {
			tp->tx_buffers[entry].skb = NULL;
		}
		entry = NEXT_TX(entry);
		i++;
	}

	dev_kfree_skb(skb);

	return ret;
}
3770
3771 static void tg3_set_txd(struct tg3 *tp, int entry,
3772                         dma_addr_t mapping, int len, u32 flags,
3773                         u32 mss_and_is_end)
3774 {
3775         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3776         int is_end = (mss_and_is_end & 0x1);
3777         u32 mss = (mss_and_is_end >> 1);
3778         u32 vlan_tag = 0;
3779
3780         if (is_end)
3781                 flags |= TXD_FLAG_END;
3782         if (flags & TXD_FLAG_VLAN) {
3783                 vlan_tag = flags >> 16;
3784                 flags &= 0xffff;
3785         }
3786         vlan_tag |= (mss << TXD_MSS_SHIFT);
3787
3788         txd->addr_hi = ((u64) mapping >> 32);
3789         txd->addr_lo = ((u64) mapping & 0xffffffff);
3790         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3791         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3792 }
3793
3794 /* hard_start_xmit for devices that don't have any bugs and
3795  * support TG3_FLG2_HW_TSO_2 only.
3796  */
3797 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3798 {
3799         struct tg3 *tp = netdev_priv(dev);
3800         dma_addr_t mapping;
3801         u32 len, entry, base_flags, mss;
3802
3803         len = skb_headlen(skb);
3804
3805         /* We are running in BH disabled context with netif_tx_lock
3806          * and TX reclaim runs via tp->poll inside of a software
3807          * interrupt.  Furthermore, IRQ processing runs lockless so we have
3808          * no IRQ context deadlocks to worry about either.  Rejoice!
3809          */
3810         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3811                 if (!netif_queue_stopped(dev)) {
3812                         netif_stop_queue(dev);
3813
3814                         /* This is a hard error, log it. */
3815                         printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
3816                                "queue awake!\n", dev->name);
3817                 }
3818                 return NETDEV_TX_BUSY;
3819         }
3820
3821         entry = tp->tx_prod;
3822         base_flags = 0;
3823 #if TG3_TSO_SUPPORT != 0
3824         mss = 0;
3825         if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3826             (mss = skb_shinfo(skb)->gso_size) != 0) {
3827                 int tcp_opt_len, ip_tcp_len;
3828
3829                 if (skb_header_cloned(skb) &&
3830                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3831                         dev_kfree_skb(skb);
3832                         goto out_unlock;
3833                 }
3834
3835                 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
3836                         mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
3837                 else {
3838                         tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3839                         ip_tcp_len = (skb->nh.iph->ihl * 4) +
3840                                      sizeof(struct tcphdr);
3841
3842                         skb->nh.iph->check = 0;
3843                         skb->nh.iph->tot_len = htons(mss + ip_tcp_len +
3844                                                      tcp_opt_len);
3845                         mss |= (ip_tcp_len + tcp_opt_len) << 9;
3846                 }
3847
3848                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3849                                TXD_FLAG_CPU_POST_DMA);
3850
3851                 skb->h.th->check = 0;
3852
3853         }
3854         else if (skb->ip_summed == CHECKSUM_HW)
3855                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3856 #else
3857         mss = 0;
3858         if (skb->ip_summed == CHECKSUM_HW)
3859                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3860 #endif
3861 #if TG3_VLAN_TAG_USED
3862         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3863                 base_flags |= (TXD_FLAG_VLAN |
3864                                (vlan_tx_tag_get(skb) << 16));
3865 #endif
3866
3867         /* Queue skb data, a.k.a. the main skb fragment. */
3868         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3869
3870         tp->tx_buffers[entry].skb = skb;
3871         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3872
3873         tg3_set_txd(tp, entry, mapping, len, base_flags,
3874                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3875
3876         entry = NEXT_TX(entry);
3877
3878         /* Now loop through additional data fragments, and queue them. */
3879         if (skb_shinfo(skb)->nr_frags > 0) {
3880                 unsigned int i, last;
3881
3882                 last = skb_shinfo(skb)->nr_frags - 1;
3883                 for (i = 0; i <= last; i++) {
3884                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3885
3886                         len = frag->size;
3887                         mapping = pci_map_page(tp->pdev,
3888                                                frag->page,
3889                                                frag->page_offset,
3890                                                len, PCI_DMA_TODEVICE);
3891
3892                         tp->tx_buffers[entry].skb = NULL;
3893                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3894
3895                         tg3_set_txd(tp, entry, mapping, len,
3896                                     base_flags, (i == last) | (mss << 1));
3897
3898                         entry = NEXT_TX(entry);
3899                 }
3900         }
3901
3902         /* Packets are ready, update Tx producer idx local and on card. */
3903         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3904
3905         tp->tx_prod = entry;
3906         if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
3907                 netif_stop_queue(dev);
3908                 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH)
3909                         netif_wake_queue(tp->dev);
3910         }
3911
3912 out_unlock:
3913         mmiowb();
3914
3915         dev->trans_start = jiffies;
3916
3917         return NETDEV_TX_OK;
3918 }
3919
#if TG3_TSO_SUPPORT != 0
static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);

/* Use GSO to workaround a rare TSO bug that may be triggered when the
 * TSO header is greater than 80 bytes.  The oversized skb is split in
 * software and each resulting segment is fed back through the normal
 * transmit path.
 */
static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
{
	struct sk_buff *segs, *nskb;

	/* Estimate the number of fragments in the worst case */
	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
		netif_stop_queue(tp->dev);
		return NETDEV_TX_BUSY;
	}

	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
	if (unlikely(IS_ERR(segs)))
		goto tg3_tso_bug_end;

	/* Detach each segment from the list and transmit it on its own. */
	while (segs) {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		tg3_start_xmit_dma_bug(nskb, tp->dev);
	}

tg3_tso_bug_end:
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
#endif
3953
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
 *
 * Maps the skb head and fragments for DMA, builds one descriptor per
 * piece, and kicks the send-host producer mailbox.  Returns NETDEV_TX_OK
 * (also when the skb is dropped on an internal failure) or NETDEV_TX_BUSY
 * when the ring is full.
 */
static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	dma_addr_t mapping;
	u32 len, entry, base_flags, mss;
	int would_hit_hwbug;

	len = skb_headlen(skb);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);

			/* This is a hard error, log it. */
			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
			       "queue awake!\n", dev->name);
		}
		return NETDEV_TX_BUSY;
	}

	entry = tp->tx_prod;
	base_flags = 0;
	/* Stack requested checksum offload for this frame. */
	if (skb->ip_summed == CHECKSUM_HW)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;
#if TG3_TSO_SUPPORT != 0
	mss = 0;
	/* A non-zero gso_size marks this as a TSO frame. */
	if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
	    (mss = skb_shinfo(skb)->gso_size) != 0) {
		int tcp_opt_len, ip_tcp_len, hdr_len;

		/* We need a writable header; drop the skb if we cannot
		 * get a private copy.
		 */
		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			goto out_unlock;
		}

		tcp_opt_len = ((skb->h.th->doff - 5) * 4);
		ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);

		/* Chips with the HW_TSO_1 bug cannot TSO headers longer
		 * than 80 bytes; hand the frame to the GSO fallback.
		 */
		hdr_len = ip_tcp_len + tcp_opt_len;
		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
			     (tp->tg3_flags2 & TG3_FLG2_HW_TSO_1_BUG))
			return (tg3_tso_bug(tp, skb));

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		/* Pre-cook the IP header for per-segment replication. */
		skb->nh.iph->check = 0;
		skb->nh.iph->tot_len = htons(mss + hdr_len);
		if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
			/* Hardware TSO computes the TCP checksum itself. */
			skb->h.th->check = 0;
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
		}
		else {
			/* Firmware TSO: seed a pseudo-header checksum. */
			skb->h.th->check =
				~csum_tcpudp_magic(skb->nh.iph->saddr,
						   skb->nh.iph->daddr,
						   0, IPPROTO_TCP, 0);
		}

		/* Encode the IP/TCP option word count; where it goes
		 * (mss field vs. base_flags) depends on the TSO flavor.
		 */
		if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
			if (tcp_opt_len || skb->nh.iph->ihl > 5) {
				int tsflags;

				tsflags = ((skb->nh.iph->ihl - 5) +
					   (tcp_opt_len >> 2));
				mss |= (tsflags << 11);
			}
		} else {
			if (tcp_opt_len || skb->nh.iph->ihl > 5) {
				int tsflags;

				tsflags = ((skb->nh.iph->ihl - 5) +
					   (tcp_opt_len >> 2));
				base_flags |= tsflags << 12;
			}
		}
	}
#else
	mss = 0;
#endif
#if TG3_VLAN_TAG_USED
	if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
		base_flags |= (TXD_FLAG_VLAN |
			       (vlan_tx_tag_get(skb) << 16));
#endif

	/* Queue skb data, a.k.a. the main skb fragment. */
	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	tp->tx_buffers[entry].skb = skb;
	pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

	would_hit_hwbug = 0;

	/* DMA addresses crossing a 4GB boundary trip a hardware bug
	 * on these chips; remember that the workaround is needed.
	 */
	if (tg3_4g_overflow_test(mapping, len))
		would_hit_hwbug = 1;

	tg3_set_txd(tp, entry, mapping, len, base_flags,
		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

	entry = NEXT_TX(entry);

	/* Now loop through additional data fragments, and queue them. */
	if (skb_shinfo(skb)->nr_frags > 0) {
		unsigned int i, last;

		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = frag->size;
			mapping = pci_map_page(tp->pdev,
					       frag->page,
					       frag->page_offset,
					       len, PCI_DMA_TODEVICE);

			tp->tx_buffers[entry].skb = NULL;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

			/* 4GB-crossing and >40-bit DMA addresses both
			 * require the bounce-buffer workaround below.
			 */
			if (tg3_4g_overflow_test(mapping, len))
				would_hit_hwbug = 1;

			if (tg3_40bit_overflow_test(tp, mapping, len))
				would_hit_hwbug = 1;

			if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
				tg3_set_txd(tp, entry, mapping, len,
					    base_flags, (i == last)|(mss << 1));
			else
				tg3_set_txd(tp, entry, mapping, len,
					    base_flags, (i == last));

			entry = NEXT_TX(entry);
		}
	}

	if (would_hit_hwbug) {
		u32 last_plus_one = entry;
		u32 start;

		/* Rewind to the first descriptor of this packet. */
		start = entry - 1 - skb_shinfo(skb)->nr_frags;
		start &= (TG3_TX_RING_SIZE - 1);

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
						&start, base_flags, mss))
			goto out_unlock;

		entry = start;
	}

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

	tp->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
		netif_stop_queue(dev);
		if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH)
			netif_wake_queue(tp->dev);
	}

out_unlock:
	mmiowb();

	dev->trans_start = jiffies;

	return NETDEV_TX_OK;
}
4134
4135 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
4136                                int new_mtu)
4137 {
4138         dev->mtu = new_mtu;
4139
4140         if (new_mtu > ETH_DATA_LEN) {
4141                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4142                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
4143                         ethtool_op_set_tso(dev, 0);
4144                 }
4145                 else
4146                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
4147         } else {
4148                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
4149                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
4150                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
4151         }
4152 }
4153
4154 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
4155 {
4156         struct tg3 *tp = netdev_priv(dev);
4157         int err;
4158
4159         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
4160                 return -EINVAL;
4161
4162         if (!netif_running(dev)) {
4163                 /* We'll just catch it later when the
4164                  * device is up'd.
4165                  */
4166                 tg3_set_mtu(dev, tp, new_mtu);
4167                 return 0;
4168         }
4169
4170         tg3_netif_stop(tp);
4171
4172         tg3_full_lock(tp, 1);
4173
4174         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4175
4176         tg3_set_mtu(dev, tp, new_mtu);
4177
4178         err = tg3_restart_hw(tp, 0);
4179
4180         if (!err)
4181                 tg3_netif_start(tp);
4182
4183         tg3_full_unlock(tp);
4184
4185         return err;
4186 }
4187
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void tg3_free_rings(struct tg3 *tp)
{
	struct ring_info *rxp;
	int i;

	/* Unmap and free every posted standard-size rx buffer. */
	for (i = 0; i < TG3_RX_RING_SIZE; i++) {
		rxp = &tp->rx_std_buffers[i];

		if (rxp->skb == NULL)
			continue;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(rxp, mapping),
				 tp->rx_pkt_buf_sz - tp->rx_offset,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(rxp->skb);
		rxp->skb = NULL;
	}

	/* Same for the jumbo rx ring. */
	for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
		rxp = &tp->rx_jumbo_buffers[i];

		if (rxp->skb == NULL)
			continue;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(rxp, mapping),
				 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(rxp->skb);
		rxp->skb = NULL;
	}

	/* Walk the tx ring.  A queued skb occupies one descriptor for
	 * its head plus one per page fragment, so `i' is advanced by
	 * hand rather than in the for statement.
	 */
	for (i = 0; i < TG3_TX_RING_SIZE; ) {
		struct tx_ring_info *txp;
		struct sk_buff *skb;
		int j;

		txp = &tp->tx_buffers[i];
		skb = txp->skb;

		if (skb == NULL) {
			i++;
			continue;
		}

		/* Head descriptor: unmap the linear skb data. */
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(txp, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);
		txp->skb = NULL;

		i++;

		/* Unmap the fragment descriptors that follow the head;
		 * the index wraps via the ring-size mask.
		 */
		for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
			txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
			pci_unmap_page(tp->pdev,
				       pci_unmap_addr(txp, mapping),
				       skb_shinfo(skb)->frags[j].size,
				       PCI_DMA_TODEVICE);
			i++;
		}

		dev_kfree_skb_any(skb);
	}
}
4259
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 *
 * Returns 0 on success, or -ENOMEM if not even a single standard rx
 * buffer (or, with jumbo enabled, a single jumbo buffer) could be
 * allocated.
 */
static int tg3_init_rings(struct tg3 *tp)
{
	u32 i;

	/* Free up all the SKBs. */
	tg3_free_rings(tp);

	/* Zero out all descriptors. */
	memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
	memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
	memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
	memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);

	/* On 5780-class chips with a large MTU, the standard ring's
	 * buffers are sized for jumbo frames instead.
	 */
	tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
	if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
	    (tp->dev->mtu > ETH_DATA_LEN))
		tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i < TG3_RX_RING_SIZE; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tp->rx_std[i];
		rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
			<< RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	/* Same invariants for the jumbo ring, if it is in use. */
	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
		for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
			struct tg3_rx_buffer_desc *rxd;

			rxd = &tp->rx_jumbo[i];
			rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
				<< RXD_LEN_SHIFT;
			rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				RXD_FLAG_JUMBO;
			rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
		}
	}

	/* Now allocate fresh SKBs for each rx ring.  A partial
	 * allocation failure shrinks the ring instead of failing,
	 * unless not even one buffer could be obtained.
	 */
	for (i = 0; i < tp->rx_pending; i++) {
		if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
			printk(KERN_WARNING PFX
			       "%s: Using a smaller RX standard ring, "
			       "only %d out of %d buffers were allocated "
			       "successfully.\n",
			       tp->dev->name, i, tp->rx_pending);
			if (i == 0)
				return -ENOMEM;
			tp->rx_pending = i;
			break;
		}
	}

	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
		for (i = 0; i < tp->rx_jumbo_pending; i++) {
			if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
					     -1, i) < 0) {
				printk(KERN_WARNING PFX
				       "%s: Using a smaller RX jumbo ring, "
				       "only %d out of %d buffers were "
				       "allocated successfully.\n",
				       tp->dev->name, i, tp->rx_jumbo_pending);
				if (i == 0) {
					tg3_free_rings(tp);
					return -ENOMEM;
				}
				tp->rx_jumbo_pending = i;
				break;
			}
		}
	}
	return 0;
}
4349
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.
 *
 * Releases the ring bookkeeping array and every DMA-consistent
 * region.  Each pointer is NULLed after freeing, so a repeat call
 * (e.g. from the tg3_alloc_consistent error path) is harmless.
 */
static void tg3_free_consistent(struct tg3 *tp)
{
	/* rx_jumbo_buffers and tx_buffers point into this single
	 * allocation (see tg3_alloc_consistent), so one kfree covers
	 * all three tables.  kfree(NULL) is a no-op.
	 */
	kfree(tp->rx_std_buffers);
	tp->rx_std_buffers = NULL;
	if (tp->rx_std) {
		pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
				    tp->rx_std, tp->rx_std_mapping);
		tp->rx_std = NULL;
	}
	if (tp->rx_jumbo) {
		pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
				    tp->rx_jumbo, tp->rx_jumbo_mapping);
		tp->rx_jumbo = NULL;
	}
	if (tp->rx_rcb) {
		pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
				    tp->rx_rcb, tp->rx_rcb_mapping);
		tp->rx_rcb = NULL;
	}
	if (tp->tx_ring) {
		pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
			tp->tx_ring, tp->tx_desc_mapping);
		tp->tx_ring = NULL;
	}
	if (tp->hw_status) {
		pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
				    tp->hw_status, tp->status_mapping);
		tp->hw_status = NULL;
	}
	if (tp->hw_stats) {
		pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
				    tp->hw_stats, tp->stats_mapping);
		tp->hw_stats = NULL;
	}
}
4389
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.  Can sleep.
 *
 * Allocates one kmalloc'd block that is carved into the standard rx,
 * jumbo rx and tx ring bookkeeping tables, plus a DMA-consistent
 * region for each descriptor ring, the hardware status block, and the
 * hardware statistics block.  Returns 0 on success or -ENOMEM after
 * releasing any partial allocations via tg3_free_consistent().
 */
static int tg3_alloc_consistent(struct tg3 *tp)
{
	/* One buffer holds all three bookkeeping tables back to back. */
	tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
				      (TG3_RX_RING_SIZE +
				       TG3_RX_JUMBO_RING_SIZE)) +
				     (sizeof(struct tx_ring_info) *
				      TG3_TX_RING_SIZE),
				     GFP_KERNEL);
	if (!tp->rx_std_buffers)
		return -ENOMEM;

	memset(tp->rx_std_buffers, 0,
	       (sizeof(struct ring_info) *
		(TG3_RX_RING_SIZE +
		 TG3_RX_JUMBO_RING_SIZE)) +
	       (sizeof(struct tx_ring_info) *
		TG3_TX_RING_SIZE));

	/* Carve the jumbo rx and tx tables out of the same block. */
	tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
	tp->tx_buffers = (struct tx_ring_info *)
		&tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];

	tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
					  &tp->rx_std_mapping);
	if (!tp->rx_std)
		goto err_out;

	tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
					    &tp->rx_jumbo_mapping);

	if (!tp->rx_jumbo)
		goto err_out;

	tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
					  &tp->rx_rcb_mapping);
	if (!tp->rx_rcb)
		goto err_out;

	tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
					   &tp->tx_desc_mapping);
	if (!tp->tx_ring)
		goto err_out;

	tp->hw_status = pci_alloc_consistent(tp->pdev,
					     TG3_HW_STATUS_SIZE,
					     &tp->status_mapping);
	if (!tp->hw_status)
		goto err_out;

	tp->hw_stats = pci_alloc_consistent(tp->pdev,
					    sizeof(struct tg3_hw_stats),
					    &tp->stats_mapping);
	if (!tp->hw_stats)
		goto err_out;

	/* The chip and driver both read these; start from a clean slate. */
	memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
	memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	return 0;

err_out:
	tg3_free_consistent(tp);
	return -ENOMEM;
}
4458
4459 #define MAX_WAIT_CNT 1000
4460
4461 /* To stop a block, clear the enable bit and poll till it
4462  * clears.  tp->lock is held.
4463  */
4464 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
4465 {
4466         unsigned int i;
4467         u32 val;
4468
4469         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4470                 switch (ofs) {
4471                 case RCVLSC_MODE:
4472                 case DMAC_MODE:
4473                 case MBFREE_MODE:
4474                 case BUFMGR_MODE:
4475                 case MEMARB_MODE:
4476                         /* We can't enable/disable these bits of the
4477                          * 5705/5750, just say success.
4478                          */
4479                         return 0;
4480
4481                 default:
4482                         break;
4483                 };
4484         }
4485
4486         val = tr32(ofs);
4487         val &= ~enable_bit;
4488         tw32_f(ofs, val);
4489
4490         for (i = 0; i < MAX_WAIT_CNT; i++) {
4491                 udelay(100);
4492                 val = tr32(ofs);
4493                 if ((val & enable_bit) == 0)
4494                         break;
4495         }
4496
4497         if (i == MAX_WAIT_CNT && !silent) {
4498                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4499                        "ofs=%lx enable_bit=%x\n",
4500                        ofs, enable_bit);
4501                 return -ENODEV;
4502         }
4503
4504         return 0;
4505 }
4506
/* tp->lock is held.
 * Stop every hardware block, receive path first, then transmit,
 * then host coalescing / DMA / memory blocks, and finally clear
 * the status and statistics blocks.  Returns 0 on success or an
 * accumulated negative value if any block failed to stop.
 */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
	int i, err;

	tg3_disable_ints(tp);

	/* Quiesce the receive MAC first so no new frames arrive. */
	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	/* Stop the receive-path blocks. */
	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	/* Stop the transmit-path blocks. */
	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	/* Poll up to MAX_WAIT_CNT * 100us for the tx MAC to stop. */
	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
		       "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
		       tp->dev->name, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	/* Stop host coalescing, write DMA and the mbuf-free block. */
	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	/* Pulse the FTQ reset register. */
	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

	/* Wipe the status and statistics blocks if they exist. */
	if (tp->hw_status)
		memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
	if (tp->hw_stats)
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	return err;
}
4569
4570 /* tp->lock is held. */
4571 static int tg3_nvram_lock(struct tg3 *tp)
4572 {
4573         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4574                 int i;
4575
4576                 if (tp->nvram_lock_cnt == 0) {
4577                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4578                         for (i = 0; i < 8000; i++) {
4579                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4580                                         break;
4581                                 udelay(20);
4582                         }
4583                         if (i == 8000) {
4584                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
4585                                 return -ENODEV;
4586                         }
4587                 }
4588                 tp->nvram_lock_cnt++;
4589         }
4590         return 0;
4591 }
4592
4593 /* tp->lock is held. */
4594 static void tg3_nvram_unlock(struct tg3 *tp)
4595 {
4596         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4597                 if (tp->nvram_lock_cnt > 0)
4598                         tp->nvram_lock_cnt--;
4599                 if (tp->nvram_lock_cnt == 0)
4600                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4601         }
4602 }
4603
4604 /* tp->lock is held. */
4605 static void tg3_enable_nvram_access(struct tg3 *tp)
4606 {
4607         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4608             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4609                 u32 nvaccess = tr32(NVRAM_ACCESS);
4610
4611                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4612         }
4613 }
4614
4615 /* tp->lock is held. */
4616 static void tg3_disable_nvram_access(struct tg3 *tp)
4617 {
4618         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4619             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4620                 u32 nvaccess = tr32(NVRAM_ACCESS);
4621
4622                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4623         }
4624 }
4625
4626 /* tp->lock is held. */
4627 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4628 {
4629         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4630                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
4631
4632         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4633                 switch (kind) {
4634                 case RESET_KIND_INIT:
4635                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4636                                       DRV_STATE_START);
4637                         break;
4638
4639                 case RESET_KIND_SHUTDOWN:
4640                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4641                                       DRV_STATE_UNLOAD);
4642                         break;
4643
4644                 case RESET_KIND_SUSPEND:
4645                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4646                                       DRV_STATE_SUSPEND);
4647                         break;
4648
4649                 default:
4650                         break;
4651                 };
4652         }
4653 }
4654
4655 /* tp->lock is held. */
4656 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4657 {
4658         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4659                 switch (kind) {
4660                 case RESET_KIND_INIT:
4661                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4662                                       DRV_STATE_START_DONE);
4663                         break;
4664
4665                 case RESET_KIND_SHUTDOWN:
4666                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4667                                       DRV_STATE_UNLOAD_DONE);
4668                         break;
4669
4670                 default:
4671                         break;
4672                 };
4673         }
4674 }
4675
4676 /* tp->lock is held. */
4677 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
4678 {
4679         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4680                 switch (kind) {
4681                 case RESET_KIND_INIT:
4682                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4683                                       DRV_STATE_START);
4684                         break;
4685
4686                 case RESET_KIND_SHUTDOWN:
4687                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4688                                       DRV_STATE_UNLOAD);
4689                         break;
4690
4691                 case RESET_KIND_SUSPEND:
4692                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4693                                       DRV_STATE_SUSPEND);
4694                         break;
4695
4696                 default:
4697                         break;
4698                 };
4699         }
4700 }
4701
4702 static void tg3_stop_fw(struct tg3 *);
4703
/* Perform a full chip reset via the GRC core-clock reset bit, then
 * restore the PCI and chip state needed for register access to work
 * again, wait for bootcode firmware (when fitted) to signal completion,
 * and re-probe the ASF configuration from NVRAM.  Always returns 0.
 * tp->lock is held.
 */
static int tg3_chip_reset(struct tg3 *tp)
{
	u32 val;
	void (*write_op)(struct tg3 *, u32, u32);
	int i;

	tg3_nvram_lock(tp);

	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
		tw32(GRC_FASTBOOT_PC, 0);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* NOTE(review): 0x7e2c and bit 29 below are raw register
		 * offsets/bits with no symbolic names in tg3.h; presumably
		 * PCIE reset workarounds carried over from the vendor
		 * driver -- confirm against Broadcom documentation.
		 */
		if (tr32(0x7e2c) == 0x60) {
			tw32(0x7e2c, 0x20);
		}
		if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
	tw32(GRC_MISC_CFG, val);

	/* restore 5701 hardware bug workaround write method */
	tp->write32 = write_op;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to make this reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
			int i;
			u32 cfg_val;

			/* Wait for link training to complete.  */
			for (i = 0; i < 5000; i++)
				udelay(100);

			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}
		/* Set PCIE max payload size and clear error status.  */
		pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
	}

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	pci_restore_state(tp->pdev);

	/* Make sure PCI-X relaxed ordering bit is clear. */
	pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
	val &= ~PCIX_CAPS_RELAXED_ORDERING;
	pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);

	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
		u32 val;

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}

		val = tr32(MEMARB_MODE);
		tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	} else
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		tw32(0x5000, 0x400);
	}

	tw32(GRC_MODE, tp->grc_mode);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
		u32 val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	/* Restore the MAC port mode that matches the PHY type. */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		tw32_f(MAC_MODE, tp->mac_mode);
	} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
		tw32_f(MAC_MODE, tp->mac_mode);
	} else
		tw32_f(MAC_MODE, 0);
	udelay(40);

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmare.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 &&
	    !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
		tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;

		printk(KERN_INFO PFX "%s: No firmware running.\n",
		       tp->dev->name);
	}

	if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
		u32 val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	/* Reprobe ASF enable state.  */
	tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
	tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
			if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
				tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
		}
	}

	return 0;
}
4914
4915 /* tp->lock is held. */
4916 static void tg3_stop_fw(struct tg3 *tp)
4917 {
4918         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4919                 u32 val;
4920                 int i;
4921
4922                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
4923                 val = tr32(GRC_RX_CPU_EVENT);
4924                 val |= (1 << 14);
4925                 tw32(GRC_RX_CPU_EVENT, val);
4926
4927                 /* Wait for RX cpu to ACK the event.  */
4928                 for (i = 0; i < 100; i++) {
4929                         if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
4930                                 break;
4931                         udelay(1);
4932                 }
4933         }
4934 }
4935
/* Stop the ASF firmware, signal the impending reset, abort the
 * hardware, reset the chip, then signal completion via both the
 * legacy and new-handshake paths.  Returns the tg3_chip_reset()
 * result.  tp->lock is held.
 */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
	int err;

	tg3_stop_fw(tp);
	tg3_write_sig_pre_reset(tp, kind);
	tg3_abort_hw(tp, silent);

	err = tg3_chip_reset(tp);

	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	return err;
}
4956
4957 #define TG3_FW_RELEASE_MAJOR    0x0
4958 #define TG3_FW_RELASE_MINOR     0x0
4959 #define TG3_FW_RELEASE_FIX      0x0
4960 #define TG3_FW_START_ADDR       0x08000000
4961 #define TG3_FW_TEXT_ADDR        0x08000000
4962 #define TG3_FW_TEXT_LEN         0x9c0
4963 #define TG3_FW_RODATA_ADDR      0x080009c0
4964 #define TG3_FW_RODATA_LEN       0x60
4965 #define TG3_FW_DATA_ADDR        0x08000a40
4966 #define TG3_FW_DATA_LEN         0x20
4967 #define TG3_FW_SBSS_ADDR        0x08000a60
4968 #define TG3_FW_SBSS_LEN         0xc
4969 #define TG3_FW_BSS_ADDR         0x08000a70
4970 #define TG3_FW_BSS_LEN          0x10
4971
/* Opaque firmware image (.text section) for the 5701 A0 workaround;
 * loaded by tg3_load_5701_a0_firmware_fix().  Layout is described by
 * the TG3_FW_* defines above.  Generated data -- do not edit by hand.
 */
static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
	0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
	0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
	0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
	0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
	0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
	0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
	0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
	0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
	0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
	0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
	0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
	0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
	0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
	0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
	0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
	0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
	0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
	0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
	0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
	0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
	0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
	0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
	0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0,
	0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
	0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
	0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
	0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
	0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
	0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
	0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
	0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
	0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
	0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
	0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
	0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
	0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
	0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
	0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
	0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
	0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
	0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
	0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
	0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
	0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
	0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
	0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
	0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
	0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
	0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
	0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
	0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
	0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
	0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
	0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
	0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
	0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
	0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
	0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
	0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
	0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
	0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
	0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
	0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
	0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
	0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
	0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
	0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
	0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
	0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
	0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
	0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
	0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
	0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
	0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
	0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
};
5065
/* Read-only data (.rodata) for the 5701 A0 workaround firmware; the
 * words appear to encode ASCII tag strings used by the image.
 * Generated data -- do not edit by hand.
 */
static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
	0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
	0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
	0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
	0x00000000
};
5073
5074 #if 0 /* All zeros, don't eat up space with it. */
5075 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
5076         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5077         0x00000000, 0x00000000, 0x00000000, 0x00000000
5078 };
5079 #endif
5080
5081 #define RX_CPU_SCRATCH_BASE     0x30000
5082 #define RX_CPU_SCRATCH_SIZE     0x04000
5083 #define TX_CPU_SCRATCH_BASE     0x34000
5084 #define TX_CPU_SCRATCH_SIZE     0x04000
5085
/* Halt the on-chip CPU selected by @offset (RX_CPU_BASE or
 * TX_CPU_BASE).  Returns 0 once the CPU reports halted, -ENODEV on
 * timeout.  5705+ parts must never be asked to halt a TX CPU, hence
 * the BUG_ON.  tp->lock is held.
 */
static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
{
	int i;

	BUG_ON(offset == TX_CPU_BASE &&
	    (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));

	if (offset == RX_CPU_BASE) {
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}

		/* NOTE(review): the RX path always issues one extra
		 * halt with a flushing write plus a delay after the
		 * poll loop; presumably an RX-CPU quirk -- confirm
		 * before unifying with the TX branch.
		 */
		tw32(offset + CPU_STATE, 0xffffffff);
		tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
		udelay(10);
	} else {
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}
	}

	/* Timed out: the poll loop never saw the halt bit latch. */
	if (i >= 10000) {
		printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
		       "and %s CPU\n",
		       tp->dev->name,
		       (offset == RX_CPU_BASE ? "RX" : "TX"));
		return -ENODEV;
	}

	/* Clear firmware's nvram arbitration. */
	if (tp->tg3_flags & TG3_FLAG_NVRAM)
		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
	return 0;
}
5127
/* Describes a firmware image to be copied into on-chip CPU scratch
 * memory by tg3_load_firmware_cpu().  For each section, a NULL data
 * pointer means the section is all zeros (only the length is used).
 */
struct fw_info {
	unsigned int text_base;		/* load address of .text */
	unsigned int text_len;		/* .text length in bytes */
	u32 *text_data;			/* .text words, or NULL for zeros */
	unsigned int rodata_base;	/* load address of .rodata */
	unsigned int rodata_len;	/* .rodata length in bytes */
	u32 *rodata_data;		/* .rodata words, or NULL for zeros */
	unsigned int data_base;		/* load address of .data */
	unsigned int data_len;		/* .data length in bytes */
	u32 *data_data;			/* .data words, or NULL for zeros */
};
5139
5140 /* tp->lock is held. */
5141 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
5142                                  int cpu_scratch_size, struct fw_info *info)
5143 {
5144         int err, lock_err, i;
5145         void (*write_op)(struct tg3 *, u32, u32);
5146
5147         if (cpu_base == TX_CPU_BASE &&
5148             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5149                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
5150                        "TX cpu firmware on %s which is 5705.\n",
5151                        tp->dev->name);
5152                 return -EINVAL;
5153         }
5154
5155         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5156                 write_op = tg3_write_mem;
5157         else
5158                 write_op = tg3_write_indirect_reg32;
5159
5160         /* It is possible that bootcode is still loading at this point.
5161          * Get the nvram lock first before halting the cpu.
5162          */
5163         lock_err = tg3_nvram_lock(tp);
5164         err = tg3_halt_cpu(tp, cpu_base);
5165         if (!lock_err)
5166                 tg3_nvram_unlock(tp);
5167         if (err)
5168                 goto out;
5169
5170         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
5171                 write_op(tp, cpu_scratch_base + i, 0);
5172         tw32(cpu_base + CPU_STATE, 0xffffffff);
5173         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
5174         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
5175                 write_op(tp, (cpu_scratch_base +
5176                               (info->text_base & 0xffff) +
5177                               (i * sizeof(u32))),
5178                          (info->text_data ?
5179                           info->text_data[i] : 0));
5180         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
5181                 write_op(tp, (cpu_scratch_base +
5182                               (info->rodata_base & 0xffff) +
5183                               (i * sizeof(u32))),
5184                          (info->rodata_data ?
5185                           info->rodata_data[i] : 0));
5186         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
5187                 write_op(tp, (cpu_scratch_base +
5188                               (info->data_base & 0xffff) +
5189                               (i * sizeof(u32))),
5190                          (info->data_data ?
5191                           info->data_data[i] : 0));
5192
5193         err = 0;
5194
5195 out:
5196         return err;
5197 }
5198
/* Load the 5701 A0 workaround firmware (tg3FwText/tg3FwRodata) into
 * both CPU scratch areas, then start only the RX cpu and verify that
 * its program counter latched the firmware entry point.  Returns 0 on
 * success or a negative errno.  tp->lock is held.
 */
static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
{
	struct fw_info info;
	int err, i;

	info.text_base = TG3_FW_TEXT_ADDR;
	info.text_len = TG3_FW_TEXT_LEN;
	info.text_data = &tg3FwText[0];
	info.rodata_base = TG3_FW_RODATA_ADDR;
	info.rodata_len = TG3_FW_RODATA_LEN;
	info.rodata_data = &tg3FwRodata[0];
	info.data_base = TG3_FW_DATA_ADDR;
	info.data_len = TG3_FW_DATA_LEN;
	info.data_data = NULL;	/* .data section is all zeros */

	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	/* Now startup only the RX cpu. */
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);

	/* Poll for the PC to stick; on a miss, re-halt the CPU and
	 * rewrite the PC before trying again.
	 */
	for (i = 0; i < 5; i++) {
		if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
			break;
		tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
		tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
		udelay(1000);
	}
	if (i >= 5) {
		printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
		       "to set RX CPU PC, is %08x should be %08x\n",
		       tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
		       TG3_FW_TEXT_ADDR);
		return -ENODEV;
	}
	/* Release the RX cpu from halt. */
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);

	return 0;
}
5251
5252 #if TG3_TSO_SUPPORT != 0
5253
5254 #define TG3_TSO_FW_RELEASE_MAJOR        0x1
5255 #define TG3_TSO_FW_RELASE_MINOR         0x6
5256 #define TG3_TSO_FW_RELEASE_FIX          0x0
5257 #define TG3_TSO_FW_START_ADDR           0x08000000
5258 #define TG3_TSO_FW_TEXT_ADDR            0x08000000
5259 #define TG3_TSO_FW_TEXT_LEN             0x1aa0
5260 #define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
5261 #define TG3_TSO_FW_RODATA_LEN           0x60
5262 #define TG3_TSO_FW_DATA_ADDR            0x08001b20
5263 #define TG3_TSO_FW_DATA_LEN             0x30
5264 #define TG3_TSO_FW_SBSS_ADDR            0x08001b50
5265 #define TG3_TSO_FW_SBSS_LEN             0x2c
5266 #define TG3_TSO_FW_BSS_ADDR             0x08001b80
5267 #define TG3_TSO_FW_BSS_LEN              0x894
5268
5269 static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
5270         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
5271         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
5272         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5273         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
5274         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
5275         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
5276         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
5277         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
5278         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
5279         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
5280         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
5281         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
5282         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
5283         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
5284         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
5285         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
5286         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
5287         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
5288         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5289         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
5290         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
5291         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
5292         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
5293         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
5294         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
5295         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
5296         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
5297         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
5298         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
5299         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5300         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
5301         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
5302         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
5303         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
5304         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
5305         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
5306         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
5307         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
5308         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5309         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
5310         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
5311         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
5312         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
5313         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
5314         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
5315         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
5316         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
5317         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5318         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
5319         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5320         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
5321         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
5322         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
5323         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
5324         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
5325         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
5326         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
5327         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
5328         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
5329         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
5330         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
5331         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
5332         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
5333         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
5334         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
5335         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
5336         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
5337         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
5338         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
5339         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
5340         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
5341         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
5342         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
5343         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
5344         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
5345         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
5346         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
5347         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
5348         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
5349         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
5350         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
5351         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
5352         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
5353         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
5354         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
5355         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
5356         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
5357         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
5358         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
5359         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
5360         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
5361         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
5362         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
5363         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
5364         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
5365         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
5366         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
5367         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
5368         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
5369         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
5370         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
5371         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
5372         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
5373         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
5374         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
5375         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
5376         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
5377         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
5378         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
5379         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
5380         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
5381         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
5382         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
5383         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
5384         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
5385         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
5386         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
5387         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
5388         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
5389         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
5390         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
5391         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
5392         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
5393         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
5394         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
5395         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
5396         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
5397         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
5398         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
5399         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
5400         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
5401         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
5402         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
5403         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
5404         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
5405         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
5406         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
5407         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
5408         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5409         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
5410         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
5411         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
5412         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
5413         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
5414         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
5415         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
5416         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
5417         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
5418         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
5419         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
5420         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
5421         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
5422         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
5423         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
5424         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
5425         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
5426         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
5427         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
5428         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
5429         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
5430         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
5431         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
5432         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
5433         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
5434         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
5435         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
5436         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
5437         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
5438         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
5439         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
5440         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
5441         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
5442         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
5443         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
5444         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
5445         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
5446         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
5447         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
5448         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
5449         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
5450         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
5451         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
5452         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
5453         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
5454         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
5455         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
5456         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
5457         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
5458         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
5459         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
5460         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
5461         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
5462         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
5463         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
5464         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
5465         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
5466         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
5467         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
5468         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
5469         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
5470         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
5471         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
5472         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
5473         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
5474         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
5475         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
5476         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
5477         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
5478         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
5479         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
5480         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
5481         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
5482         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
5483         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
5484         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
5485         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
5486         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
5487         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
5488         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
5489         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
5490         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5491         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
5492         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
5493         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
5494         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
5495         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
5496         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
5497         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
5498         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
5499         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
5500         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
5501         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
5502         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
5503         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
5504         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
5505         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
5506         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
5507         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5508         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5509         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5510         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5511         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5512         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5513         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5514         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5515         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5516         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5517         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5518         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5519         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5520         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5521         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5522         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5523         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5524         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5525         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5526         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5527         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5528         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5529         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5530         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
5531         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
5532         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
5533         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
5534         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5535         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
5536         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
5537         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
5538         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
5539         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
5540         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
5541         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
5542         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
5543         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
5544         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
5545         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
5546         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
5547         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
5548         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
5549         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
5550         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
5551         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
5552         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
5553         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
5554 };
5555
/*
 * Read-only data segment of the TSO firmware image, loaded into the
 * chip at TG3_TSO_FW_RODATA_ADDR by the firmware loader.  The words
 * are opaque firmware payload and must not be modified; the values
 * appear to encode ASCII tags (e.g. 0x4d61696e = "Main",
 * 0x43707542 = "CpuB") -- presumably internal firmware labels, not
 * interpreted by the driver itself.
 */
static u32 tg3TsoFwRodata[] = {
	0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
	0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
	0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
	0x00000000,
};
5563
/*
 * Initialized data segment of the TSO firmware image, loaded into the
 * chip at TG3_TSO_FW_DATA_ADDR by the firmware loader.  Opaque
 * firmware payload -- do not modify.  The non-zero words appear to be
 * an ASCII version tag ("stkoffld_v1.6.0") embedded by the firmware
 * build; the driver does not parse it.
 */
static u32 tg3TsoFwData[] = {
	0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000,
};
5569
/* 5705 needs a special version of the TSO firmware.  */
/*
 * Memory layout of the 5705-specific TSO firmware image.  The
 * START/TEXT/RODATA/DATA addresses are locations in the chip's
 * internal memory where each segment is written by the firmware
 * loader; the LEN values are the segment sizes in bytes.  SBSS/BSS
 * describe zero-initialized regions (no payload array exists for
 * them, only their extent).
 *
 * NOTE(review): "RELASE" below is a long-standing typo for "RELEASE".
 * The macro name is left as-is because renaming it would break any
 * reference elsewhere in this file -- fix both sites together if ever
 * cleaned up.
 */
#define TG3_TSO5_FW_RELEASE_MAJOR	0x1
#define TG3_TSO5_FW_RELASE_MINOR	0x2
#define TG3_TSO5_FW_RELEASE_FIX		0x0
#define TG3_TSO5_FW_START_ADDR		0x00010000
#define TG3_TSO5_FW_TEXT_ADDR		0x00010000
#define TG3_TSO5_FW_TEXT_LEN		0xe90
#define TG3_TSO5_FW_RODATA_ADDR		0x00010e90
#define TG3_TSO5_FW_RODATA_LEN		0x50
#define TG3_TSO5_FW_DATA_ADDR		0x00010f00
#define TG3_TSO5_FW_DATA_LEN		0x20
#define TG3_TSO5_FW_SBSS_ADDR		0x00010f20
#define TG3_TSO5_FW_SBSS_LEN		0x28
#define TG3_TSO5_FW_BSS_ADDR		0x00010f50
#define TG3_TSO5_FW_BSS_LEN		0x88
5585
5586 static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
5587         0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
5588         0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
5589         0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5590         0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
5591         0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
5592         0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
5593         0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5594         0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
5595         0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
5596         0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
5597         0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
5598         0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
5599         0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
5600         0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
5601         0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
5602         0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
5603         0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
5604         0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
5605         0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
5606         0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
5607         0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
5608         0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
5609         0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
5610         0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
5611         0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
5612         0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
5613         0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
5614         0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
5615         0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
5616         0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
5617         0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5618         0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
5619         0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
5620         0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
5621         0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
5622         0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
5623         0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
5624         0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
5625         0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
5626         0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
5627         0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
5628         0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
5629         0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
5630         0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
5631         0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
5632         0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
5633         0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
5634         0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
5635         0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
5636         0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
5637         0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
5638         0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
5639         0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
5640         0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
5641         0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
5642         0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
5643         0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
5644         0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
5645         0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
5646         0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
5647         0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
5648         0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
5649         0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
5650         0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
5651         0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
5652         0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
5653         0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5654         0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
5655         0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
5656         0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
5657         0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
5658         0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
5659         0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
5660         0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
5661         0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
5662         0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
5663         0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
5664         0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
5665         0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
5666         0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
5667         0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
5668         0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
5669         0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
5670         0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
5671         0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
5672         0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
5673         0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
5674         0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
5675         0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
5676         0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
5677         0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
5678         0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
5679         0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
5680         0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
5681         0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
5682         0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
5683         0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
5684         0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
5685         0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
5686         0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
5687         0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
5688         0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
5689         0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
5690         0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
5691         0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
5692         0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
5693         0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5694         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5695         0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
5696         0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
5697         0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
5698         0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
5699         0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
5700         0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
5701         0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
5702         0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
5703         0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
5704         0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5705         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5706         0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
5707         0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
5708         0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
5709         0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
5710         0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5711         0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
5712         0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
5713         0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
5714         0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
5715         0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
5716         0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
5717         0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
5718         0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
5719         0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
5720         0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
5721         0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
5722         0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
5723         0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
5724         0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
5725         0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
5726         0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
5727         0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
5728         0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
5729         0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
5730         0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
5731         0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
5732         0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
5733         0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
5734         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5735         0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
5736         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
5737         0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
5738         0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5739         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
5740         0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
5741         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5742         0x00000000, 0x00000000, 0x00000000,
5743 };
5744
/* Read-only data segment of the 5705-family TSO firmware image.
 * The words are mostly short ASCII tags used by the firmware
 * (e.g. "MainCpuB", "stkoffld", "fatalErr"); the blob is copied
 * verbatim into NIC SRAM at TG3_TSO5_FW_RODATA_ADDR by
 * tg3_load_tso_firmware().  Do not edit by hand.
 */
static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
	0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
	0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
	0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
	0x00000000, 0x00000000, 0x00000000,
};
5751
/* Initialized data segment of the 5705-family TSO firmware image.
 * Decodes to the firmware version string "stkoffld_v1.2.0"; copied to
 * NIC SRAM at TG3_TSO5_FW_DATA_ADDR by tg3_load_tso_firmware().
 * Do not edit by hand.
 */
static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
	0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000,
};
5756
/* tp->lock is held.
 *
 * Download the software-TSO firmware into the appropriate on-chip CPU
 * and start it running.  On 5705-class parts the firmware runs on the
 * RX CPU and is laid over the mbuf pool SRAM; on other chips it runs
 * on the TX CPU using its dedicated scratch memory.
 *
 * Returns 0 on success (or when the chip does TSO in hardware and no
 * firmware is needed), a tg3_load_firmware_cpu() error code, or
 * -ENODEV if the CPU refuses to take the new program counter.
 */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
	struct fw_info info;
	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
	int err, i;

	/* HW-TSO chips checksum/segment in hardware; no firmware load. */
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
		return 0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		/* 5705: TSO firmware runs on the RX CPU.  Its scratch
		 * area is carved out of the mbuf pool, so the size is
		 * the sum of all firmware segments.
		 */
		info.text_base = TG3_TSO5_FW_TEXT_ADDR;
		info.text_len = TG3_TSO5_FW_TEXT_LEN;
		info.text_data = &tg3Tso5FwText[0];
		info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
		info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
		info.rodata_data = &tg3Tso5FwRodata[0];
		info.data_base = TG3_TSO5_FW_DATA_ADDR;
		info.data_len = TG3_TSO5_FW_DATA_LEN;
		info.data_data = &tg3Tso5FwData[0];
		cpu_base = RX_CPU_BASE;
		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
		cpu_scratch_size = (info.text_len +
				    info.rodata_len +
				    info.data_len +
				    TG3_TSO5_FW_SBSS_LEN +
				    TG3_TSO5_FW_BSS_LEN);
	} else {
		/* Everything else: TSO firmware runs on the TX CPU with
		 * a fixed scratch window.
		 */
		info.text_base = TG3_TSO_FW_TEXT_ADDR;
		info.text_len = TG3_TSO_FW_TEXT_LEN;
		info.text_data = &tg3TsoFwText[0];
		info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
		info.rodata_len = TG3_TSO_FW_RODATA_LEN;
		info.rodata_data = &tg3TsoFwRodata[0];
		info.data_base = TG3_TSO_FW_DATA_ADDR;
		info.data_len = TG3_TSO_FW_DATA_LEN;
		info.data_data = &tg3TsoFwData[0];
		cpu_base = TX_CPU_BASE;
		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
	}

	err = tg3_load_firmware_cpu(tp, cpu_base,
				    cpu_scratch_base, cpu_scratch_size,
				    &info);
	if (err)
		return err;

	/* Now startup the cpu. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_PC,    info.text_base);

	/* Verify the CPU latched the new PC; retry up to 5 times,
	 * re-halting the CPU and re-writing the PC on each attempt.
	 */
	for (i = 0; i < 5; i++) {
		if (tr32(cpu_base + CPU_PC) == info.text_base)
			break;
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(cpu_base + CPU_PC,    info.text_base);
		udelay(1000);
	}
	if (i >= 5) {
		printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
		       "to set CPU PC, is %08x should be %08x\n",
		       tp->dev->name, tr32(cpu_base + CPU_PC),
		       info.text_base);
		return -ENODEV;
	}
	/* Clear pending state and release the CPU from halt. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_MODE,  0x00000000);
	return 0;
}
5828
5829 #endif /* TG3_TSO_SUPPORT != 0 */
5830
5831 /* tp->lock is held. */
5832 static void __tg3_set_mac_addr(struct tg3 *tp)
5833 {
5834         u32 addr_high, addr_low;
5835         int i;
5836
5837         addr_high = ((tp->dev->dev_addr[0] << 8) |
5838                      tp->dev->dev_addr[1]);
5839         addr_low = ((tp->dev->dev_addr[2] << 24) |
5840                     (tp->dev->dev_addr[3] << 16) |
5841                     (tp->dev->dev_addr[4] <<  8) |
5842                     (tp->dev->dev_addr[5] <<  0));
5843         for (i = 0; i < 4; i++) {
5844                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
5845                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
5846         }
5847
5848         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
5849             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5850                 for (i = 0; i < 12; i++) {
5851                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
5852                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
5853                 }
5854         }
5855
5856         addr_high = (tp->dev->dev_addr[0] +
5857                      tp->dev->dev_addr[1] +
5858                      tp->dev->dev_addr[2] +
5859                      tp->dev->dev_addr[3] +
5860                      tp->dev->dev_addr[4] +
5861                      tp->dev->dev_addr[5]) &
5862                 TX_BACKOFF_SEED_MASK;
5863         tw32(MAC_TX_BACKOFF_SEED, addr_high);
5864 }
5865
5866 static int tg3_set_mac_addr(struct net_device *dev, void *p)
5867 {
5868         struct tg3 *tp = netdev_priv(dev);
5869         struct sockaddr *addr = p;
5870         int err = 0;
5871
5872         if (!is_valid_ether_addr(addr->sa_data))
5873                 return -EINVAL;
5874
5875         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5876
5877         if (!netif_running(dev))
5878                 return 0;
5879
5880         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5881                 /* Reset chip so that ASF can re-init any MAC addresses it
5882                  * needs.
5883                  */
5884                 tg3_netif_stop(tp);
5885                 tg3_full_lock(tp, 1);
5886
5887                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5888                 err = tg3_restart_hw(tp, 0);
5889                 if (!err)
5890                         tg3_netif_start(tp);
5891                 tg3_full_unlock(tp);
5892         } else {
5893                 spin_lock_bh(&tp->lock);
5894                 __tg3_set_mac_addr(tp);
5895                 spin_unlock_bh(&tp->lock);
5896         }
5897
5898         return err;
5899 }
5900
5901 /* tp->lock is held. */
5902 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
5903                            dma_addr_t mapping, u32 maxlen_flags,
5904                            u32 nic_addr)
5905 {
5906         tg3_write_mem(tp,
5907                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
5908                       ((u64) mapping >> 32));
5909         tg3_write_mem(tp,
5910                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
5911                       ((u64) mapping & 0xffffffff));
5912         tg3_write_mem(tp,
5913                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
5914                        maxlen_flags);
5915
5916         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5917                 tg3_write_mem(tp,
5918                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
5919                               nic_addr);
5920 }
5921
5922 static void __tg3_set_rx_mode(struct net_device *);
5923 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
5924 {
5925         tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
5926         tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
5927         tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
5928         tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
5929         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5930                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
5931                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
5932         }
5933         tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
5934         tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
5935         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5936                 u32 val = ec->stats_block_coalesce_usecs;
5937
5938                 if (!netif_carrier_ok(tp->dev))
5939                         val = 0;
5940
5941                 tw32(HOSTCC_STAT_COAL_TICKS, val);
5942         }
5943 }
5944
5945 /* tp->lock is held. */
5946 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
5947 {
5948         u32 val, rdmac_mode;
5949         int i, err, limit;
5950
5951         tg3_disable_ints(tp);
5952
5953         tg3_stop_fw(tp);
5954
5955         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
5956
5957         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
5958                 tg3_abort_hw(tp, 1);
5959         }
5960
5961         if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) && reset_phy)
5962                 tg3_phy_reset(tp);
5963
5964         err = tg3_chip_reset(tp);
5965         if (err)
5966                 return err;
5967
5968         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
5969
5970         /* This works around an issue with Athlon chipsets on
5971          * B3 tigon3 silicon.  This bit has no effect on any
5972          * other revision.  But do not set this on PCI Express
5973          * chips.
5974          */
5975         if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
5976                 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
5977         tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5978
5979         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5980             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
5981                 val = tr32(TG3PCI_PCISTATE);
5982                 val |= PCISTATE_RETRY_SAME_DMA;
5983                 tw32(TG3PCI_PCISTATE, val);
5984         }
5985
5986         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
5987                 /* Enable some hw fixes.  */
5988                 val = tr32(TG3PCI_MSI_DATA);
5989                 val |= (1 << 26) | (1 << 28) | (1 << 29);
5990                 tw32(TG3PCI_MSI_DATA, val);
5991         }
5992
5993         /* Descriptor ring init may make accesses to the
5994          * NIC SRAM area to setup the TX descriptors, so we
5995          * can only do this after the hardware has been
5996          * successfully reset.
5997          */
5998         err = tg3_init_rings(tp);
5999         if (err)
6000                 return err;
6001
6002         /* This value is determined during the probe time DMA
6003          * engine test, tg3_test_dma.
6004          */
6005         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
6006
6007         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
6008                           GRC_MODE_4X_NIC_SEND_RINGS |
6009                           GRC_MODE_NO_TX_PHDR_CSUM |
6010                           GRC_MODE_NO_RX_PHDR_CSUM);
6011         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
6012
6013         /* Pseudo-header checksum is done by hardware logic and not
6014          * the offload processers, so make the chip do the pseudo-
6015          * header checksums on receive.  For transmit it is more
6016          * convenient to do the pseudo-header checksum in software
6017          * as Linux does that on transmit for us in all cases.
6018          */
6019         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
6020
6021         tw32(GRC_MODE,
6022              tp->grc_mode |
6023              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
6024
6025         /* Setup the timer prescalar register.  Clock is always 66Mhz. */
6026         val = tr32(GRC_MISC_CFG);
6027         val &= ~0xff;
6028         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
6029         tw32(GRC_MISC_CFG, val);
6030
6031         /* Initialize MBUF/DESC pool. */
6032         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6033                 /* Do nothing.  */
6034         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
6035                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
6036                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
6037                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
6038                 else
6039                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
6040                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
6041                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
6042         }
6043 #if TG3_TSO_SUPPORT != 0
6044         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6045                 int fw_len;
6046
6047                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
6048                           TG3_TSO5_FW_RODATA_LEN +
6049                           TG3_TSO5_FW_DATA_LEN +
6050                           TG3_TSO5_FW_SBSS_LEN +
6051                           TG3_TSO5_FW_BSS_LEN);
6052                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
6053                 tw32(BUFMGR_MB_POOL_ADDR,
6054                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
6055                 tw32(BUFMGR_MB_POOL_SIZE,
6056                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
6057         }
6058 #endif
6059
6060         if (tp->dev->mtu <= ETH_DATA_LEN) {
6061                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6062                      tp->bufmgr_config.mbuf_read_dma_low_water);
6063                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6064                      tp->bufmgr_config.mbuf_mac_rx_low_water);
6065                 tw32(BUFMGR_MB_HIGH_WATER,
6066                      tp->bufmgr_config.mbuf_high_water);
6067         } else {
6068                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6069                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
6070                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6071                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
6072                 tw32(BUFMGR_MB_HIGH_WATER,
6073                      tp->bufmgr_config.mbuf_high_water_jumbo);
6074         }
6075         tw32(BUFMGR_DMA_LOW_WATER,
6076              tp->bufmgr_config.dma_low_water);
6077         tw32(BUFMGR_DMA_HIGH_WATER,
6078              tp->bufmgr_config.dma_high_water);
6079
6080         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
6081         for (i = 0; i < 2000; i++) {
6082                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
6083                         break;
6084                 udelay(10);
6085         }
6086         if (i >= 2000) {
6087                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
6088                        tp->dev->name);
6089                 return -ENODEV;
6090         }
6091
6092         /* Setup replenish threshold. */
6093         val = tp->rx_pending / 8;
6094         if (val == 0)
6095                 val = 1;
6096         else if (val > tp->rx_std_max_post)
6097                 val = tp->rx_std_max_post;
6098
6099         tw32(RCVBDI_STD_THRESH, val);
6100
6101         /* Initialize TG3_BDINFO's at:
6102          *  RCVDBDI_STD_BD:     standard eth size rx ring
6103          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
6104          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
6105          *
6106          * like so:
6107          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
6108          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
6109          *                              ring attribute flags
6110          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
6111          *
6112          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
6113          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
6114          *
6115          * The size of each ring is fixed in the firmware, but the location is
6116          * configurable.
6117          */
6118         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6119              ((u64) tp->rx_std_mapping >> 32));
6120         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6121              ((u64) tp->rx_std_mapping & 0xffffffff));
6122         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
6123              NIC_SRAM_RX_BUFFER_DESC);
6124
6125         /* Don't even try to program the JUMBO/MINI buffer descriptor
6126          * configs on 5705.
6127          */
6128         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6129                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6130                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
6131         } else {
6132                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6133                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6134
6135                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
6136                      BDINFO_FLAGS_DISABLED);
6137
6138                 /* Setup replenish threshold. */
6139                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
6140
6141                 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
6142                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6143                              ((u64) tp->rx_jumbo_mapping >> 32));
6144                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6145                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
6146                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6147                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6148                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
6149                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
6150                 } else {
6151                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6152                              BDINFO_FLAGS_DISABLED);
6153                 }
6154
6155         }
6156
6157         /* There is only one send ring on 5705/5750, no need to explicitly
6158          * disable the others.
6159          */
6160         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6161                 /* Clear out send RCB ring in SRAM. */
6162                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
6163                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6164                                       BDINFO_FLAGS_DISABLED);
6165         }
6166
6167         tp->tx_prod = 0;
6168         tp->tx_cons = 0;
6169         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6170         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6171
6172         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
6173                        tp->tx_desc_mapping,
6174                        (TG3_TX_RING_SIZE <<
6175                         BDINFO_FLAGS_MAXLEN_SHIFT),
6176                        NIC_SRAM_TX_BUFFER_DESC);
6177
6178         /* There is only one receive return ring on 5705/5750, no need
6179          * to explicitly disable the others.
6180          */
6181         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6182                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
6183                      i += TG3_BDINFO_SIZE) {
6184                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6185                                       BDINFO_FLAGS_DISABLED);
6186                 }
6187         }
6188
6189         tp->rx_rcb_ptr = 0;
6190         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
6191
6192         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
6193                        tp->rx_rcb_mapping,
6194                        (TG3_RX_RCB_RING_SIZE(tp) <<
6195                         BDINFO_FLAGS_MAXLEN_SHIFT),
6196                        0);
6197
6198         tp->rx_std_ptr = tp->rx_pending;
6199         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
6200                      tp->rx_std_ptr);
6201
6202         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
6203                                                 tp->rx_jumbo_pending : 0;
6204         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
6205                      tp->rx_jumbo_ptr);
6206
6207         /* Initialize MAC address and backoff seed. */
6208         __tg3_set_mac_addr(tp);
6209
6210         /* MTU + ethernet header + FCS + optional VLAN tag */
6211         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
6212
6213         /* The slot time is changed by tg3_setup_phy if we
6214          * run at gigabit with half duplex.
6215          */
6216         tw32(MAC_TX_LENGTHS,
6217              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6218              (6 << TX_LENGTHS_IPG_SHIFT) |
6219              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6220
6221         /* Receive rules. */
6222         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
6223         tw32(RCVLPC_CONFIG, 0x0181);
6224
6225         /* Calculate RDMAC_MODE setting early, we need it to determine
6226          * the RCVLPC_STATE_ENABLE mask.
6227          */
6228         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
6229                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
6230                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
6231                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
6232                       RDMAC_MODE_LNGREAD_ENAB);
6233         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
6234                 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
6235
6236         /* If statement applies to 5705 and 5750 PCI devices only */
6237         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6238              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6239             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
6240                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
6241                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6242                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6243                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
6244                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6245                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
6246                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6247                 }
6248         }
6249
6250         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6251                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6252
6253 #if TG3_TSO_SUPPORT != 0
6254         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6255                 rdmac_mode |= (1 << 27);
6256 #endif
6257
6258         /* Receive/send statistics. */
6259         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6260                 val = tr32(RCVLPC_STATS_ENABLE);
6261                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
6262                 tw32(RCVLPC_STATS_ENABLE, val);
6263         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
6264                    (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6265                 val = tr32(RCVLPC_STATS_ENABLE);
6266                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
6267                 tw32(RCVLPC_STATS_ENABLE, val);
6268         } else {
6269                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
6270         }
6271         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
6272         tw32(SNDDATAI_STATSENAB, 0xffffff);
6273         tw32(SNDDATAI_STATSCTRL,
6274              (SNDDATAI_SCTRL_ENABLE |
6275               SNDDATAI_SCTRL_FASTUPD));
6276
6277         /* Setup host coalescing engine. */
6278         tw32(HOSTCC_MODE, 0);
6279         for (i = 0; i < 2000; i++) {
6280                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
6281                         break;
6282                 udelay(10);
6283         }
6284
6285         __tg3_set_coalesce(tp, &tp->coal);
6286
6287         /* set status block DMA address */
6288         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6289              ((u64) tp->status_mapping >> 32));
6290         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6291              ((u64) tp->status_mapping & 0xffffffff));
6292
6293         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6294                 /* Status/statistics block address.  See tg3_timer,
6295                  * the tg3_periodic_fetch_stats call there, and
6296                  * tg3_get_stats to see how this works for 5705/5750 chips.
6297                  */
6298                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6299                      ((u64) tp->stats_mapping >> 32));
6300                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6301                      ((u64) tp->stats_mapping & 0xffffffff));
6302                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
6303                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
6304         }
6305
6306         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
6307
6308         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
6309         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
6310         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6311                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
6312
6313         /* Clear statistics/status block in chip, and status block in ram. */
6314         for (i = NIC_SRAM_STATS_BLK;
6315              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
6316              i += sizeof(u32)) {
6317                 tg3_write_mem(tp, i, 0);
6318                 udelay(40);
6319         }
6320         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
6321
6322         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6323                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
6324                 /* reset to prevent losing 1st rx packet intermittently */
6325                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6326                 udelay(10);
6327         }
6328
6329         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
6330                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
6331         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
6332         udelay(40);
6333
6334         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
6335          * If TG3_FLAG_EEPROM_WRITE_PROT is set, we should read the
6336          * register to preserve the GPIO settings for LOMs. The GPIOs,
6337          * whether used as inputs or outputs, are set by boot code after
6338          * reset.
6339          */
6340         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
6341                 u32 gpio_mask;
6342
6343                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE2 |
6344                             GRC_LCLCTRL_GPIO_OUTPUT0 | GRC_LCLCTRL_GPIO_OUTPUT2;
6345
6346                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
6347                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
6348                                      GRC_LCLCTRL_GPIO_OUTPUT3;
6349
6350                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6351                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
6352
6353                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
6354
6355                 /* GPIO1 must be driven high for eeprom write protect */
6356                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
6357                                        GRC_LCLCTRL_GPIO_OUTPUT1);
6358         }
6359         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6360         udelay(100);
6361
6362         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
6363         tp->last_tag = 0;
6364
6365         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6366                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
6367                 udelay(40);
6368         }
6369
6370         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
6371                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
6372                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
6373                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
6374                WDMAC_MODE_LNGREAD_ENAB);
6375
6376         /* If statement applies to 5705 and 5750 PCI devices only */
6377         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6378              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6379             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6380                 if ((tp->tg3_flags & TG3_FLG2_TSO_CAPABLE) &&
6381                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6382                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6383                         /* nothing */
6384                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6385                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
6386                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
6387                         val |= WDMAC_MODE_RX_ACCEL;
6388                 }
6389         }
6390
6391         /* Enable host coalescing bug fix */
6392         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
6393             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787))
6394                 val |= (1 << 29);
6395
6396         tw32_f(WDMAC_MODE, val);
6397         udelay(40);
6398
6399         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
6400                 val = tr32(TG3PCI_X_CAPS);
6401                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
6402                         val &= ~PCIX_CAPS_BURST_MASK;
6403                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6404                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6405                         val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
6406                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6407                         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
6408                                 val |= (tp->split_mode_max_reqs <<
6409                                         PCIX_CAPS_SPLIT_SHIFT);
6410                 }
6411                 tw32(TG3PCI_X_CAPS, val);
6412         }
6413
6414         tw32_f(RDMAC_MODE, rdmac_mode);
6415         udelay(40);
6416
6417         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
6418         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6419                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
6420         tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
6421         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
6422         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
6423         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
6424         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
6425 #if TG3_TSO_SUPPORT != 0
6426         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6427                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
6428 #endif
6429         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
6430         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
6431
6432         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
6433                 err = tg3_load_5701_a0_firmware_fix(tp);
6434                 if (err)
6435                         return err;
6436         }
6437
6438 #if TG3_TSO_SUPPORT != 0
6439         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6440                 err = tg3_load_tso_firmware(tp);
6441                 if (err)
6442                         return err;
6443         }
6444 #endif
6445
6446         tp->tx_mode = TX_MODE_ENABLE;
6447         tw32_f(MAC_TX_MODE, tp->tx_mode);
6448         udelay(100);
6449
6450         tp->rx_mode = RX_MODE_ENABLE;
6451         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6452                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
6453
6454         tw32_f(MAC_RX_MODE, tp->rx_mode);
6455         udelay(10);
6456
6457         if (tp->link_config.phy_is_low_power) {
6458                 tp->link_config.phy_is_low_power = 0;
6459                 tp->link_config.speed = tp->link_config.orig_speed;
6460                 tp->link_config.duplex = tp->link_config.orig_duplex;
6461                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
6462         }
6463
6464         tp->mi_mode = MAC_MI_MODE_BASE;
6465         tw32_f(MAC_MI_MODE, tp->mi_mode);
6466         udelay(80);
6467
6468         tw32(MAC_LED_CTRL, tp->led_ctrl);
6469
6470         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
6471         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6472                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6473                 udelay(10);
6474         }
6475         tw32_f(MAC_RX_MODE, tp->rx_mode);
6476         udelay(10);
6477
6478         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6479                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
6480                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
6481                         /* Set drive transmission level to 1.2V  */
6482                         /* only if the signal pre-emphasis bit is not set  */
6483                         val = tr32(MAC_SERDES_CFG);
6484                         val &= 0xfffff000;
6485                         val |= 0x880;
6486                         tw32(MAC_SERDES_CFG, val);
6487                 }
6488                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
6489                         tw32(MAC_SERDES_CFG, 0x616000);
6490         }
6491
6492         /* Prevent chip from dropping frames when flow control
6493          * is enabled.
6494          */
6495         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
6496
6497         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
6498             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6499                 /* Use hardware link auto-negotiation */
6500                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
6501         }
6502
6503         if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
6504             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
6505                 u32 tmp;
6506
6507                 tmp = tr32(SERDES_RX_CTRL);
6508                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
6509                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
6510                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
6511                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6512         }
6513
6514         err = tg3_setup_phy(tp, reset_phy);
6515         if (err)
6516                 return err;
6517
6518         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6519                 u32 tmp;
6520
6521                 /* Clear CRC stats. */
6522                 if (!tg3_readphy(tp, 0x1e, &tmp)) {
6523                         tg3_writephy(tp, 0x1e, tmp | 0x8000);
6524                         tg3_readphy(tp, 0x14, &tmp);
6525                 }
6526         }
6527
6528         __tg3_set_rx_mode(tp->dev);
6529
6530         /* Initialize receive rules. */
6531         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
6532         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
6533         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
6534         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
6535
6536         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6537             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6538                 limit = 8;
6539         else
6540                 limit = 16;
6541         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
6542                 limit -= 4;
6543         switch (limit) {
6544         case 16:
6545                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
6546         case 15:
6547                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
6548         case 14:
6549                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
6550         case 13:
6551                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
6552         case 12:
6553                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
6554         case 11:
6555                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
6556         case 10:
6557                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
6558         case 9:
6559                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
6560         case 8:
6561                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
6562         case 7:
6563                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
6564         case 6:
6565                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
6566         case 5:
6567                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
6568         case 4:
6569                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
6570         case 3:
6571                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
6572         case 2:
6573         case 1:
6574
6575         default:
6576                 break;
6577         };
6578
6579         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
6580
6581         return 0;
6582 }
6583
6584 /* Called at device open time to get the chip ready for
6585  * packet processing.  Invoked with tp->lock held.
6586  */
6587 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
6588 {
6589         int err;
6590
6591         /* Force the chip into D0. */
6592         err = tg3_set_power_state(tp, PCI_D0);
6593         if (err)
6594                 goto out;
6595
6596         tg3_switch_clocks(tp);
6597
6598         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
6599
6600         err = tg3_reset_hw(tp, reset_phy);
6601
6602 out:
6603         return err;
6604 }
6605
/* Add the current value of the 32-bit hardware counter register REG to
 * the 64-bit software counter PSTAT (a {high, low} pair of u32s).  If
 * the low word wraps around (the new sum is smaller than the value just
 * added), propagate a carry into the high word.  Note PSTAT is
 * evaluated more than once.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
6612
/* Fold the chip's 32-bit MAC TX/RX and receive-list-placement hardware
 * counters into the 64-bit software statistics block via
 * TG3_STAT_ADD32.  Called from tg3_timer() with tp->lock held, and only
 * for 5705-and-later chips (see the TG3_FLG2_5705_PLUS check at the
 * call site).
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	/* Nothing useful to accumulate while the link is down. */
	if (!netif_carrier_ok(tp->dev))
		return;

	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);

	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
6653
/* Driver watchdog, re-armed every tp->timer_offset jiffies.  Under
 * tp->lock it: (a) works around the racy non-tagged IRQ handshake by
 * re-asserting either the interrupt or a coalescing event, and
 * schedules a full chip reset if the write DMA engine has lost its
 * enable bit; (b) once per second fetches stats and polls/services link
 * state; (c) every two seconds emits the ASF heartbeat.  If an IRQ
 * sync is in progress the body is skipped but the timer still re-arms.
 */
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	/* Skip the body while tg3_irq_quiesce()/irq_sync is active, but
	 * keep the timer ticking.
	 */
	if (tp->irq_sync)
		goto restart_timer;

	spin_lock(&tp->lock);

	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->hw_status->status & SD_STATUS_UPDATED) {
			/* Status updated but possibly unserviced:
			 * force the interrupt line to fire again.
			 */
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			/* No pending update: force an immediate
			 * coalescing event so a fresh status block is
			 * DMA'd to the host.
			 */
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
		}

		/* Write DMA lost its enable bit: the chip is wedged,
		 * hand off to the reset task (can't reset from timer
		 * context).  Drop the lock before scheduling; the
		 * reset task re-arms the timer via
		 * TG3_FLG2_RESTART_TIMER.
		 */
		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
			spin_unlock(&tp->lock);
			schedule_work(&tp->reset_task);
			return;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
			tg3_periodic_fetch_stats(tp);

		if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, 0);
		} else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			/* Link was up and state changed, or link was
			 * down and the PCS now sees sync/signal:
			 * renegotiate.
			 */
			if (netif_carrier_ok(tp->dev) &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (! netif_carrier_ok(tp->dev) &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				/* Bounce the port mode bits before
				 * re-running PHY setup.
				 */
				tw32_f(MAC_MODE,
				     (tp->mac_mode &
				      ~MAC_MODE_PORT_MODE_MASK));
				udelay(40);
				tw32_f(MAC_MODE, tp->mac_mode);
				udelay(40);
				tg3_setup_phy(tp, 0);
			}
		} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_serdes_parallel_detect(tp);

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.  */
	if (!--tp->asf_counter) {
		if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
			u32 val;

			/* Tell the ASF firmware the driver is alive,
			 * then raise the RX CPU event to deliver the
			 * mailbox command.
			 */
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE2);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			/* 5 seconds timeout */
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
			val = tr32(GRC_RX_CPU_EVENT);
			val |= (1 << 14);
			tw32(GRC_RX_CPU_EVENT, val);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
6755
6756 static int tg3_request_irq(struct tg3 *tp)
6757 {
6758         irqreturn_t (*fn)(int, void *, struct pt_regs *);
6759         unsigned long flags;
6760         struct net_device *dev = tp->dev;
6761
6762         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6763                 fn = tg3_msi;
6764                 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
6765                         fn = tg3_msi_1shot;
6766                 flags = IRQF_SAMPLE_RANDOM;
6767         } else {
6768                 fn = tg3_interrupt;
6769                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6770                         fn = tg3_interrupt_tagged;
6771                 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
6772         }
6773         return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
6774 }
6775
/* Check that the board can actually deliver an interrupt: temporarily
 * install the dedicated test ISR, force an immediate host-coalescing
 * event, and poll the interrupt mailbox (up to 5 x 10ms) for a non-zero
 * value.  The normal handler is re-installed before returning.  Returns
 * 0 if an interrupt was observed, -EIO if not, -ENODEV if the device is
 * not running, or a request_irq() errno.
 */
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	int err, i;
	u32 int_mbox = 0;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	/* Swap the production handler out for the test ISR. */
	free_irq(tp->pdev->irq, dev);

	err = request_irq(tp->pdev->irq, tg3_test_isr,
			  IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
	if (err)
		return err;

	tp->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	/* Force a "coalesce now" event so the chip raises an
	 * interrupt immediately.
	 */
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       HOSTCC_MODE_NOW);

	/* Poll the interrupt mailbox for up to ~50ms. */
	for (i = 0; i < 5; i++) {
		int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
					TG3_64BIT_REG_LOW);
		if (int_mbox != 0)
			break;
		msleep(10);
	}

	tg3_disable_ints(tp);

	/* Put the normal handler back regardless of the outcome. */
	free_irq(tp->pdev->irq, dev);

	err = tg3_request_irq(tp);

	if (err)
		return err;

	if (int_mbox != 0)
		return 0;

	return -EIO;
}
6822
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored.  Any other error from tg3_test_interrupt() or
 * from re-acquiring the IRQ / re-initializing the chip is returned
 * as-is.
 */
static int tg3_test_msi(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	int err;
	u16 pci_cmd;

	/* Nothing to test when the driver is not using MSI. */
	if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	/* Restore the original PCI command word (SERR included). */
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
	       "switching to INTx mode. Please report this failure to "
	       "the PCI maintainer and include system chipset information.\n",
		       tp->dev->name);

	free_irq(tp->pdev->irq, dev);
	pci_disable_msi(tp->pdev);

	tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;

	/* Re-acquire the IRQ as a plain INTx line. */
	err = tg3_request_irq(tp);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, 1);

	tg3_full_unlock(tp);

	if (err)
		free_irq(tp->pdev->irq, dev);

	return err;
}
6883
6884 static int tg3_open(struct net_device *dev)
6885 {
6886         struct tg3 *tp = netdev_priv(dev);
6887         int err;
6888
6889         tg3_full_lock(tp, 0);
6890
6891         err = tg3_set_power_state(tp, PCI_D0);
6892         if (err)
6893                 return err;
6894
6895         tg3_disable_ints(tp);
6896         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
6897
6898         tg3_full_unlock(tp);
6899
6900         /* The placement of this call is tied
6901          * to the setup and use of Host TX descriptors.
6902          */
6903         err = tg3_alloc_consistent(tp);
6904         if (err)
6905                 return err;
6906
6907         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
6908             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
6909             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX) &&
6910             !((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) &&
6911               (tp->pdev_peer == tp->pdev))) {
6912                 /* All MSI supporting chips should support tagged
6913                  * status.  Assert that this is the case.
6914                  */
6915                 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6916                         printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
6917                                "Not using MSI.\n", tp->dev->name);
6918                 } else if (pci_enable_msi(tp->pdev) == 0) {
6919                         u32 msi_mode;
6920
6921                         msi_mode = tr32(MSGINT_MODE);
6922                         tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
6923                         tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
6924                 }
6925         }
6926         err = tg3_request_irq(tp);
6927
6928         if (err) {
6929                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6930                         pci_disable_msi(tp->pdev);
6931                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6932                 }
6933                 tg3_free_consistent(tp);
6934                 return err;
6935         }
6936
6937         tg3_full_lock(tp, 0);
6938
6939         err = tg3_init_hw(tp, 1);
6940         if (err) {
6941                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6942                 tg3_free_rings(tp);
6943         } else {
6944                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6945                         tp->timer_offset = HZ;
6946                 else
6947                         tp->timer_offset = HZ / 10;
6948
6949                 BUG_ON(tp->timer_offset > HZ);
6950                 tp->timer_counter = tp->timer_multiplier =
6951                         (HZ / tp->timer_offset);
6952                 tp->asf_counter = tp->asf_multiplier =
6953                         ((HZ / tp->timer_offset) * 2);
6954
6955                 init_timer(&tp->timer);
6956                 tp->timer.expires = jiffies + tp->timer_offset;
6957                 tp->timer.data = (unsigned long) tp;
6958                 tp->timer.function = tg3_timer;
6959         }
6960
6961         tg3_full_unlock(tp);
6962
6963         if (err) {
6964                 free_irq(tp->pdev->irq, dev);
6965                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6966                         pci_disable_msi(tp->pdev);
6967                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6968                 }
6969                 tg3_free_consistent(tp);
6970                 return err;
6971         }
6972
6973         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6974                 err = tg3_test_msi(tp);
6975
6976                 if (err) {
6977                         tg3_full_lock(tp, 0);
6978
6979                         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6980                                 pci_disable_msi(tp->pdev);
6981                                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6982                         }
6983                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6984                         tg3_free_rings(tp);
6985                         tg3_free_consistent(tp);
6986
6987                         tg3_full_unlock(tp);
6988
6989                         return err;
6990                 }
6991
6992                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6993                         if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
6994                                 u32 val = tr32(0x7c04);
6995
6996                                 tw32(0x7c04, val | (1 << 29));
6997                         }
6998                 }
6999         }
7000
7001         tg3_full_lock(tp, 0);
7002
7003         add_timer(&tp->timer);
7004         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
7005         tg3_enable_ints(tp);
7006
7007         tg3_full_unlock(tp);
7008
7009         netif_start_queue(dev);
7010
7011         return 0;
7012 }
7013
7014 #if 0
7015 /*static*/ void tg3_dump_state(struct tg3 *tp)
7016 {
7017         u32 val32, val32_2, val32_3, val32_4, val32_5;
7018         u16 val16;
7019         int i;
7020
7021         pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
7022         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
7023         printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
7024                val16, val32);
7025
7026         /* MAC block */
7027         printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
7028                tr32(MAC_MODE), tr32(MAC_STATUS));
7029         printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
7030                tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
7031         printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
7032                tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
7033         printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
7034                tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
7035
7036         /* Send data initiator control block */
7037         printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
7038                tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
7039         printk("       SNDDATAI_STATSCTRL[%08x]\n",
7040                tr32(SNDDATAI_STATSCTRL));
7041
7042         /* Send data completion control block */
7043         printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
7044
7045         /* Send BD ring selector block */
7046         printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
7047                tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
7048
7049         /* Send BD initiator control block */
7050         printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
7051                tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
7052
7053         /* Send BD completion control block */
7054         printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
7055
7056         /* Receive list placement control block */
7057         printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
7058                tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
7059         printk("       RCVLPC_STATSCTRL[%08x]\n",
7060                tr32(RCVLPC_STATSCTRL));
7061
7062         /* Receive data and receive BD initiator control block */
7063         printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
7064                tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
7065
7066         /* Receive data completion control block */
7067         printk("DEBUG: RCVDCC_MODE[%08x]\n",
7068                tr32(RCVDCC_MODE));
7069
7070         /* Receive BD initiator control block */
7071         printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
7072                tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
7073
7074         /* Receive BD completion control block */
7075         printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
7076                tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
7077
7078         /* Receive list selector control block */
7079         printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
7080                tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
7081
7082         /* Mbuf cluster free block */
7083         printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
7084                tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
7085
7086         /* Host coalescing control block */
7087         printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
7088                tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
7089         printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
7090                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
7091                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
7092         printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
7093                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
7094                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
7095         printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
7096                tr32(HOSTCC_STATS_BLK_NIC_ADDR));
7097         printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
7098                tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
7099
7100         /* Memory arbiter control block */
7101         printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
7102                tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
7103
7104         /* Buffer manager control block */
7105         printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
7106                tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
7107         printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
7108                tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
7109         printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
7110                "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
7111                tr32(BUFMGR_DMA_DESC_POOL_ADDR),
7112                tr32(BUFMGR_DMA_DESC_POOL_SIZE));
7113
7114         /* Read DMA control block */
7115         printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
7116                tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
7117
7118         /* Write DMA control block */
7119         printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
7120                tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
7121
7122         /* DMA completion block */
7123         printk("DEBUG: DMAC_MODE[%08x]\n",
7124                tr32(DMAC_MODE));
7125
7126         /* GRC block */
7127         printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
7128                tr32(GRC_MODE), tr32(GRC_MISC_CFG));
7129         printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
7130                tr32(GRC_LOCAL_CTRL));
7131
7132         /* TG3_BDINFOs */
7133         printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
7134                tr32(RCVDBDI_JUMBO_BD + 0x0),
7135                tr32(RCVDBDI_JUMBO_BD + 0x4),
7136                tr32(RCVDBDI_JUMBO_BD + 0x8),
7137                tr32(RCVDBDI_JUMBO_BD + 0xc));
7138         printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
7139                tr32(RCVDBDI_STD_BD + 0x0),
7140                tr32(RCVDBDI_STD_BD + 0x4),
7141                tr32(RCVDBDI_STD_BD + 0x8),
7142                tr32(RCVDBDI_STD_BD + 0xc));
7143         printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
7144                tr32(RCVDBDI_MINI_BD + 0x0),
7145                tr32(RCVDBDI_MINI_BD + 0x4),
7146                tr32(RCVDBDI_MINI_BD + 0x8),
7147                tr32(RCVDBDI_MINI_BD + 0xc));
7148
7149         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
7150         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
7151         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
7152         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
7153         printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
7154                val32, val32_2, val32_3, val32_4);
7155
7156         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
7157         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
7158         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
7159         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
7160         printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
7161                val32, val32_2, val32_3, val32_4);
7162
7163         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
7164         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
7165         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
7166         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
7167         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
7168         printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
7169                val32, val32_2, val32_3, val32_4, val32_5);
7170
7171         /* SW status block */
7172         printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
7173                tp->hw_status->status,
7174                tp->hw_status->status_tag,
7175                tp->hw_status->rx_jumbo_consumer,
7176                tp->hw_status->rx_consumer,
7177                tp->hw_status->rx_mini_consumer,
7178                tp->hw_status->idx[0].rx_producer,
7179                tp->hw_status->idx[0].tx_consumer);
7180
7181         /* SW statistics block */
7182         printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
7183                ((u32 *)tp->hw_stats)[0],
7184                ((u32 *)tp->hw_stats)[1],
7185                ((u32 *)tp->hw_stats)[2],
7186                ((u32 *)tp->hw_stats)[3]);
7187
7188         /* Mailboxes */
7189         printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
7190                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
7191                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
7192                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
7193                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
7194
7195         /* NIC side send descriptors. */
7196         for (i = 0; i < 6; i++) {
7197                 unsigned long txd;
7198
7199                 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
7200                         + (i * sizeof(struct tg3_tx_buffer_desc));
7201                 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
7202                        i,
7203                        readl(txd + 0x0), readl(txd + 0x4),
7204                        readl(txd + 0x8), readl(txd + 0xc));
7205         }
7206
7207         /* NIC side RX descriptors. */
7208         for (i = 0; i < 6; i++) {
7209                 unsigned long rxd;
7210
7211                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
7212                         + (i * sizeof(struct tg3_rx_buffer_desc));
7213                 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
7214                        i,
7215                        readl(rxd + 0x0), readl(rxd + 0x4),
7216                        readl(rxd + 0x8), readl(rxd + 0xc));
7217                 rxd += (4 * sizeof(u32));
7218                 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
7219                        i,
7220                        readl(rxd + 0x0), readl(rxd + 0x4),
7221                        readl(rxd + 0x8), readl(rxd + 0xc));
7222         }
7223
7224         for (i = 0; i < 6; i++) {
7225                 unsigned long rxd;
7226
7227                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
7228                         + (i * sizeof(struct tg3_rx_buffer_desc));
7229                 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
7230                        i,
7231                        readl(rxd + 0x0), readl(rxd + 0x4),
7232                        readl(rxd + 0x8), readl(rxd + 0xc));
7233                 rxd += (4 * sizeof(u32));
7234                 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
7235                        i,
7236                        readl(rxd + 0x0), readl(rxd + 0x4),
7237                        readl(rxd + 0x8), readl(rxd + 0xc));
7238         }
7239 }
7240 #endif
7241
7242 static struct net_device_stats *tg3_get_stats(struct net_device *);
7243 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
7244
/* Network-device close entry point: quiesce the chip and release every
 * resource acquired by tg3_open() (timer, IRQ/MSI, rings, DMA memory),
 * then drop the device to D3hot.
 */
static int tg3_close(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	/* Calling flush_scheduled_work() may deadlock because
	 * linkwatch_event() may be on the workqueue and it will try to get
	 * the rtnl_lock which we are holding.
	 */
	while (tp->tg3_flags & TG3_FLAG_IN_RESET_TASK)
		msleep(1);

	netif_stop_queue(dev);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
#if 0
	tg3_dump_state(tp);
#endif

	tg3_disable_ints(tp);

	/* Halt the chip and tear down the rings under the full lock. */
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tp->tg3_flags &=
		~(TG3_FLAG_INIT_COMPLETE |
		  TG3_FLAG_GOT_SERDES_FLOWCTL);

	tg3_full_unlock(tp);

	free_irq(tp->pdev->irq, dev);
	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
		pci_disable_msi(tp->pdev);
		tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
	}

	/* Snapshot counters into the *_prev blocks so statistics persist
	 * across close/open; hw_stats memory is freed just below.
	 */
	memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
	       sizeof(tp->net_stats_prev));
	memcpy(&tp->estats_prev, tg3_get_estats(tp),
	       sizeof(tp->estats_prev));

	tg3_free_consistent(tp);

	tg3_set_power_state(tp, PCI_D3hot);

	netif_carrier_off(tp->dev);

	return 0;
}
7294
7295 static inline unsigned long get_stat64(tg3_stat64_t *val)
7296 {
7297         unsigned long ret;
7298
7299 #if (BITS_PER_LONG == 32)
7300         ret = val->low;
7301 #else
7302         ret = ((u64)val->high << 32) | ((u64)val->low);
7303 #endif
7304         return ret;
7305 }
7306
/* Return the cumulative count of RX CRC errors.
 *
 * For copper 5700/5701 parts the count is maintained in software from
 * a PHY counter; for everything else the MAC statistics block value is
 * used directly.
 */
static unsigned long calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 val;

		/* NOTE(review): regs 0x1e/0x14 look like vendor-specific
		 * PHY shadow registers (0x1e bit 15 enabling access to a
		 * CRC counter in 0x14) — confirm against Broadcom PHY docs.
		 */
		spin_lock_bh(&tp->lock);
		if (!tg3_readphy(tp, 0x1e, &val)) {
			tg3_writephy(tp, 0x1e, val | 0x8000);
			tg3_readphy(tp, 0x14, &val);
		} else
			val = 0;	/* PHY read failed: add nothing */
		spin_unlock_bh(&tp->lock);

		/* Accumulate into the software running total. */
		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
7331
/* Add the live hardware counter <member> to the value snapshotted at
 * last close, storing the sum into the ethtool stats block.  Relies on
 * locals named estats, old_estats and hw_stats (see tg3_get_estats()).
 */
#define ESTAT_ADD(member) \
	estats->member =	old_estats->member + \
				get_stat64(&hw_stats->member)
7335
/* Build the ethtool statistics block: each field is the snapshot taken
 * at the last close (estats_prev) plus the current hardware counter.
 * If the hardware stats block is gone (device closed), return the
 * snapshot unchanged.
 */
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
{
	struct tg3_ethtool_stats *estats = &tp->estats;
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_estats;

	/* Receive-side MAC counters. */
	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	/* Transmit-side MAC counters. */
	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	/* Receive list placement state machine. */
	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	/* Send data initiator state machine. */
	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	/* Host coalescing counters. */
	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	return estats;
}
7423
/* Network-stack statistics entry point: map the hardware statistics
 * block onto struct net_device_stats.  Each field adds the snapshot
 * saved at last close (net_stats_prev) to the live counters, so values
 * are monotonic across close/open cycles.  When the device is closed
 * (hw_stats freed) the snapshot is returned as-is.
 */
static struct net_device_stats *tg3_get_stats(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	struct net_device_stats *stats = &tp->net_stats;
	struct net_device_stats *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_stats;

	/* Packet totals are the sum of the per-cast-type counters. */
	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_over_errors = old_stats->rx_over_errors +
		get_stat64(&hw_stats->rxbds_empty);
	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	/* CRC errors need PHY help on some chips; see calc_crc_errors(). */
	stats->rx_crc_errors = old_stats->rx_crc_errors +
		calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	return stats;
}
7483
7484 static inline u32 calc_crc(unsigned char *buf, int len)
7485 {
7486         u32 reg;
7487         u32 tmp;
7488         int j, k;
7489
7490         reg = 0xffffffff;
7491
7492         for (j = 0; j < len; j++) {
7493                 reg ^= buf[j];
7494
7495                 for (k = 0; k < 8; k++) {
7496                         tmp = reg & 0x01;
7497
7498                         reg >>= 1;
7499
7500                         if (tmp) {
7501                                 reg ^= 0xedb88320;
7502                         }
7503                 }
7504         }
7505
7506         return ~reg;
7507 }
7508
7509 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
7510 {
7511         /* accept or reject all multicast frames */
7512         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
7513         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
7514         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
7515         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
7516 }
7517
/* Recompute MAC_RX_MODE and the multicast hash filter from the device
 * flags and multicast list.  Callers invoke this with the tg3 lock
 * held (see tg3_set_rx_mode()).
 */
static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
#if TG3_VLAN_TAG_USED
	if (!tp->vlgrp &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#else
	/* By definition, VLAN is disabled always in this
	 * case.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi (tp, 1);
	} else if (dev->mc_count < 1) {
		/* Reject all multicast. */
		tg3_set_multi (tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		unsigned int i;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			/* Hash = inverted low 7 bits of the CRC: bits 6:5
			 * select one of the four hash registers, bits 4:0
			 * select the bit within it.
			 */
			crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	/* Only touch the hardware register when the mode actually changed. */
	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}
7581
/* Network-stack set_rx_mode entry point: reprogram RX filtering under
 * the full lock.  Nothing to do while the interface is down.
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_rx_mode(dev);
		tg3_full_unlock(tp);
	}
}
7593
7594 #define TG3_REGDUMP_LEN         (32 * 1024)
7595
/* ethtool get_regs_len: the register dump has a fixed size. */
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REGDUMP_LEN;
}
7600
/* ethtool get_regs: dump selected register ranges into the
 * TG3_REGDUMP_LEN-byte buffer at _p.  Each value is stored at its own
 * register offset within the buffer (the dump mirrors the register
 * map); gaps between the dumped ranges are left zeroed.
 */
static void tg3_get_regs(struct net_device *dev,
		struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p;
	struct tg3 *tp = netdev_priv(dev);
	u8 *orig_p = _p;
	int i;

	regs->version = 0;

	memset(p, 0, TG3_REGDUMP_LEN);

	/* Skip register reads while the PHY/chip is powered down. */
	if (tp->link_config.phy_is_low_power)
		return;

	tg3_full_lock(tp, 0);

	/* __GET_REG32 reads one register into the output cursor;
	 * GET_REG32_LOOP repositions the cursor to the range's own
	 * offset and reads `len` bytes worth of registers;
	 * GET_REG32_1 reads a single register at its offset.
	 */
#define __GET_REG32(reg)	(*(p)++ = tr32(reg))
#define GET_REG32_LOOP(base,len)		\
do {	p = (u32 *)(orig_p + (base));		\
	for (i = 0; i < len; i += 4)		\
		__GET_REG32((base) + i);	\
} while (0)
#define GET_REG32_1(reg)			\
do {	p = (u32 *)(orig_p + (reg));		\
	__GET_REG32((reg));			\
} while (0)

	GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
	GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
	GET_REG32_LOOP(MAC_MODE, 0x4f0);
	GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
	GET_REG32_1(SNDDATAC_MODE);
	GET_REG32_LOOP(SNDBDS_MODE, 0x80);
	GET_REG32_LOOP(SNDBDI_MODE, 0x48);
	GET_REG32_1(SNDBDC_MODE);
	GET_REG32_LOOP(RCVLPC_MODE, 0x20);
	GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
	GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
	GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
	GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
	GET_REG32_1(RCVDCC_MODE);
	GET_REG32_LOOP(RCVBDI_MODE, 0x20);
	GET_REG32_LOOP(RCVCC_MODE, 0x14);
	GET_REG32_LOOP(RCVLSC_MODE, 0x08);
	GET_REG32_1(MBFREE_MODE);
	GET_REG32_LOOP(HOSTCC_MODE, 0x100);
	GET_REG32_LOOP(MEMARB_MODE, 0x10);
	GET_REG32_LOOP(BUFMGR_MODE, 0x58);
	GET_REG32_LOOP(RDMAC_MODE, 0x08);
	GET_REG32_LOOP(WDMAC_MODE, 0x08);
	GET_REG32_1(RX_CPU_MODE);
	GET_REG32_1(RX_CPU_STATE);
	GET_REG32_1(RX_CPU_PGMCTR);
	GET_REG32_1(RX_CPU_HWBKPT);
	GET_REG32_1(TX_CPU_MODE);
	GET_REG32_1(TX_CPU_STATE);
	GET_REG32_1(TX_CPU_PGMCTR);
	GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
	GET_REG32_LOOP(FTQ_RESET, 0x120);
	GET_REG32_LOOP(MSGINT_MODE, 0x0c);
	GET_REG32_1(DMAC_MODE);
	GET_REG32_LOOP(GRC_MODE, 0x4c);
	/* NVRAM registers are only meaningful when NVRAM is present. */
	if (tp->tg3_flags & TG3_FLAG_NVRAM)
		GET_REG32_LOOP(NVRAM_CMD, 0x24);

#undef __GET_REG32
#undef GET_REG32_LOOP
#undef GET_REG32_1

	tg3_full_unlock(tp);
}
7673
/* ethtool get_eeprom_len: size of the NVRAM, probed earlier. */
static int tg3_get_eeprom_len(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	return tp->nvram_size;
}
7680
7681 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
7682 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
7683
/* ethtool EEPROM read entry point.
 *
 * NVRAM can only be read in aligned 4-byte words, so an unaligned
 * request is split into three phases: partial head word, whole middle
 * words, partial tail word.  eeprom->len is advanced as bytes are
 * copied, so a failure mid-way reports how much data is valid.
 *
 * Returns 0 on success or a negative errno from tg3_nvram_read().
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u8  *pd;
	u32 i, offset, len, val, b_offset, b_count;

	/* NVRAM is not accessible while powered down. */
	if (tp->link_config.phy_is_low_power)
		return -EAGAIN;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		/* store little-endian so the byte-wise copy picks the
		 * requested bytes out of the word */
		val = cpu_to_le32(val);
		memcpy(data, ((char*)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes upto the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read(tp, offset + i, &val);
		if (ret) {
			/* report the partial byte count copied so far */
			eeprom->len += i;
			return ret;
		}
		val = cpu_to_le32(val);
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read(tp, b_offset, &val);
		if (ret)
			return ret;
		val = cpu_to_le32(val);
		memcpy(pd, ((char*)&val), b_count);
		eeprom->len += b_count;
	}
	return 0;
}
7745
7746 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf); 
7747
7748 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7749 {
7750         struct tg3 *tp = netdev_priv(dev);
7751         int ret;
7752         u32 offset, len, b_offset, odd_len, start, end;
7753         u8 *buf;
7754
7755         if (tp->link_config.phy_is_low_power)
7756                 return -EAGAIN;
7757
7758         if (eeprom->magic != TG3_EEPROM_MAGIC)
7759                 return -EINVAL;
7760
7761         offset = eeprom->offset;
7762         len = eeprom->len;
7763
7764         if ((b_offset = (offset & 3))) {
7765                 /* adjustments to start on required 4 byte boundary */
7766                 ret = tg3_nvram_read(tp, offset-b_offset, &start);
7767                 if (ret)
7768                         return ret;
7769                 start = cpu_to_le32(start);
7770                 len += b_offset;
7771                 offset &= ~3;
7772                 if (len < 4)
7773                         len = 4;
7774         }
7775
7776         odd_len = 0;
7777         if (len & 3) {
7778                 /* adjustments to end on required 4 byte boundary */
7779                 odd_len = 1;
7780                 len = (len + 3) & ~3;
7781                 ret = tg3_nvram_read(tp, offset+len-4, &end);
7782                 if (ret)
7783                         return ret;
7784                 end = cpu_to_le32(end);
7785         }
7786
7787         buf = data;
7788         if (b_offset || odd_len) {
7789                 buf = kmalloc(len, GFP_KERNEL);
7790                 if (buf == 0)
7791                         return -ENOMEM;
7792                 if (b_offset)
7793                         memcpy(buf, &start, 4);
7794                 if (odd_len)
7795                         memcpy(buf+len-4, &end, 4);
7796                 memcpy(buf + b_offset, data, eeprom->len);
7797         }
7798
7799         ret = tg3_nvram_write_block(tp, offset, len, buf);
7800
7801         if (buf != data)
7802                 kfree(buf);
7803
7804         return ret;
7805 }
7806
/* ethtool get_settings: report supported link modes, current
 * advertisement and, when the interface is running, the active
 * speed/duplex.  Always returns 0.
 */
static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	cmd->supported = (SUPPORTED_Autoneg);

	/* Gigabit modes unless this is a 10/100-only part. */
	if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
		cmd->supported |= (SUPPORTED_1000baseT_Half |
				   SUPPORTED_1000baseT_Full);

	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
		/* Copper: 10/100 modes over MII. */
		cmd->supported |= (SUPPORTED_100baseT_Half |
				  SUPPORTED_100baseT_Full |
				  SUPPORTED_10baseT_Half |
				  SUPPORTED_10baseT_Full |
				  SUPPORTED_MII);
		cmd->port = PORT_TP;
	} else {
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->port = PORT_FIBRE;
	}

	cmd->advertising = tp->link_config.advertising;
	/* Speed/duplex are only meaningful while the device is up. */
	if (netif_running(dev)) {
		cmd->speed = tp->link_config.active_speed;
		cmd->duplex = tp->link_config.active_duplex;
	}
	cmd->phy_address = PHY_ADDR;
	cmd->transceiver = 0;
	cmd->autoneg = tp->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
7841   
7842 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7843 {
7844         struct tg3 *tp = netdev_priv(dev);
7845   
7846         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) { 
7847                 /* These are the only valid advertisement bits allowed.  */
7848                 if (cmd->autoneg == AUTONEG_ENABLE &&
7849                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
7850                                           ADVERTISED_1000baseT_Full |
7851                                           ADVERTISED_Autoneg |
7852                                           ADVERTISED_FIBRE)))
7853                         return -EINVAL;
7854                 /* Fiber can only do SPEED_1000.  */
7855                 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7856                          (cmd->speed != SPEED_1000))
7857                         return -EINVAL;
7858         /* Copper cannot force SPEED_1000.  */
7859         } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7860                    (cmd->speed == SPEED_1000))
7861                 return -EINVAL;
7862         else if ((cmd->speed == SPEED_1000) &&
7863                  (tp->tg3_flags2 & TG3_FLAG_10_100_ONLY))
7864                 return -EINVAL;
7865
7866         tg3_full_lock(tp, 0);
7867
7868         tp->link_config.autoneg = cmd->autoneg;
7869         if (cmd->autoneg == AUTONEG_ENABLE) {
7870                 tp->link_config.advertising = cmd->advertising;
7871                 tp->link_config.speed = SPEED_INVALID;
7872                 tp->link_config.duplex = DUPLEX_INVALID;
7873         } else {
7874                 tp->link_config.advertising = 0;
7875                 tp->link_config.speed = cmd->speed;
7876                 tp->link_config.duplex = cmd->duplex;
7877         }
7878   
7879         if (netif_running(dev))
7880                 tg3_setup_phy(tp, 1);
7881
7882         tg3_full_unlock(tp);
7883   
7884         return 0;
7885 }
7886   
/* ethtool get_drvinfo: report driver name/version, firmware version
 * and PCI bus address.
 * NOTE(review): strcpy assumes each source string fits the fixed-size
 * ethtool_drvinfo fields; keep tp->fw_ver short if its format changes.
 */
static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct tg3 *tp = netdev_priv(dev);

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);
	strcpy(info->fw_version, tp->fw_ver);
	strcpy(info->bus_info, pci_name(tp->pdev));
}
7896   
7897 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7898 {
7899         struct tg3 *tp = netdev_priv(dev);
7900   
7901         wol->supported = WAKE_MAGIC;
7902         wol->wolopts = 0;
7903         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
7904                 wol->wolopts = WAKE_MAGIC;
7905         memset(&wol->sopass, 0, sizeof(wol->sopass));
7906 }
7907   
7908 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7909 {
7910         struct tg3 *tp = netdev_priv(dev);
7911   
7912         if (wol->wolopts & ~WAKE_MAGIC)
7913                 return -EINVAL;
7914         if ((wol->wolopts & WAKE_MAGIC) &&
7915             tp->tg3_flags2 & TG3_FLG2_PHY_SERDES &&
7916             !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
7917                 return -EINVAL;
7918   
7919         spin_lock_bh(&tp->lock);
7920         if (wol->wolopts & WAKE_MAGIC)
7921                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
7922         else
7923                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
7924         spin_unlock_bh(&tp->lock);
7925   
7926         return 0;
7927 }
7928   
7929 static u32 tg3_get_msglevel(struct net_device *dev)
7930 {
7931         struct tg3 *tp = netdev_priv(dev);
7932         return tp->msg_enable;
7933 }
7934   
7935 static void tg3_set_msglevel(struct net_device *dev, u32 value)
7936 {
7937         struct tg3 *tp = netdev_priv(dev);
7938         tp->msg_enable = value;
7939 }
7940   
#if TG3_TSO_SUPPORT != 0
/* ethtool set_tso: hardware without TSO support may only be asked to
 * turn TSO off; HW_TSO_2 parts toggle IPv6 TSO alongside IPv4 before
 * delegating to the generic helper.
 */
static int tg3_set_tso(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE))
		return value ? -EINVAL : 0;

	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) {
		if (value)
			dev->features |= NETIF_F_TSO6;
		else
			dev->features &= ~NETIF_F_TSO6;
	}
	return ethtool_op_set_tso(dev, value);
}
#endif
7960   
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 bmcr;
	int r;

	/* Restart autonegotiation.  Only meaningful while the interface
	 * is up, and only on copper (MII) PHYs -- serdes links have no
	 * MII autoneg to restart.
	 */
	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
		return -EINVAL;

	spin_lock_bh(&tp->lock);
	r = -EINVAL;
	/* The first read is discarded and the second one supplies the
	 * value actually used.  NOTE(review): presumably a dummy read to
	 * flush stale latched PHY state -- confirm before removing.
	 */
	tg3_readphy(tp, MII_BMCR, &bmcr);
	if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
	    ((bmcr & BMCR_ANENABLE) ||
	     (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
		/* Kick off a fresh autonegotiation cycle. */
		tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
					   BMCR_ANENABLE);
		r = 0;
	}
	spin_unlock_bh(&tp->lock);

	return r;
}
7987   
7988 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7989 {
7990         struct tg3 *tp = netdev_priv(dev);
7991   
7992         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
7993         ering->rx_mini_max_pending = 0;
7994         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
7995                 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
7996         else
7997                 ering->rx_jumbo_max_pending = 0;
7998
7999         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
8000
8001         ering->rx_pending = tp->rx_pending;
8002         ering->rx_mini_pending = 0;
8003         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8004                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
8005         else
8006                 ering->rx_jumbo_pending = 0;
8007
8008         ering->tx_pending = tp->tx_pending;
8009 }
8010   
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int irq_sync = 0, err = 0;

	/* Reject sizes beyond the hardware ring limits. */
	if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
	    (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1))
		return -EINVAL;

	/* Quiesce NAPI and interrupts before resizing a live device. */
	if (netif_running(dev)) {
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	/* Some chips cannot post more than 64 standard RX descriptors;
	 * silently clamp rather than fail.
	 */
	if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;
	tp->rx_jumbo_pending = ering->rx_jumbo_pending;
	tp->tx_pending = ering->tx_pending;

	/* A running chip must be halted and reinitialized for the new
	 * ring sizes to take effect.
	 */
	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, 1);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	return err;
}
8047   
8048 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8049 {
8050         struct tg3 *tp = netdev_priv(dev);
8051   
8052         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
8053         epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
8054         epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
8055 }
8056   
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int irq_sync = 0, err = 0;

	/* Quiesce the device before changing flow-control settings. */
	if (netif_running(dev)) {
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	/* Mirror the requested settings into the driver flag word. */
	if (epause->autoneg)
		tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
	else
		tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
	if (epause->rx_pause)
		tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
	else
		tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
	if (epause->tx_pause)
		tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
	else
		tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;

	/* Restart the hardware so the new pause settings take effect. */
	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, 1);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	return err;
}
8093   
8094 static u32 tg3_get_rx_csum(struct net_device *dev)
8095 {
8096         struct tg3 *tp = netdev_priv(dev);
8097         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
8098 }
8099   
8100 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
8101 {
8102         struct tg3 *tp = netdev_priv(dev);
8103   
8104         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8105                 if (data != 0)
8106                         return -EINVAL;
8107                 return 0;
8108         }
8109   
8110         spin_lock_bh(&tp->lock);
8111         if (data)
8112                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
8113         else
8114                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
8115         spin_unlock_bh(&tp->lock);
8116   
8117         return 0;
8118 }
8119   
8120 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
8121 {
8122         struct tg3 *tp = netdev_priv(dev);
8123   
8124         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8125                 if (data != 0)
8126                         return -EINVAL;
8127                 return 0;
8128         }
8129   
8130         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8131             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8132                 ethtool_op_set_tx_hw_csum(dev, data);
8133         else
8134                 ethtool_op_set_tx_csum(dev, data);
8135
8136         return 0;
8137 }
8138
static int tg3_get_stats_count (struct net_device *dev)
{
	/* Number of u64 entries reported by tg3_get_ethtool_stats(). */
	return TG3_NUM_STATS;
}
8143
static int tg3_get_test_count (struct net_device *dev)
{
	/* Number of result slots filled in by tg3_self_test(). */
	return TG3_NUM_TEST;
}
8148
8149 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
8150 {
8151         switch (stringset) {
8152         case ETH_SS_STATS:
8153                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
8154                 break;
8155         case ETH_SS_TEST:
8156                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
8157                 break;
8158         default:
8159                 WARN_ON(1);     /* we need a WARN() */
8160                 break;
8161         }
8162 }
8163
static int tg3_phys_id(struct net_device *dev, u32 data)
{
	struct tg3 *tp = netdev_priv(dev);
	int i;

	/* "ethtool -p": blink the port LEDs so the user can locate the
	 * NIC.  data is the requested duration in seconds; 0 means use
	 * the 2-second default.
	 */
	if (!netif_running(tp->dev))
		return -EAGAIN;

	if (data == 0)
		data = 2;

	/* Alternate all-LEDs-on / all-LEDs-off every half second. */
	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0)
			tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
					   LED_CTRL_1000MBPS_ON |
					   LED_CTRL_100MBPS_ON |
					   LED_CTRL_10MBPS_ON |
					   LED_CTRL_TRAFFIC_OVERRIDE |
					   LED_CTRL_TRAFFIC_BLINK |
					   LED_CTRL_TRAFFIC_LED);

		else
			tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
					   LED_CTRL_TRAFFIC_OVERRIDE);

		/* Bail out early if a signal interrupts the sleep. */
		if (msleep_interruptible(500))
			break;
	}
	/* Restore the normal LED configuration. */
	tw32(MAC_LED_CTRL, tp->led_ctrl);
	return 0;
}
8195
static void tg3_get_ethtool_stats (struct net_device *dev,
				   struct ethtool_stats *estats, u64 *tmp_stats)
{
	struct tg3 *tp = netdev_priv(dev);

	/* tg3_get_estats() returns a pointer to the driver's estats
	 * block; copy that snapshot into the caller's buffer.
	 */
	memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
}
8202
8203 #define NVRAM_TEST_SIZE 0x100
8204 #define NVRAM_SELFBOOT_FORMAT1_SIZE 0x14
8205
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 *buf, csum, magic;
	int i, j, err = 0, size;

	/* NVRAM self-test: read the image and verify its checksums.
	 * Returns 0 on success, -EIO on read failure or checksum
	 * mismatch, -ENOMEM if the bounce buffer cannot be allocated.
	 */
	if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
		return -EIO;

	/* The magic word at offset 0 identifies the image layout. */
	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & 0xff000000) == 0xa5000000) {
		/* Selfboot image; only format 1 is checked here. */
		if ((magic & 0xe00000) == 0x200000)
			size = NVRAM_SELFBOOT_FORMAT1_SIZE;
		else
			return 0;
	} else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	/* Read the image into memory as little-endian 32-bit words. */
	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		u32 val;

		if ((err = tg3_nvram_read(tp, i, &val)) != 0)
			break;
		buf[j] = cpu_to_le32(val);
	}
	if (i < size)
		goto out;

	/* Selfboot format: the byte-wise sum over the whole image must
	 * wrap to zero.
	 */
	if (cpu_to_be32(buf[0]) != TG3_EEPROM_MAGIC) {
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		for (i = 0; i < size; i++)
			csum8 += buf8[i];

		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if(csum != cpu_to_le32(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != cpu_to_le32(buf[0xfc/4]))
		 goto out;

	err = 0;

out:
	kfree(buf);
	return err;
}
8271
8272 #define TG3_SERDES_TIMEOUT_SEC  2
8273 #define TG3_COPPER_TIMEOUT_SEC  6
8274
8275 static int tg3_test_link(struct tg3 *tp)
8276 {
8277         int i, max;
8278
8279         if (!netif_running(tp->dev))
8280                 return -ENODEV;
8281
8282         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
8283                 max = TG3_SERDES_TIMEOUT_SEC;
8284         else
8285                 max = TG3_COPPER_TIMEOUT_SEC;
8286
8287         for (i = 0; i < max; i++) {
8288                 if (netif_carrier_ok(tp->dev))
8289                         return 0;
8290
8291                 if (msleep_interruptible(1000))
8292                         break;
8293         }
8294
8295         return -EIO;
8296 }
8297
/* Only test the commonly used registers */
static int tg3_test_registers(struct tg3 *tp)
{
	int i, is_5705;
	u32 offset, read_mask, write_mask, val, save_val, read_val;
	/* Table of registers to exercise.  read_mask selects the
	 * read-only bits (must survive any write); write_mask selects the
	 * read/write bits (must accept both all-zeros and all-ones).
	 * The flags gate entries by chip family; offset 0xffff terminates.
	 */
	static struct {
		u16 offset;
		u16 flags;
#define TG3_FL_5705	0x1
#define TG3_FL_NOT_5705	0x2
#define TG3_FL_NOT_5788	0x4
		u32 read_mask;
		u32 write_mask;
	} reg_tbl[] = {
		/* MAC Control Registers */
		{ MAC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00ef6f8c },
		{ MAC_MODE, TG3_FL_5705,
			0x00000000, 0x01ef6b8c },
		{ MAC_STATUS, TG3_FL_NOT_5705,
			0x03800107, 0x00000000 },
		{ MAC_STATUS, TG3_FL_5705,
			0x03800100, 0x00000000 },
		{ MAC_ADDR_0_HIGH, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_ADDR_0_LOW, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_RX_MTU_SIZE, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_TX_MODE, 0x0000,
			0x00000000, 0x00000070 },
		{ MAC_TX_LENGTHS, 0x0000,
			0x00000000, 0x00003fff },
		{ MAC_RX_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x000007fc },
		{ MAC_RX_MODE, TG3_FL_5705,
			0x00000000, 0x000007dc },
		{ MAC_HASH_REG_0, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_1, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_2, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_3, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive Data and Receive BD Initiator Control Registers. */
		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
			0x00000000, 0x00000003 },
		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+0, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+4, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+8, 0x0000,
			0x00000000, 0xffff0002 },
		{ RCVDBDI_STD_BD+0xc, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive BD Initiator Control Registers. */
		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVBDI_STD_THRESH, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },

		/* Host Coalescing Control Registers. */
		{ HOSTCC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00000004 },
		{ HOSTCC_MODE, TG3_FL_5705,
			0x00000000, 0x000000f6 },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },
		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },

		/* Buffer Manager Control Registers. */
		{ BUFMGR_MB_POOL_ADDR, 0x0000,
			0x00000000, 0x007fff80 },
		{ BUFMGR_MB_POOL_SIZE, 0x0000,
			0x00000000, 0x007fffff },
		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
			0x00000000, 0x0000003f },
		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_MB_HIGH_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },
		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },

		/* Mailbox Registers */
		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
			0x00000000, 0x000007ff },
		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
			0x00000000, 0x000001ff },

		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
	};

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
		is_5705 = 1;
	else
		is_5705 = 0;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		/* Skip entries that do not apply to this chip family. */
		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
			continue;

		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
			continue;

		if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
			continue;

		offset = (u32) reg_tbl[i].offset;
		read_mask = reg_tbl[i].read_mask;
		write_mask = reg_tbl[i].write_mask;

		/* Save the original register content */
		save_val = tr32(offset);

		/* Determine the read-only value. */
		read_val = save_val & read_mask;

		/* Write zero to the register, then make sure the read-only bits
		 * are not changed and the read/write bits are all zeros.
		 */
		tw32(offset, 0);

		val = tr32(offset);

		/* Test the read-only and read/write bits. */
		if (((val & read_mask) != read_val) || (val & write_mask))
			goto out;

		/* Write ones to all the bits defined by RdMask and WrMask, then
		 * make sure the read-only bits are not changed and the
		 * read/write bits are all ones.
		 */
		tw32(offset, read_mask | write_mask);

		val = tr32(offset);

		/* Test the read-only bits. */
		if ((val & read_mask) != read_val)
			goto out;

		/* Test the read/write bits. */
		if ((val & write_mask) != write_mask)
			goto out;

		/* Put the original value back before moving on. */
		tw32(offset, save_val);
	}

	return 0;

out:
	printk(KERN_ERR PFX "Register test failed at offset %x\n", offset);
	tw32(offset, save_val);
	return -EIO;
}
8510
8511 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
8512 {
8513         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
8514         int i;
8515         u32 j;
8516
8517         for (i = 0; i < sizeof(test_pattern)/sizeof(u32); i++) {
8518                 for (j = 0; j < len; j += 4) {
8519                         u32 val;
8520
8521                         tg3_write_mem(tp, offset + j, test_pattern[i]);
8522                         tg3_read_mem(tp, offset + j, &val);
8523                         if (val != test_pattern[i])
8524                                 return -EIO;
8525                 }
8526         }
8527         return 0;
8528 }
8529
static int tg3_test_memory(struct tg3 *tp)
{
	/* Per-chip-family tables of { offset, len } internal SRAM regions
	 * to pattern-test; offset == 0xffffffff terminates each table.
	 */
	static struct mem_entry {
		u32 offset;
		u32 len;
	} mem_tbl_570x[] = {
		{ 0x00000000, 0x00b50},
		{ 0x00002000, 0x1c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5705[] = {
		{ 0x00000100, 0x0000c},
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x01000},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0e000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5755[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x00800},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0c000},
		{ 0xffffffff, 0x00000}
	};
	struct mem_entry *mem_tbl;
	int err = 0;
	int i;

	/* Pick the region table matching this chip. */
	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
			mem_tbl = mem_tbl_5755;
		else
			mem_tbl = mem_tbl_5705;
	} else
		mem_tbl = mem_tbl_570x;

	/* Test each region; stop at the first failure. */
	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
		if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
		    mem_tbl[i].len)) != 0)
			break;
	}

	return err;
}
8576
8577 #define TG3_MAC_LOOPBACK        0
8578 #define TG3_PHY_LOOPBACK        1
8579
static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
{
	u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
	u32 desc_idx;
	struct sk_buff *skb, *rx_skb;
	u8 *tx_data;
	dma_addr_t map;
	int num_pkts, tx_len, rx_len, i, err;
	struct tg3_rx_buffer_desc *desc;

	/* Send one self-addressed test frame in MAC- or PHY-internal
	 * loopback and verify it comes back intact on the RX ring.
	 * Returns 0 on success, -EIO on mismatch/timeout, -EINVAL for an
	 * unknown mode, -ENOMEM if the skb cannot be allocated.
	 */
	if (loopback_mode == TG3_MAC_LOOPBACK) {
		/* HW errata - mac loopback fails in some cases on 5780.
		 * Normal traffic and PHY loopback are not affected by
		 * errata.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
			return 0;

		mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
			   MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY |
			   MAC_MODE_PORT_MODE_GMII;
		tw32(MAC_MODE, mac_mode);
	} else if (loopback_mode == TG3_PHY_LOOPBACK) {
		/* Put the PHY in internal loopback at 1000/full. */
		tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
					   BMCR_SPEED1000);
		udelay(40);
		/* reset to prevent losing 1st rx packet intermittently */
		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
			tw32_f(MAC_RX_MODE, RX_MODE_RESET);
			udelay(10);
			tw32_f(MAC_RX_MODE, tp->rx_mode);
		}
		mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
			   MAC_MODE_LINK_POLARITY | MAC_MODE_PORT_MODE_GMII;
		/* 5401 needs different link-polarity/LED handling. */
		if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
			mac_mode &= ~MAC_MODE_LINK_POLARITY;
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		}
		tw32(MAC_MODE, mac_mode);
	}
	else
		return -EINVAL;

	err = -EIO;

	/* Build a 1514-byte frame addressed to ourselves with a
	 * recognizable incrementing byte pattern in the payload.
	 */
	tx_len = 1514;
	skb = netdev_alloc_skb(tp->dev, tx_len);
	if (!skb)
		return -ENOMEM;

	tx_data = skb_put(skb, tx_len);
	memcpy(tx_data, tp->dev->dev_addr, 6);
	memset(tx_data + 6, 0x0, 8);

	tw32(MAC_RX_MTU_SIZE, tx_len + 4);

	for (i = 14; i < tx_len; i++)
		tx_data[i] = (u8) (i & 0xff);

	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	     HOSTCC_MODE_NOW);

	udelay(10);

	/* Remember where the RX producer was before we transmit. */
	rx_start_idx = tp->hw_status->idx[0].rx_producer;

	num_pkts = 0;

	/* Post the frame on the send ring and ring the doorbell. */
	tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);

	tp->tx_prod++;
	num_pkts++;

	tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
		     tp->tx_prod);
	tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);

	udelay(10);

	/* Poll the status block for TX completion and RX arrival. */
	for (i = 0; i < 10; i++) {
		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
		       HOSTCC_MODE_NOW);

		udelay(10);

		tx_idx = tp->hw_status->idx[0].tx_consumer;
		rx_idx = tp->hw_status->idx[0].rx_producer;
		if ((tx_idx == tp->tx_prod) &&
		    (rx_idx == (rx_start_idx + num_pkts)))
			break;
	}

	pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	if (tx_idx != tp->tx_prod)
		goto out;

	if (rx_idx != rx_start_idx + num_pkts)
		goto out;

	/* Validate the RX descriptor: standard ring, no errors, and the
	 * expected frame length (minus the 4-byte FCS).
	 */
	desc = &tp->rx_rcb[rx_start_idx];
	desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
	opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
	if (opaque_key != RXD_OPAQUE_RING_STD)
		goto out;

	if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
	    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
		goto out;

	rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
	if (rx_len != tx_len)
		goto out;

	rx_skb = tp->rx_std_buffers[desc_idx].skb;

	map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
	pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);

	/* Compare the received payload against the pattern we sent. */
	for (i = 14; i < tx_len; i++) {
		if (*(rx_skb->data + i) != (u8) (i & 0xff))
			goto out;
	}
	err = 0;

	/* tg3_free_rings will unmap and free the rx_skb */
out:
	return err;
}
8713
8714 #define TG3_MAC_LOOPBACK_FAILED         1
8715 #define TG3_PHY_LOOPBACK_FAILED         2
8716 #define TG3_LOOPBACK_FAILED             (TG3_MAC_LOOPBACK_FAILED |      \
8717                                          TG3_PHY_LOOPBACK_FAILED)
8718
8719 static int tg3_test_loopback(struct tg3 *tp)
8720 {
8721         int err = 0;
8722
8723         if (!netif_running(tp->dev))
8724                 return TG3_LOOPBACK_FAILED;
8725
8726         err = tg3_reset_hw(tp, 1);
8727         if (err)
8728                 return TG3_LOOPBACK_FAILED;
8729
8730         if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
8731                 err |= TG3_MAC_LOOPBACK_FAILED;
8732         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
8733                 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
8734                         err |= TG3_PHY_LOOPBACK_FAILED;
8735         }
8736
8737         return err;
8738 }
8739
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);

	/* ethtool self-test entry point.  data[] gets one result slot
	 * per test (nonzero = failed): 0 NVRAM, 1 link, 2 registers,
	 * 3 memory, 4 loopback, 5 interrupt.  Offline tests halt the
	 * chip and restart it afterwards.
	 */
	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D0);

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[0] = 1;
	}
	if (tg3_test_link(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[1] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, irq_sync = 0;

		/* Quiesce the device before the destructive tests. */
		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		/* Halt the chip and its on-board CPUs (5705+ has no
		 * separate TX CPU).
		 */
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[2] = 1;
		}
		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[3] = 1;
		}
		if ((data[4] = tg3_test_loopback(tp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* The interrupt test must run without the full lock. */
		tg3_full_unlock(tp);

		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[5] = 1;
		}

		tg3_full_lock(tp, 0);

		/* Bring the chip back up for normal operation. */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
			if (!tg3_restart_hw(tp, 1))
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}
	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D3hot);

}
8812
/* Network-device ioctl handler: implements the MII register access
 * ioctls (SIOCGMIIPHY / SIOCGMIIREG / SIOCSMIIREG) via the tg3 PHY
 * read/write helpers.  All other commands return -EOPNOTSUPP.
 */
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = PHY_ADDR;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
			break;			/* We have no PHY */

		/* The MII bus is unusable while powered down. */
		if (tp->link_config.phy_is_low_power)
			return -EAGAIN;

		/* tp->lock serializes PHY register access. */
		spin_lock_bh(&tp->lock);
		err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&tp->lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
			break;			/* We have no PHY */

		/* Writing PHY registers is privileged. */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (tp->link_config.phy_is_low_power)
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&tp->lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
8864
8865 #if TG3_VLAN_TAG_USED
/* VLAN acceleration hook: record the new vlan_group and reprogram the
 * RX mode so the chip keeps or strips VLAN tags as appropriate.  The
 * interface is quiesced around the change.
 */
static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev))
		tg3_netif_stop(tp);

	tg3_full_lock(tp, 0);

	tp->vlgrp = grp;

	/* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
	__tg3_set_rx_mode(dev);

	tg3_full_unlock(tp);

	if (netif_running(dev))
		tg3_netif_start(tp);
}
8885
/* VLAN acceleration hook: clear the per-vid device pointer when a VLAN
 * id is unregistered.  The interface is quiesced around the change.
 */
static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev))
		tg3_netif_stop(tp);

	tg3_full_lock(tp, 0);
	/* vlgrp may already have been cleared via tg3_vlan_rx_register. */
	if (tp->vlgrp)
		tp->vlgrp->vlan_devices[vid] = NULL;
	tg3_full_unlock(tp);

	if (netif_running(dev))
		tg3_netif_start(tp);
}
8901 #endif
8902
8903 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8904 {
8905         struct tg3 *tp = netdev_priv(dev);
8906
8907         memcpy(ec, &tp->coal, sizeof(*ec));
8908         return 0;
8909 }
8910
8911 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8912 {
8913         struct tg3 *tp = netdev_priv(dev);
8914         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
8915         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
8916
8917         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
8918                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
8919                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
8920                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
8921                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
8922         }
8923
8924         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
8925             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
8926             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
8927             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
8928             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
8929             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
8930             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
8931             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
8932             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
8933             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
8934                 return -EINVAL;
8935
8936         /* No rx interrupts will be generated if both are zero */
8937         if ((ec->rx_coalesce_usecs == 0) &&
8938             (ec->rx_max_coalesced_frames == 0))
8939                 return -EINVAL;
8940
8941         /* No tx interrupts will be generated if both are zero */
8942         if ((ec->tx_coalesce_usecs == 0) &&
8943             (ec->tx_max_coalesced_frames == 0))
8944                 return -EINVAL;
8945
8946         /* Only copy relevant parameters, ignore all others. */
8947         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
8948         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
8949         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
8950         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
8951         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
8952         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
8953         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
8954         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
8955         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
8956
8957         if (netif_running(dev)) {
8958                 tg3_full_lock(tp, 0);
8959                 __tg3_set_coalesce(tp, &tp->coal);
8960                 tg3_full_unlock(tp);
8961         }
8962         return 0;
8963 }
8964
/* ethtool method table for tg3 devices. */
static struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.get_rx_csum		= tg3_get_rx_csum,
	.set_rx_csum		= tg3_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= tg3_set_tx_csum,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
/* TSO hooks are only provided when the kernel supports NETIF_F_TSO. */
#if TG3_TSO_SUPPORT != 0
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= tg3_set_tso,
#endif
	.self_test_count	= tg3_get_test_count,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.phys_id		= tg3_phys_id,
	.get_stats_count	= tg3_get_stats_count,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_perm_addr		= ethtool_op_get_perm_addr,
};
9004
9005 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
9006 {
9007         u32 cursize, val, magic;
9008
9009         tp->nvram_size = EEPROM_CHIP_SIZE;
9010
9011         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
9012                 return;
9013
9014         if ((magic != TG3_EEPROM_MAGIC) && ((magic & 0xff000000) != 0xa5000000))
9015                 return;
9016
9017         /*
9018          * Size the chip by reading offsets at increasing powers of two.
9019          * When we encounter our validation signature, we know the addressing
9020          * has wrapped around, and thus have our chip size.
9021          */
9022         cursize = 0x10;
9023
9024         while (cursize < tp->nvram_size) {
9025                 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
9026                         return;
9027
9028                 if (val == magic)
9029                         break;
9030
9031                 cursize <<= 1;
9032         }
9033
9034         tp->nvram_size = cursize;
9035 }
9036                 
9037 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
9038 {
9039         u32 val;
9040
9041         if (tg3_nvram_read_swab(tp, 0, &val) != 0)
9042                 return;
9043
9044         /* Selfboot format */
9045         if (val != TG3_EEPROM_MAGIC) {
9046                 tg3_get_eeprom_size(tp);
9047                 return;
9048         }
9049
9050         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
9051                 if (val != 0) {
9052                         tp->nvram_size = (val >> 16) * 1024;
9053                         return;
9054                 }
9055         }
9056         tp->nvram_size = 0x20000;
9057 }
9058
9059 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
9060 {
9061         u32 nvcfg1;
9062
9063         nvcfg1 = tr32(NVRAM_CFG1);
9064         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
9065                 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9066         }
9067         else {
9068                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9069                 tw32(NVRAM_CFG1, nvcfg1);
9070         }
9071
9072         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
9073             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
9074                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
9075                         case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
9076                                 tp->nvram_jedecnum = JEDEC_ATMEL;
9077                                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
9078                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9079                                 break;
9080                         case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
9081                                 tp->nvram_jedecnum = JEDEC_ATMEL;
9082                                 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
9083                                 break;
9084                         case FLASH_VENDOR_ATMEL_EEPROM:
9085                                 tp->nvram_jedecnum = JEDEC_ATMEL;
9086                                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9087                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9088                                 break;
9089                         case FLASH_VENDOR_ST:
9090                                 tp->nvram_jedecnum = JEDEC_ST;
9091                                 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
9092                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9093                                 break;
9094                         case FLASH_VENDOR_SAIFUN:
9095                                 tp->nvram_jedecnum = JEDEC_SAIFUN;
9096                                 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
9097                                 break;
9098                         case FLASH_VENDOR_SST_SMALL:
9099                         case FLASH_VENDOR_SST_LARGE:
9100                                 tp->nvram_jedecnum = JEDEC_SST;
9101                                 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
9102                                 break;
9103                 }
9104         }
9105         else {
9106                 tp->nvram_jedecnum = JEDEC_ATMEL;
9107                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
9108                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9109         }
9110 }
9111
9112 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
9113 {
9114         u32 nvcfg1;
9115
9116         nvcfg1 = tr32(NVRAM_CFG1);
9117
9118         /* NVRAM protection for TPM */
9119         if (nvcfg1 & (1 << 27))
9120                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
9121
9122         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
9123                 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
9124                 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
9125                         tp->nvram_jedecnum = JEDEC_ATMEL;
9126                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9127                         break;
9128                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
9129                         tp->nvram_jedecnum = JEDEC_ATMEL;
9130                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9131                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9132                         break;
9133                 case FLASH_5752VENDOR_ST_M45PE10:
9134                 case FLASH_5752VENDOR_ST_M45PE20:
9135                 case FLASH_5752VENDOR_ST_M45PE40:
9136                         tp->nvram_jedecnum = JEDEC_ST;
9137                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9138                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9139                         break;
9140         }
9141
9142         if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
9143                 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
9144                         case FLASH_5752PAGE_SIZE_256:
9145                                 tp->nvram_pagesize = 256;
9146                                 break;
9147                         case FLASH_5752PAGE_SIZE_512:
9148                                 tp->nvram_pagesize = 512;
9149                                 break;
9150                         case FLASH_5752PAGE_SIZE_1K:
9151                                 tp->nvram_pagesize = 1024;
9152                                 break;
9153                         case FLASH_5752PAGE_SIZE_2K:
9154                                 tp->nvram_pagesize = 2048;
9155                                 break;
9156                         case FLASH_5752PAGE_SIZE_4K:
9157                                 tp->nvram_pagesize = 4096;
9158                                 break;
9159                         case FLASH_5752PAGE_SIZE_264:
9160                                 tp->nvram_pagesize = 264;
9161                                 break;
9162                 }
9163         }
9164         else {
9165                 /* For eeprom, set pagesize to maximum eeprom size */
9166                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9167
9168                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9169                 tw32(NVRAM_CFG1, nvcfg1);
9170         }
9171 }
9172
9173 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
9174 {
9175         u32 nvcfg1;
9176
9177         nvcfg1 = tr32(NVRAM_CFG1);
9178
9179         /* NVRAM protection for TPM */
9180         if (nvcfg1 & (1 << 27))
9181                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
9182
9183         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
9184                 case FLASH_5755VENDOR_ATMEL_EEPROM_64KHZ:
9185                 case FLASH_5755VENDOR_ATMEL_EEPROM_376KHZ:
9186                         tp->nvram_jedecnum = JEDEC_ATMEL;
9187                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9188                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9189
9190                         nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9191                         tw32(NVRAM_CFG1, nvcfg1);
9192                         break;
9193                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
9194                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
9195                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
9196                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
9197                 case FLASH_5755VENDOR_ATMEL_FLASH_4:
9198                         tp->nvram_jedecnum = JEDEC_ATMEL;
9199                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9200                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9201                         tp->nvram_pagesize = 264;
9202                         break;
9203                 case FLASH_5752VENDOR_ST_M45PE10:
9204                 case FLASH_5752VENDOR_ST_M45PE20:
9205                 case FLASH_5752VENDOR_ST_M45PE40:
9206                         tp->nvram_jedecnum = JEDEC_ST;
9207                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9208                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9209                         tp->nvram_pagesize = 256;
9210                         break;
9211         }
9212 }
9213
9214 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
9215 {
9216         u32 nvcfg1;
9217
9218         nvcfg1 = tr32(NVRAM_CFG1);
9219
9220         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
9221                 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
9222                 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
9223                 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
9224                 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
9225                         tp->nvram_jedecnum = JEDEC_ATMEL;
9226                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9227                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9228
9229                         nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9230                         tw32(NVRAM_CFG1, nvcfg1);
9231                         break;
9232                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
9233                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
9234                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
9235                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
9236                         tp->nvram_jedecnum = JEDEC_ATMEL;
9237                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9238                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9239                         tp->nvram_pagesize = 264;
9240                         break;
9241                 case FLASH_5752VENDOR_ST_M45PE10:
9242                 case FLASH_5752VENDOR_ST_M45PE20:
9243                 case FLASH_5752VENDOR_ST_M45PE40:
9244                         tp->nvram_jedecnum = JEDEC_ST;
9245                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9246                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9247                         tp->nvram_pagesize = 256;
9248                         break;
9249         }
9250 }
9251
9252 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
9253 static void __devinit tg3_nvram_init(struct tg3 *tp)
9254 {
9255         int j;
9256
9257         tw32_f(GRC_EEPROM_ADDR,
9258              (EEPROM_ADDR_FSM_RESET |
9259               (EEPROM_DEFAULT_CLOCK_PERIOD <<
9260                EEPROM_ADDR_CLKPERD_SHIFT)));
9261
9262         /* XXX schedule_timeout() ... */
9263         for (j = 0; j < 100; j++)
9264                 udelay(10);
9265
9266         /* Enable seeprom accesses. */
9267         tw32_f(GRC_LOCAL_CTRL,
9268              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
9269         udelay(100);
9270
9271         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
9272             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
9273                 tp->tg3_flags |= TG3_FLAG_NVRAM;
9274
9275                 if (tg3_nvram_lock(tp)) {
9276                         printk(KERN_WARNING PFX "%s: Cannot get nvarm lock, "
9277                                "tg3_nvram_init failed.\n", tp->dev->name);
9278                         return;
9279                 }
9280                 tg3_enable_nvram_access(tp);
9281
9282                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9283                         tg3_get_5752_nvram_info(tp);
9284                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9285                         tg3_get_5755_nvram_info(tp);
9286                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
9287                         tg3_get_5787_nvram_info(tp);
9288                 else
9289                         tg3_get_nvram_info(tp);
9290
9291                 tg3_get_nvram_size(tp);
9292
9293                 tg3_disable_nvram_access(tp);
9294                 tg3_nvram_unlock(tp);
9295
9296         } else {
9297                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
9298
9299                 tg3_get_eeprom_size(tp);
9300         }
9301 }
9302
/* Read one 32-bit word from the serial EEPROM via the GRC_EEPROM_ADDR
 * state machine (used on chips without an NVRAM interface).
 * @offset must be dword aligned and within EEPROM_ADDR_ADDR_MASK.
 * Returns 0 on success, -EINVAL on a bad offset, -EBUSY on timeout.
 */
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
					u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK ||
	    (offset % 4) != 0)
		return -EINVAL;

	/* Clear the address, device-id and read bits before programming
	 * the new request. */
	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	/* Poll for completion: up to 10000 x 100us = ~1 second. */
	for (i = 0; i < 10000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		udelay(100);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	*val = tr32(GRC_EEPROM_DATA);
	return 0;
}
9336
9337 #define NVRAM_CMD_TIMEOUT 10000
9338
9339 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
9340 {
9341         int i;
9342
9343         tw32(NVRAM_CMD, nvram_cmd);
9344         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
9345                 udelay(10);
9346                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
9347                         udelay(10);
9348                         break;
9349                 }
9350         }
9351         if (i == NVRAM_CMD_TIMEOUT) {
9352                 return -EBUSY;
9353         }
9354         return 0;
9355 }
9356
9357 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
9358 {
9359         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9360             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9361             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9362             (tp->nvram_jedecnum == JEDEC_ATMEL))
9363
9364                 addr = ((addr / tp->nvram_pagesize) <<
9365                         ATMEL_AT45DB0X1B_PAGE_POS) +
9366                        (addr % tp->nvram_pagesize);
9367
9368         return addr;
9369 }
9370
9371 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
9372 {
9373         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9374             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9375             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9376             (tp->nvram_jedecnum == JEDEC_ATMEL))
9377
9378                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
9379                         tp->nvram_pagesize) +
9380                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
9381
9382         return addr;
9383 }
9384
/* Read one 32-bit word from NVRAM at logical byte @offset.
 * Falls back to the EEPROM state machine on chips without NVRAM.
 * Takes the NVRAM hardware lock around the access.  On success,
 * stores the byte-swapped word in *val and returns 0.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	/* Translate to the device's physical page addressing. */
	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	/* *val is only written when the read command succeeded. */
	if (ret == 0)
		*val = swab32(tr32(NVRAM_RDDATA));

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}
9416
9417 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
9418 {
9419         int err;
9420         u32 tmp;
9421
9422         err = tg3_nvram_read(tp, offset, &tmp);
9423         *val = swab32(tmp);
9424         return err;
9425 }
9426
/* Write @len bytes from @buf to the serial EEPROM starting at @offset,
 * one 32-bit word at a time, polling each word write for completion.
 * The caller guarantees dword-aligned offset and length.
 * Returns 0 on success or -EBUSY when a word write times out.
 */
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
				    u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr, data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		tw32(GRC_EEPROM_DATA, cpu_to_le32(data));

		/* Ack any previous completion before starting the write. */
		val = tr32(GRC_EEPROM_ADDR);
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);

		/* Poll for completion: up to 10000 x 100us per word. */
		for (j = 0; j < 10000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			udelay(100);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}
9468
9469 /* offset and length are dword aligned */
9470 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
9471                 u8 *buf)
9472 {
9473         int ret = 0;
9474         u32 pagesize = tp->nvram_pagesize;
9475         u32 pagemask = pagesize - 1;
9476         u32 nvram_cmd;
9477         u8 *tmp;
9478
9479         tmp = kmalloc(pagesize, GFP_KERNEL);
9480         if (tmp == NULL)
9481                 return -ENOMEM;
9482
9483         while (len) {
9484                 int j;
9485                 u32 phy_addr, page_off, size;
9486
9487                 phy_addr = offset & ~pagemask;
9488         
9489                 for (j = 0; j < pagesize; j += 4) {
9490                         if ((ret = tg3_nvram_read(tp, phy_addr + j,
9491                                                 (u32 *) (tmp + j))))
9492                                 break;
9493                 }
9494                 if (ret)
9495                         break;
9496
9497                 page_off = offset & pagemask;
9498                 size = pagesize;
9499                 if (len < size)
9500                         size = len;
9501
9502                 len -= size;
9503
9504                 memcpy(tmp + page_off, buf, size);
9505
9506                 offset = offset + (pagesize - page_off);
9507
9508                 tg3_enable_nvram_access(tp);
9509
9510                 /*
9511                  * Before we can erase the flash page, we need
9512                  * to issue a special "write enable" command.
9513                  */
9514                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9515
9516                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9517                         break;
9518
9519                 /* Erase the target page */
9520                 tw32(NVRAM_ADDR, phy_addr);
9521
9522                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
9523                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
9524
9525                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9526                         break;
9527
9528                 /* Issue another write enable to start the write. */
9529                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9530
9531                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9532                         break;
9533
9534                 for (j = 0; j < pagesize; j += 4) {
9535                         u32 data;
9536
9537                         data = *((u32 *) (tmp + j));
9538                         tw32(NVRAM_WRDATA, cpu_to_be32(data));
9539
9540                         tw32(NVRAM_ADDR, phy_addr + j);
9541
9542                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
9543                                 NVRAM_CMD_WR;
9544
9545                         if (j == 0)
9546                                 nvram_cmd |= NVRAM_CMD_FIRST;
9547                         else if (j == (pagesize - 4))
9548                                 nvram_cmd |= NVRAM_CMD_LAST;
9549
9550                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
9551                                 break;
9552                 }
9553                 if (ret)
9554                         break;
9555         }
9556
9557         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9558         tg3_nvram_exec_cmd(tp, nvram_cmd);
9559
9560         kfree(tmp);
9561
9562         return ret;
9563 }
9564
/* offset and length are dword aligned */
/* Write @len bytes to buffered flash (or EEPROM-style NVRAM) one
 * 32-bit word at a time.  NVRAM_CMD_FIRST/LAST framing is applied at
 * page boundaries and at the ends of the transfer; older ST parts get
 * an explicit write-enable before each page.  Returns 0 on success or
 * the first tg3_nvram_exec_cmd() error.
 */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 data, page_off, phy_addr, nvram_cmd;

		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, cpu_to_be32(data));

		page_off = offset % tp->nvram_pagesize;

		/* Buffered Atmel parts need page-format addressing. */
		phy_addr = tg3_nvram_phys_addr(tp, offset);

		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		/* FIRST at page start (or transfer start), LAST at the
		 * end of a page or the end of the transfer. */
		if ((page_off == 0) || (i == 0))
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		/* Pre-5752 ST flash requires a write-enable command
		 * before the first word of each page. */
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {

			if ((ret = tg3_nvram_exec_cmd(tp,
				NVRAM_CMD_WREN | NVRAM_CMD_GO |
				NVRAM_CMD_DONE)))

				break;
		}
		if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
			break;
	}
	return ret;
}
9615
/* offset and length are dword aligned */
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
{
	int ret;

	/* If the part is hardware write protected, drop GPIO_OUTPUT1 to
	 * release the protection for the duration of the write.
	 */
	if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
		udelay(40);
	}

	if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
		/* No NVRAM controller: fall back to raw EEPROM access. */
		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
	}
	else {
		u32 grc_mode;

		/* Serialize NVRAM access against the on-chip firmware.
		 * NOTE(review): on lock failure this returns without
		 * restoring the write-protect GPIO deasserted above —
		 * confirm whether that is intended.
		 */
		ret = tg3_nvram_lock(tp);
		if (ret)
			return ret;

		tg3_enable_nvram_access(tp);
		if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
		    !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
			tw32(NVRAM_WRITE1, 0x406);

		/* Enable NVRAM writes in GRC mode for the duration. */
		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);

		/* Buffered parts (and non-flash EEPROMs) take word writes
		 * directly; raw flash goes through the page-based
		 * erase/rewrite path.
		 */
		if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
			!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {

			ret = tg3_nvram_write_block_buffered(tp, offset, len,
				buf);
		}
		else {
			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
				buf);
		}

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);
	}

	/* Restore write protection if it was enabled on entry. */
	if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
		udelay(40);
	}

	return ret;
}
9670
/* Maps a board's PCI subsystem vendor/device ID pair to the PHY ID
 * populated on that board.  Used as a fallback when the PHY cannot be
 * identified from hardware or EEPROM.  A phy_id of 0 marks a serdes
 * (fiber) board with no copper PHY.
 */
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};

static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */

	/* 3com boards. */
	{ PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
	{ PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
	{ PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
	{ PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
	{ PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */

	/* DELL boards. */
	{ PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
	{ PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
	{ PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
	{ PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */

	/* Compaq boards. */
	{ PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
	{ PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
	{ PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
	{ PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
	{ PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */

	/* IBM boards. */
	{ PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
};
9713
9714 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
9715 {
9716         int i;
9717
9718         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
9719                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
9720                      tp->pdev->subsystem_vendor) &&
9721                     (subsys_id_to_phy_id[i].subsys_devid ==
9722                      tp->pdev->subsystem_device))
9723                         return &subsys_id_to_phy_id[i];
9724         }
9725         return NULL;
9726 }
9727
/* Read the bootcode-provided hardware configuration out of NIC SRAM
 * and derive the PHY ID, LED mode, write-protect, ASF and serdes
 * flags from it.  Falls back to safe defaults (PHY_1 LED mode,
 * write-protect on) when no valid SRAM signature is found.
 */
static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;
	u16 pmcsr;

	/* On some early chips the SRAM cannot be accessed in D3hot state,
	 * so need make sure we're in D0.
	 */
	pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
	pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
	pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
	msleep(1);

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* The memory arbiter has to be enabled in order for SRAM accesses
	 * to succeed.  Normally on powerup the tg3 chip firmware will make
	 * sure it is enabled, but other entities such as system netboot
	 * code might disable it.
	 */
	val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	tp->phy_id = PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device by default.  */
	tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;

	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		/* The secondary config word only exists on newer bootcode
		 * versions (0 < ver < 0x100) and never on 5700/5701/5703.
		 */
		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		/* Repack the SRAM-format PHY ID into the driver's
		 * internal PHY ID layout.
		 */
		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
				tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
			else
				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		}

		/* 5750+ parts carry extended (Shasta) LED mode bits in
		 * the secondary config word.
		 */
		if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			/* 5750 A0/A1 need the PHY LED bits as well. */
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
			    tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		};

		/* Dell 5700/5701 boards use PHY_2 LED mode regardless. */
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP)
			tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
		else
			tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
			if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
				tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
		}
		if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
			tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;

		if (cfg2 & (1 << 17))
			tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by */
		/* bootcode if bit 18 is set */
		if (cfg2 & (1 << 18))
			tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
	}
}
9879
/* Identify the PHY attached to this device.  Tries, in order: the
 * live MII ID registers (skipped when ASF firmware owns the PHY),
 * the PHY ID already found in EEPROM by tg3_get_eeprom_hw_cfg(),
 * and finally the hardcoded subsystem-ID table.  For copper PHYs it
 * also kicks off autonegotiation if the link is not already up.
 * Returns 0 on success or a negative errno.
 */
static int __devinit tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
		hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		/* Pack the two MII ID registers into the driver's
		 * internal PHY ID format.
		 */
		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

		hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
	}

	if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == PHY_ID_BCM8002)
			tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		else
			tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
	} else {
		if (tp->phy_id != PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = lookup_by_subsys(tp);
			if (!p)
				return -ENODEV;

			tp->phy_id = p->phy_id;
			/* A zero phy_id in the table marks a serdes board. */
			if (!tp->phy_id ||
			    tp->phy_id == PHY_ID_BCM8002)
				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		}
	}

	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		u32 bmsr, adv_reg, tg3_ctrl;

		/* BMSR link status is latched: read twice, and skip the
		 * reset/renegotiate path if the link is already up.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_100HALF | ADVERTISE_100FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		tg3_ctrl = 0;
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
			tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
				    MII_TG3_CTRL_ADV_1000_FULL);
			/* 5701 A0/B0 must advertise as master. */
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
					     MII_TG3_CTRL_ENABLE_AS_MASTER);
		}

		if (!tg3_copper_is_advertising_all(tp)) {
			tg3_writephy(tp, MII_ADVERTISE, adv_reg);

			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
				tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
		tg3_phy_set_wirespeed(tp);

		/* NOTE(review): this repeats the advertisement writes done
		 * above unconditionally — presumably to rewrite them after
		 * tg3_phy_set_wirespeed(); confirm whether both writes are
		 * required.
		 */
		tg3_writephy(tp, MII_ADVERTISE, adv_reg);
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
			tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
	}

skip_phy_reset:
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;
	}

	/* NOTE(review): the 5401 DSP init is run a second time here even
	 * though the block above just succeeded (err == 0) — looks like a
	 * deliberate double-write for this PHY; confirm before changing.
	 */
	if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
		err = tg3_init_5401phy_dsp(tp);
	}

	if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
		tp->link_config.advertising =
			(ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_Autoneg |
			 ADVERTISED_FIBRE);
	if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
		tp->link_config.advertising &=
			~(ADVERTISED_1000baseT_Half |
			  ADVERTISED_1000baseT_Full);

	return err;
}
10002
10003 static void __devinit tg3_read_partno(struct tg3 *tp)
10004 {
10005         unsigned char vpd_data[256];
10006         int i;
10007         u32 magic;
10008
10009         if (tg3_nvram_read_swab(tp, 0x0, &magic))
10010                 goto out_not_found;
10011
10012         if (magic == TG3_EEPROM_MAGIC) {
10013                 for (i = 0; i < 256; i += 4) {
10014                         u32 tmp;
10015
10016                         if (tg3_nvram_read(tp, 0x100 + i, &tmp))
10017                                 goto out_not_found;
10018
10019                         vpd_data[i + 0] = ((tmp >>  0) & 0xff);
10020                         vpd_data[i + 1] = ((tmp >>  8) & 0xff);
10021                         vpd_data[i + 2] = ((tmp >> 16) & 0xff);
10022                         vpd_data[i + 3] = ((tmp >> 24) & 0xff);
10023                 }
10024         } else {
10025                 int vpd_cap;
10026
10027                 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
10028                 for (i = 0; i < 256; i += 4) {
10029                         u32 tmp, j = 0;
10030                         u16 tmp16;
10031
10032                         pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
10033                                               i);
10034                         while (j++ < 100) {
10035                                 pci_read_config_word(tp->pdev, vpd_cap +
10036                                                      PCI_VPD_ADDR, &tmp16);
10037                                 if (tmp16 & 0x8000)
10038                                         break;
10039                                 msleep(1);
10040                         }
10041                         if (!(tmp16 & 0x8000))
10042                                 goto out_not_found;
10043
10044                         pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
10045                                               &tmp);
10046                         tmp = cpu_to_le32(tmp);
10047                         memcpy(&vpd_data[i], &tmp, 4);
10048                 }
10049         }
10050
10051         /* Now parse and find the part number. */
10052         for (i = 0; i < 256; ) {
10053                 unsigned char val = vpd_data[i];
10054                 int block_end;
10055
10056                 if (val == 0x82 || val == 0x91) {
10057                         i = (i + 3 +
10058                              (vpd_data[i + 1] +
10059                               (vpd_data[i + 2] << 8)));
10060                         continue;
10061                 }
10062
10063                 if (val != 0x90)
10064                         goto out_not_found;
10065
10066                 block_end = (i + 3 +
10067                              (vpd_data[i + 1] +
10068                               (vpd_data[i + 2] << 8)));
10069                 i += 3;
10070                 while (i < block_end) {
10071                         if (vpd_data[i + 0] == 'P' &&
10072                             vpd_data[i + 1] == 'N') {
10073                                 int partno_len = vpd_data[i + 2];
10074
10075                                 if (partno_len > 24)
10076                                         goto out_not_found;
10077
10078                                 memcpy(tp->board_part_number,
10079                                        &vpd_data[i + 3],
10080                                        partno_len);
10081
10082                                 /* Success. */
10083                                 return;
10084                         }
10085                 }
10086
10087                 /* Part number not found. */
10088                 goto out_not_found;
10089         }
10090
10091 out_not_found:
10092         strcpy(tp->board_part_number, "none");
10093 }
10094
10095 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
10096 {
10097         u32 val, offset, start;
10098
10099         if (tg3_nvram_read_swab(tp, 0, &val))
10100                 return;
10101
10102         if (val != TG3_EEPROM_MAGIC)
10103                 return;
10104
10105         if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
10106             tg3_nvram_read_swab(tp, 0x4, &start))
10107                 return;
10108
10109         offset = tg3_nvram_logical_addr(tp, offset);
10110         if (tg3_nvram_read_swab(tp, offset, &val))
10111                 return;
10112
10113         if ((val & 0xfc000000) == 0x0c000000) {
10114                 u32 ver_offset, addr;
10115                 int i;
10116
10117                 if (tg3_nvram_read_swab(tp, offset + 4, &val) ||
10118                     tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
10119                         return;
10120
10121                 if (val != 0)
10122                         return;
10123
10124                 addr = offset + ver_offset - start;
10125                 for (i = 0; i < 16; i += 4) {
10126                         if (tg3_nvram_read(tp, addr + i, &val))
10127                                 return;
10128
10129                         val = cpu_to_le32(val);
10130                         memcpy(tp->fw_ver + i, &val, 4);
10131                 }
10132         }
10133 }
10134
10135 static int __devinit tg3_get_invariants(struct tg3 *tp)
10136 {
10137         static struct pci_device_id write_reorder_chipsets[] = {
10138                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
10139                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
10140                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
10141                              PCI_DEVICE_ID_AMD_8131_BRIDGE) },
10142                 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
10143                              PCI_DEVICE_ID_VIA_8385_0) },
10144                 { },
10145         };
10146         u32 misc_ctrl_reg;
10147         u32 cacheline_sz_reg;
10148         u32 pci_state_reg, grc_misc_cfg;
10149         u32 val;
10150         u16 pci_cmd;
10151         int err;
10152
10153         /* Force memory write invalidate off.  If we leave it on,
10154          * then on 5700_BX chips we have to enable a workaround.
10155          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
10156          * to match the cacheline size.  The Broadcom driver have this
10157          * workaround but turns MWI off all the times so never uses
10158          * it.  This seems to suggest that the workaround is insufficient.
10159          */
10160         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10161         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
10162         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10163
10164         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
10165          * has the register indirect write enable bit set before
10166          * we try to access any of the MMIO registers.  It is also
10167          * critical that the PCI-X hw workaround situation is decided
10168          * before that as well.
10169          */
10170         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10171                               &misc_ctrl_reg);
10172
10173         tp->pci_chip_rev_id = (misc_ctrl_reg >>
10174                                MISC_HOST_CTRL_CHIPREV_SHIFT);
10175
10176         /* Wrong chip ID in 5752 A0. This code can be removed later
10177          * as A0 is not in production.
10178          */
10179         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
10180                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
10181
10182         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
10183          * we need to disable memory and use config. cycles
10184          * only to access all registers. The 5702/03 chips
10185          * can mistakenly decode the special cycles from the
10186          * ICH chipsets as memory write cycles, causing corruption
10187          * of register and memory space. Only certain ICH bridges
10188          * will drive special cycles with non-zero data during the
10189          * address phase which can fall within the 5703's address
10190          * range. This is not an ICH bug as the PCI spec allows
10191          * non-zero address during special cycles. However, only
10192          * these ICH bridges are known to drive non-zero addresses
10193          * during special cycles.
10194          *
10195          * Since special cycles do not cross PCI bridges, we only
10196          * enable this workaround if the 5703 is on the secondary
10197          * bus of these ICH bridges.
10198          */
10199         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
10200             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
10201                 static struct tg3_dev_id {
10202                         u32     vendor;
10203                         u32     device;
10204                         u32     rev;
10205                 } ich_chipsets[] = {
10206                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
10207                           PCI_ANY_ID },
10208                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
10209                           PCI_ANY_ID },
10210                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
10211                           0xa },
10212                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
10213                           PCI_ANY_ID },
10214                         { },
10215                 };
10216                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
10217                 struct pci_dev *bridge = NULL;
10218
10219                 while (pci_id->vendor != 0) {
10220                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
10221                                                 bridge);
10222                         if (!bridge) {
10223                                 pci_id++;
10224                                 continue;
10225                         }
10226                         if (pci_id->rev != PCI_ANY_ID) {
10227                                 u8 rev;
10228
10229                                 pci_read_config_byte(bridge, PCI_REVISION_ID,
10230                                                      &rev);
10231                                 if (rev > pci_id->rev)
10232                                         continue;
10233                         }
10234                         if (bridge->subordinate &&
10235                             (bridge->subordinate->number ==
10236                              tp->pdev->bus->number)) {
10237
10238                                 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
10239                                 pci_dev_put(bridge);
10240                                 break;
10241                         }
10242                 }
10243         }
10244
10245         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
10246          * DMA addresses > 40-bit. This bridge may have other additional
10247          * 57xx devices behind it in some 4-port NIC designs for example.
10248          * Any tg3 device found behind the bridge will also need the 40-bit
10249          * DMA workaround.
10250          */
10251         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
10252             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
10253                 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
10254                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10255                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
10256         }
10257         else {
10258                 struct pci_dev *bridge = NULL;
10259
10260                 do {
10261                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
10262                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
10263                                                 bridge);
10264                         if (bridge && bridge->subordinate &&
10265                             (bridge->subordinate->number <=
10266                              tp->pdev->bus->number) &&
10267                             (bridge->subordinate->subordinate >=
10268                              tp->pdev->bus->number)) {
10269                                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10270                                 pci_dev_put(bridge);
10271                                 break;
10272                         }
10273                 } while (bridge);
10274         }
10275
10276         /* Initialize misc host control in PCI block. */
10277         tp->misc_host_ctrl |= (misc_ctrl_reg &
10278                                MISC_HOST_CTRL_CHIPREV);
10279         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10280                                tp->misc_host_ctrl);
10281
10282         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10283                               &cacheline_sz_reg);
10284
10285         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
10286         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
10287         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
10288         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
10289
10290         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
10291             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
10292             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10293             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10294             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
10295                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
10296
10297         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
10298             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
10299                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
10300
10301         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
10302                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10303                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) {
10304                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
10305                         tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
10306                 } else {
10307                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 |
10308                                           TG3_FLG2_HW_TSO_1_BUG;
10309                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
10310                                 ASIC_REV_5750 &&
10311                             tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
10312                                 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_1_BUG;
10313                 }
10314         }
10315
10316         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
10317             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
10318             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
10319             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 &&
10320             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787)
10321                 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
10322
10323         if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
10324                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
10325
10326         /* If we have an AMD 762 or VIA K8T800 chipset, write
10327          * reordering to the mailbox registers done by the host
10328          * controller can cause major troubles.  We read back from
10329          * every mailbox register write to force the writes to be
10330          * posted to the chip in order.
10331          */
10332         if (pci_dev_present(write_reorder_chipsets) &&
10333             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
10334                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
10335
10336         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
10337             tp->pci_lat_timer < 64) {
10338                 tp->pci_lat_timer = 64;
10339
10340                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
10341                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
10342                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
10343                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
10344
10345                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10346                                        cacheline_sz_reg);
10347         }
10348
10349         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10350                               &pci_state_reg);
10351
10352         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
10353                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
10354
10355                 /* If this is a 5700 BX chipset, and we are in PCI-X
10356                  * mode, enable register write workaround.
10357                  *
10358                  * The workaround is to use indirect register accesses
10359                  * for all chip writes not to mailbox registers.
10360                  */
10361                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
10362                         u32 pm_reg;
10363                         u16 pci_cmd;
10364
10365                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10366
10367                         /* The chip can have it's power management PCI config
10368                          * space registers clobbered due to this bug.
10369                          * So explicitly force the chip into D0 here.
10370                          */
10371                         pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
10372                                               &pm_reg);
10373                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
10374                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
10375                         pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
10376                                                pm_reg);
10377
10378                         /* Also, force SERR#/PERR# in PCI command. */
10379                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10380                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
10381                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10382                 }
10383         }
10384
10385         /* 5700 BX chips need to have their TX producer index mailboxes
10386          * written twice to workaround a bug.
10387          */
10388         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
10389                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
10390
10391         /* Back to back register writes can cause problems on this chip,
10392          * the workaround is to read back all reg writes except those to
10393          * mailbox regs.  See tg3_write_indirect_reg32().
10394          *
10395          * PCI Express 5750_A0 rev chips need this workaround too.
10396          */
10397         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
10398             ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
10399              tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
10400                 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
10401
10402         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
10403                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
10404         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
10405                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
10406
10407         /* Chip-specific fixup from Broadcom driver */
10408         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
10409             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
10410                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
10411                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
10412         }
10413
10414         /* Default fast path register access methods */
10415         tp->read32 = tg3_read32;
10416         tp->write32 = tg3_write32;
10417         tp->read32_mbox = tg3_read32;
10418         tp->write32_mbox = tg3_write32;
10419         tp->write32_tx_mbox = tg3_write32;
10420         tp->write32_rx_mbox = tg3_write32;
10421
10422         /* Various workaround register access methods */
10423         if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
10424                 tp->write32 = tg3_write_indirect_reg32;
10425         else if (tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG)
10426                 tp->write32 = tg3_write_flush_reg32;
10427
10428         if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
10429             (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
10430                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
10431                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
10432                         tp->write32_rx_mbox = tg3_write_flush_reg32;
10433         }
10434
10435         if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
10436                 tp->read32 = tg3_read_indirect_reg32;
10437                 tp->write32 = tg3_write_indirect_reg32;
10438                 tp->read32_mbox = tg3_read_indirect_mbox;
10439                 tp->write32_mbox = tg3_write_indirect_mbox;
10440                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
10441                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
10442
10443                 iounmap(tp->regs);
10444                 tp->regs = NULL;
10445
10446                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10447                 pci_cmd &= ~PCI_COMMAND_MEMORY;
10448                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10449         }
10450
10451         if (tp->write32 == tg3_write_indirect_reg32 ||
10452             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
10453              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10454               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
10455                 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
10456
10457         /* Get eeprom hw config before calling tg3_set_power_state().
10458          * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
10459          * determined before calling tg3_set_power_state() so that
10460          * we know whether or not to switch out of Vaux power.
10461          * When the flag is set, it means that GPIO1 is used for eeprom
10462          * write protect and also implies that it is a LOM where GPIOs
10463          * are not used to switch power.
10464          */ 
10465         tg3_get_eeprom_hw_cfg(tp);
10466
10467         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
10468          * GPIO1 driven high will bring 5700's external PHY out of reset.
10469          * It is also used as eeprom write protect on LOMs.
10470          */
10471         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
10472         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10473             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
10474                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10475                                        GRC_LCLCTRL_GPIO_OUTPUT1);
10476         /* Unused GPIO3 must be driven as output on 5752 because there
10477          * are no pull-up resistors on unused GPIO pins.
10478          */
10479         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10480                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
10481
10482         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10483                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
10484
10485         /* Force the chip into D0. */
10486         err = tg3_set_power_state(tp, PCI_D0);
10487         if (err) {
10488                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
10489                        pci_name(tp->pdev));
10490                 return err;
10491         }
10492
10493         /* 5700 B0 chips do not support checksumming correctly due
10494          * to hardware bugs.
10495          */
10496         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
10497                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
10498
10499         /* Derive initial jumbo mode from MTU assigned in
10500          * ether_setup() via the alloc_etherdev() call
10501          */
10502         if (tp->dev->mtu > ETH_DATA_LEN &&
10503             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
10504                 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
10505
10506         /* Determine WakeOnLan speed to use. */
10507         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10508             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
10509             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
10510             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
10511                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
10512         } else {
10513                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
10514         }
10515
10516         /* A few boards don't want Ethernet@WireSpeed phy feature */
10517         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10518             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
10519              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
10520              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
10521             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
10522                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
10523
10524         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
10525             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
10526                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
10527         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
10528                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
10529
10530         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10531                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10532                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
10533                         tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
10534                 else
10535                         tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
10536         }
10537
10538         tp->coalesce_mode = 0;
10539         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
10540             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
10541                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
10542
10543         /* Initialize MAC MI mode, polling disabled. */
10544         tw32_f(MAC_MI_MODE, tp->mi_mode);
10545         udelay(80);
10546
10547         /* Initialize data/descriptor byte/word swapping. */
10548         val = tr32(GRC_MODE);
10549         val &= GRC_MODE_HOST_STACKUP;
10550         tw32(GRC_MODE, val | tp->grc_mode);
10551
10552         tg3_switch_clocks(tp);
10553
10554         /* Clear this out for sanity. */
10555         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10556
10557         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10558                               &pci_state_reg);
10559         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
10560             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
10561                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
10562
10563                 if (chiprevid == CHIPREV_ID_5701_A0 ||
10564                     chiprevid == CHIPREV_ID_5701_B0 ||
10565                     chiprevid == CHIPREV_ID_5701_B2 ||
10566                     chiprevid == CHIPREV_ID_5701_B5) {
10567                         void __iomem *sram_base;
10568
10569                         /* Write some dummy words into the SRAM status block
10570                          * area, see if it reads back correctly.  If the return
10571                          * value is bad, force enable the PCIX workaround.
10572                          */
10573                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
10574
10575                         writel(0x00000000, sram_base);
10576                         writel(0x00000000, sram_base + 4);
10577                         writel(0xffffffff, sram_base + 4);
10578                         if (readl(sram_base) != 0x00000000)
10579                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10580                 }
10581         }
10582
10583         udelay(50);
10584         tg3_nvram_init(tp);
10585
10586         grc_misc_cfg = tr32(GRC_MISC_CFG);
10587         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
10588
10589         /* Broadcom's driver says that CIOBE multisplit has a bug */
10590 #if 0
10591         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
10592             grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
10593                 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
10594                 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
10595         }
10596 #endif
10597         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10598             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
10599              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
10600                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
10601
10602         if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
10603             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
10604                 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
10605         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
10606                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
10607                                       HOSTCC_MODE_CLRTICK_TXBD);
10608
10609                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
10610                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10611                                        tp->misc_host_ctrl);
10612         }
10613
10614         /* these are limited to 10/100 only */
10615         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
10616              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
10617             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10618              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
10619              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
10620               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
10621               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
10622             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
10623              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
10624               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)))
10625                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
10626
10627         err = tg3_phy_probe(tp);
10628         if (err) {
10629                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
10630                        pci_name(tp->pdev), err);
10631                 /* ... but do not return immediately ... */
10632         }
10633
10634         tg3_read_partno(tp);
10635         tg3_read_fw_ver(tp);
10636
10637         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
10638                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10639         } else {
10640                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10641                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
10642                 else
10643                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10644         }
10645
10646         /* 5700 {AX,BX} chips have a broken status block link
10647          * change bit implementation, so we must use the
10648          * status register in those cases.
10649          */
10650         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10651                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
10652         else
10653                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
10654
10655         /* The led_ctrl is set during tg3_phy_probe, here we might
10656          * have to force the link status polling mechanism based
10657          * upon subsystem IDs.
10658          */
10659         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10660             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
10661                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
10662                                   TG3_FLAG_USE_LINKCHG_REG);
10663         }
10664
10665         /* For all SERDES we poll the MAC status register. */
10666         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10667                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
10668         else
10669                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
10670
10671         /* All chips before 5787 can get confused if TX buffers
10672          * straddle the 4GB address boundary in some cases.
10673          */
10674         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10675             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
10676                 tp->dev->hard_start_xmit = tg3_start_xmit;
10677         else
10678                 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
10679
10680         tp->rx_offset = 2;
10681         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
10682             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
10683                 tp->rx_offset = 0;
10684
10685         tp->rx_std_max_post = TG3_RX_RING_SIZE;
10686
10687         /* Increment the rx prod index on the rx std ring by at most
10688          * 8 for these chips to workaround hw errata.
10689          */
10690         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
10691             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
10692             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10693                 tp->rx_std_max_post = 8;
10694
10695         /* By default, disable wake-on-lan.  User can change this
10696          * using ETHTOOL_SWOL.
10697          */
10698         tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
10699
10700         return err;
10701 }
10702
10703 #ifdef CONFIG_SPARC64
10704 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
10705 {
10706         struct net_device *dev = tp->dev;
10707         struct pci_dev *pdev = tp->pdev;
10708         struct pcidev_cookie *pcp = pdev->sysdata;
10709
10710         if (pcp != NULL) {
10711                 unsigned char *addr;
10712                 int len;
10713
10714                 addr = of_get_property(pcp->prom_node, "local-mac-address",
10715                                         &len);
10716                 if (addr && len == 6) {
10717                         memcpy(dev->dev_addr, addr, 6);
10718                         memcpy(dev->perm_addr, dev->dev_addr, 6);
10719                         return 0;
10720                 }
10721         }
10722         return -ENODEV;
10723 }
10724
10725 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
10726 {
10727         struct net_device *dev = tp->dev;
10728
10729         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
10730         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
10731         return 0;
10732 }
10733 #endif
10734
10735 static int __devinit tg3_get_device_address(struct tg3 *tp)
10736 {
10737         struct net_device *dev = tp->dev;
10738         u32 hi, lo, mac_offset;
10739         int addr_ok = 0;
10740
10741 #ifdef CONFIG_SPARC64
10742         if (!tg3_get_macaddr_sparc(tp))
10743                 return 0;
10744 #endif
10745
10746         mac_offset = 0x7c;
10747         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
10748             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
10749                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
10750                         mac_offset = 0xcc;
10751                 if (tg3_nvram_lock(tp))
10752                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
10753                 else
10754                         tg3_nvram_unlock(tp);
10755         }
10756
10757         /* First try to get it from MAC address mailbox. */
10758         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
10759         if ((hi >> 16) == 0x484b) {
10760                 dev->dev_addr[0] = (hi >>  8) & 0xff;
10761                 dev->dev_addr[1] = (hi >>  0) & 0xff;
10762
10763                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
10764                 dev->dev_addr[2] = (lo >> 24) & 0xff;
10765                 dev->dev_addr[3] = (lo >> 16) & 0xff;
10766                 dev->dev_addr[4] = (lo >>  8) & 0xff;
10767                 dev->dev_addr[5] = (lo >>  0) & 0xff;
10768
10769                 /* Some old bootcode may report a 0 MAC address in SRAM */
10770                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
10771         }
10772         if (!addr_ok) {
10773                 /* Next, try NVRAM. */
10774                 if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
10775                     !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
10776                         dev->dev_addr[0] = ((hi >> 16) & 0xff);
10777                         dev->dev_addr[1] = ((hi >> 24) & 0xff);
10778                         dev->dev_addr[2] = ((lo >>  0) & 0xff);
10779                         dev->dev_addr[3] = ((lo >>  8) & 0xff);
10780                         dev->dev_addr[4] = ((lo >> 16) & 0xff);
10781                         dev->dev_addr[5] = ((lo >> 24) & 0xff);
10782                 }
10783                 /* Finally just fetch it out of the MAC control regs. */
10784                 else {
10785                         hi = tr32(MAC_ADDR_0_HIGH);
10786                         lo = tr32(MAC_ADDR_0_LOW);
10787
10788                         dev->dev_addr[5] = lo & 0xff;
10789                         dev->dev_addr[4] = (lo >> 8) & 0xff;
10790                         dev->dev_addr[3] = (lo >> 16) & 0xff;
10791                         dev->dev_addr[2] = (lo >> 24) & 0xff;
10792                         dev->dev_addr[1] = hi & 0xff;
10793                         dev->dev_addr[0] = (hi >> 8) & 0xff;
10794                 }
10795         }
10796
10797         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
10798 #ifdef CONFIG_SPARC64
10799                 if (!tg3_get_default_macaddr_sparc(tp))
10800                         return 0;
10801 #endif
10802                 return -EINVAL;
10803         }
10804         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
10805         return 0;
10806 }
10807
10808 #define BOUNDARY_SINGLE_CACHELINE       1
10809 #define BOUNDARY_MULTI_CACHELINE        2
10810
10811 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
10812 {
10813         int cacheline_size;
10814         u8 byte;
10815         int goal;
10816
10817         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
10818         if (byte == 0)
10819                 cacheline_size = 1024;
10820         else
10821                 cacheline_size = (int) byte * 4;
10822
10823         /* On 5703 and later chips, the boundary bits have no
10824          * effect.
10825          */
10826         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10827             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
10828             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
10829                 goto out;
10830
10831 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
10832         goal = BOUNDARY_MULTI_CACHELINE;
10833 #else
10834 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
10835         goal = BOUNDARY_SINGLE_CACHELINE;
10836 #else
10837         goal = 0;
10838 #endif
10839 #endif
10840
10841         if (!goal)
10842                 goto out;
10843
10844         /* PCI controllers on most RISC systems tend to disconnect
10845          * when a device tries to burst across a cache-line boundary.
10846          * Therefore, letting tg3 do so just wastes PCI bandwidth.
10847          *
10848          * Unfortunately, for PCI-E there are only limited
10849          * write-side controls for this, and thus for reads
10850          * we will still get the disconnects.  We'll also waste
10851          * these PCI cycles for both read and write for chips
10852          * other than 5700 and 5701 which do not implement the
10853          * boundary bits.
10854          */
10855         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
10856             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
10857                 switch (cacheline_size) {
10858                 case 16:
10859                 case 32:
10860                 case 64:
10861                 case 128:
10862                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10863                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
10864                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
10865                         } else {
10866                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
10867                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
10868                         }
10869                         break;
10870
10871                 case 256:
10872                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
10873                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
10874                         break;
10875
10876                 default:
10877                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
10878                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
10879                         break;
10880                 };
10881         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10882                 switch (cacheline_size) {
10883                 case 16:
10884                 case 32:
10885                 case 64:
10886                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10887                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
10888                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
10889                                 break;
10890                         }
10891                         /* fallthrough */
10892                 case 128:
10893                 default:
10894                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
10895                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
10896                         break;
10897                 };
10898         } else {
10899                 switch (cacheline_size) {
10900                 case 16:
10901                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10902                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
10903                                         DMA_RWCTRL_WRITE_BNDRY_16);
10904                                 break;
10905                         }
10906                         /* fallthrough */
10907                 case 32:
10908                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10909                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
10910                                         DMA_RWCTRL_WRITE_BNDRY_32);
10911                                 break;
10912                         }
10913                         /* fallthrough */
10914                 case 64:
10915                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10916                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
10917                                         DMA_RWCTRL_WRITE_BNDRY_64);
10918                                 break;
10919                         }
10920                         /* fallthrough */
10921                 case 128:
10922                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10923                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
10924                                         DMA_RWCTRL_WRITE_BNDRY_128);
10925                                 break;
10926                         }
10927                         /* fallthrough */
10928                 case 256:
10929                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
10930                                 DMA_RWCTRL_WRITE_BNDRY_256);
10931                         break;
10932                 case 512:
10933                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
10934                                 DMA_RWCTRL_WRITE_BNDRY_512);
10935                         break;
10936                 case 1024:
10937                 default:
10938                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
10939                                 DMA_RWCTRL_WRITE_BNDRY_1024);
10940                         break;
10941                 };
10942         }
10943
10944 out:
10945         return val;
10946 }
10947
/* Perform one host<->NIC DMA transfer of 'size' bytes using an internal
 * buffer descriptor placed in NIC SRAM, then poll the matching completion
 * FIFO for up to 4ms (40 x 100us).
 *
 * @buf/@buf_dma: host test buffer (kernel virtual / DMA bus address).
 * @to_device:    non-zero => read DMA (host memory -> NIC SRAM),
 *                zero     => write DMA (NIC SRAM -> host memory).
 *
 * Returns 0 when the completion FIFO reports the descriptor that was
 * queued, -ENODEV on timeout.
 */
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	/* Clear completion FIFOs and both DMA engines' status before the
	 * test so stale state can't satisfy the completion poll below.
	 */
	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	/* Build the descriptor: host buffer address plus the NIC-side
	 * staging buffer at SRAM offset 0x2100.
	 */
	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	/* Copy the descriptor into NIC SRAM one 32-bit word at a time via
	 * the indirect PCI config-space memory window.
	 */
	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Kick off the transfer by enqueueing the descriptor's SRAM
	 * address on the appropriate DMA high-priority FIFO.
	 */
	if (to_device) {
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	} else {
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
	}

	/* Poll the completion FIFO: the low 16 bits echo the descriptor
	 * address when the DMA has finished.
	 */
	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
11028
11029 #define TEST_BUFFER_SIZE        0x2000
11030
/* Choose the DMA read/write control settings (tp->dma_rwctrl) for this
 * chip/bus combination and, on 5700/5701, empirically verify them with a
 * loopback DMA test, tightening the write boundary if corruption is seen.
 *
 * Returns 0 on success, -ENOMEM if the test buffer cannot be allocated,
 * or -ENODEV if the DMA test fails even with the safest settings.
 */
static int __devinit tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret;

	buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	/* Start from the baseline PCI read/write command codes, then let
	 * tg3_calc_dma_bndry() fold in the boundary bits.
	 */
	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	/* Bus-type and ASIC-revision specific watermark/workaround bits. */
	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |= 0x009f0000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}

	/* 5703/5704: clear the low nibble regardless of bus type. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
	/* Unneeded, already done by tg3_get_invariants.  */
	tg3_switch_clocks(tp);
#endif

	/* Only 5700/5701 need the empirical write-boundary test below;
	 * all other chips keep the computed settings as-is.
	 */
	ret = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	/* Write a known pattern to the chip, read it back, and verify.
	 * On the first corruption, retry once with the 16-byte write
	 * boundary; a second corruption is fatal.
	 */
	while (1) {
		u32 *p = buf, i;

		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
			printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
			break;
		}

#if 0
		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
				/* ret = -ENODEV here? */
			}
			p[i] = 0;
		}
#endif
		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);

			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		static struct pci_device_id dma_wait_state_chipsets[] = {
			{ PCI_DEVICE(PCI_VENDOR_ID_APPLE,
				     PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
			{ },
		};

		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		}
		else
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
11211
11212 static void __devinit tg3_init_link_config(struct tg3 *tp)
11213 {
11214         tp->link_config.advertising =
11215                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
11216                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
11217                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
11218                  ADVERTISED_Autoneg | ADVERTISED_MII);
11219         tp->link_config.speed = SPEED_INVALID;
11220         tp->link_config.duplex = DUPLEX_INVALID;
11221         tp->link_config.autoneg = AUTONEG_ENABLE;
11222         tp->link_config.active_speed = SPEED_INVALID;
11223         tp->link_config.active_duplex = DUPLEX_INVALID;
11224         tp->link_config.phy_is_low_power = 0;
11225         tp->link_config.orig_speed = SPEED_INVALID;
11226         tp->link_config.orig_duplex = DUPLEX_INVALID;
11227         tp->link_config.orig_autoneg = AUTONEG_INVALID;
11228 }
11229
11230 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
11231 {
11232         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11233                 tp->bufmgr_config.mbuf_read_dma_low_water =
11234                         DEFAULT_MB_RDMA_LOW_WATER_5705;
11235                 tp->bufmgr_config.mbuf_mac_rx_low_water =
11236                         DEFAULT_MB_MACRX_LOW_WATER_5705;
11237                 tp->bufmgr_config.mbuf_high_water =
11238                         DEFAULT_MB_HIGH_WATER_5705;
11239
11240                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
11241                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
11242                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
11243                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
11244                 tp->bufmgr_config.mbuf_high_water_jumbo =
11245                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
11246         } else {
11247                 tp->bufmgr_config.mbuf_read_dma_low_water =
11248                         DEFAULT_MB_RDMA_LOW_WATER;
11249                 tp->bufmgr_config.mbuf_mac_rx_low_water =
11250                         DEFAULT_MB_MACRX_LOW_WATER;
11251                 tp->bufmgr_config.mbuf_high_water =
11252                         DEFAULT_MB_HIGH_WATER;
11253
11254                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
11255                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
11256                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
11257                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
11258                 tp->bufmgr_config.mbuf_high_water_jumbo =
11259                         DEFAULT_MB_HIGH_WATER_JUMBO;
11260         }
11261
11262         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
11263         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
11264 }
11265
11266 static char * __devinit tg3_phy_string(struct tg3 *tp)
11267 {
11268         switch (tp->phy_id & PHY_ID_MASK) {
11269         case PHY_ID_BCM5400:    return "5400";
11270         case PHY_ID_BCM5401:    return "5401";
11271         case PHY_ID_BCM5411:    return "5411";
11272         case PHY_ID_BCM5701:    return "5701";
11273         case PHY_ID_BCM5703:    return "5703";
11274         case PHY_ID_BCM5704:    return "5704";
11275         case PHY_ID_BCM5705:    return "5705";
11276         case PHY_ID_BCM5750:    return "5750";
11277         case PHY_ID_BCM5752:    return "5752";
11278         case PHY_ID_BCM5714:    return "5714";
11279         case PHY_ID_BCM5780:    return "5780";
11280         case PHY_ID_BCM5755:    return "5755";
11281         case PHY_ID_BCM5787:    return "5787";
11282         case PHY_ID_BCM8002:    return "8002/serdes";
11283         case 0:                 return "serdes";
11284         default:                return "unknown";
11285         };
11286 }
11287
11288 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
11289 {
11290         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11291                 strcpy(str, "PCI Express");
11292                 return str;
11293         } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
11294                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
11295
11296                 strcpy(str, "PCIX:");
11297
11298                 if ((clock_ctrl == 7) ||
11299                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
11300                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
11301                         strcat(str, "133MHz");
11302                 else if (clock_ctrl == 0)
11303                         strcat(str, "33MHz");
11304                 else if (clock_ctrl == 2)
11305                         strcat(str, "50MHz");
11306                 else if (clock_ctrl == 4)
11307                         strcat(str, "66MHz");
11308                 else if (clock_ctrl == 6)
11309                         strcat(str, "100MHz");
11310         } else {
11311                 strcpy(str, "PCI:");
11312                 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
11313                         strcat(str, "66MHz");
11314                 else
11315                         strcat(str, "33MHz");
11316         }
11317         if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
11318                 strcat(str, ":32-bit");
11319         else
11320                 strcat(str, ":64-bit");
11321         return str;
11322 }
11323
11324 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
11325 {
11326         struct pci_dev *peer;
11327         unsigned int func, devnr = tp->pdev->devfn & ~7;
11328
11329         for (func = 0; func < 8; func++) {
11330                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
11331                 if (peer && peer != tp->pdev)
11332                         break;
11333                 pci_dev_put(peer);
11334         }
11335         /* 5704 can be configured in single-port mode, set peer to
11336          * tp->pdev in that case.
11337          */
11338         if (!peer) {
11339                 peer = tp->pdev;
11340                 return peer;
11341         }
11342
11343         /*
11344          * We don't need to keep the refcount elevated; there's no way
11345          * to remove one half of this device without removing the other
11346          */
11347         pci_dev_put(peer);
11348
11349         return peer;
11350 }
11351
11352 static void __devinit tg3_init_coal(struct tg3 *tp)
11353 {
11354         struct ethtool_coalesce *ec = &tp->coal;
11355
11356         memset(ec, 0, sizeof(*ec));
11357         ec->cmd = ETHTOOL_GCOALESCE;
11358         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
11359         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
11360         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
11361         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
11362         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
11363         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
11364         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
11365         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
11366         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
11367
11368         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
11369                                  HOSTCC_MODE_CLRTICK_TXBD)) {
11370                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
11371                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
11372                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
11373                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
11374         }
11375
11376         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11377                 ec->rx_coalesce_usecs_irq = 0;
11378                 ec->tx_coalesce_usecs_irq = 0;
11379                 ec->stats_block_coalesce_usecs = 0;
11380         }
11381 }
11382
/* PCI probe entry point: enable and map the device, allocate the netdev,
 * discover chip capabilities, configure DMA masks, run the DMA self-test,
 * and register the network interface.
 *
 * Returns 0 on success or a negative errno; on failure all acquired
 * resources are released via the err_out_* unwind labels.
 */
static int __devinit tg3_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	static int tg3_version_printed = 0;
	unsigned long tg3reg_base, tg3reg_len;
	struct net_device *dev;
	struct tg3 *tp;
	int i, err, pm_cap;
	char str[40];
	u64 dma_mask, persist_dma_mask;

	/* Print the driver banner once, on the first probed device. */
	if (tg3_version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR PFX "Cannot enable PCI device, "
		       "aborting.\n");
		return err;
	}

	/* BAR 0 must be a memory-mapped region. */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find proper PCI device "
		       "base address, aborting.\n");
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		printk(KERN_ERR PFX "Cannot obtain PCI resources, "
		       "aborting.\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
		       "aborting.\n");
		err = -EIO;
		goto err_out_free_res;
	}

	tg3reg_base = pci_resource_start(pdev, 0);
	tg3reg_len = pci_resource_len(pdev, 0);

	dev = alloc_etherdev(sizeof(*tp));
	if (!dev) {
		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

#if TG3_VLAN_TAG_USED
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	dev->vlan_rx_register = tg3_vlan_rx_register;
	dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
#endif

	/* Initialize the private state embedded in the netdev. */
	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->mac_mode = TG3_DEF_MAC_MODE;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;
	tp->mi_mode = MAC_MI_MODE_BASE;
	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task, tp);

	/* Map the device register BAR. */
	tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
	if (tp->regs == 0UL) {
		printk(KERN_ERR PFX "Cannot map device registers, "
		       "aborting.\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	tg3_init_link_config(tp);

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
	tp->tx_pending = TG3_DEF_TX_RING_PENDING;

	/* Wire up the netdev callbacks. */
	dev->open = tg3_open;
	dev->stop = tg3_close;
	dev->get_stats = tg3_get_stats;
	dev->set_multicast_list = tg3_set_rx_mode;
	dev->set_mac_address = tg3_set_mac_addr;
	dev->do_ioctl = tg3_ioctl;
	dev->tx_timeout = tg3_tx_timeout;
	dev->poll = tg3_poll;
	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->weight = 64;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->change_mtu = tg3_change_mtu;
	dev->irq = pdev->irq;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = tg3_poll_controller;
#endif

	/* Must run before the DMA mask setup below, which consults the
	 * flags this discovers.
	 */
	err = tg3_get_invariants(tp);
	if (err) {
		printk(KERN_ERR PFX "Problem fetching invariants of chip, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
		persist_dma_mask = dma_mask = DMA_32BIT_MASK;
	else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
		persist_dma_mask = dma_mask = DMA_40BIT_MASK;
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_64BIT_MASK;
#endif
	} else
		persist_dma_mask = dma_mask = DMA_64BIT_MASK;

	/* Configure DMA attributes. */
	if (dma_mask > DMA_32BIT_MASK) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			dev->features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				printk(KERN_ERR PFX "Unable to obtain 64 bit "
				       "DMA for consistent allocations\n");
				goto err_out_iounmap;
			}
		}
	}
	/* Fall back to (or start with) a 32-bit streaming mask. */
	if (err || dma_mask == DMA_32BIT_MASK) {
		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (err) {
			printk(KERN_ERR PFX "No usable DMA configuration, "
			       "aborting.\n");
			goto err_out_iounmap;
		}
	}

	tg3_init_bufmgr_config(tp);

#if TG3_TSO_SUPPORT != 0
	/* Decide TSO capability by chip family; some older chips or
	 * ASF-enabled configurations cannot do it.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
	}
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
	} else {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
		dev->features |= NETIF_F_TSO;
		if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)
			dev->features |= NETIF_F_TSO6;
	}

#endif

	/* 5705_A1 without TSO on a slow bus gets a smaller RX ring. */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
	    !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
		tp->rx_pending = 63;
	}

	/* Dual-port chips: locate the sibling PCI function. */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
		tp->pdev_peer = tg3_find_peer(tp);

	err = tg3_get_device_address(tp);
	if (err) {
		printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	/*
	 * Reset chip in case UNDI or EFI driver did not shutdown
	 * DMA self test will enable WDMAC and we'll see (spurious)
	 * pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		pci_save_state(tp->pdev);
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
		goto err_out_iounmap;
	}

	/* Tigon3 can do ipv4 only... and some chips have buggy
	 * checksumming.
	 */
	if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
			dev->features |= NETIF_F_HW_CSUM;
		else
			dev->features |= NETIF_F_IP_CSUM;
		dev->features |= NETIF_F_SG;
		tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
	} else
		tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;

	/* flow control autonegotiation is default behavior */
	tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;

	tg3_init_coal(tp);

	/* Now that we have fully setup the chip, save away a snapshot
	 * of the PCI config space.  We need to restore this after
	 * GRC_MISC_CFG core clock resets and some resume events.
	 */
	pci_save_state(tp->pdev);

	err = register_netdev(dev);
	if (err) {
		printk(KERN_ERR PFX "Cannot register net device, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	pci_set_drvdata(pdev, dev);

	/* Announce the device and its configuration. */
	printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %sBaseT Ethernet ",
	       dev->name,
	       tp->board_part_number,
	       tp->pci_chip_rev_id,
	       tg3_phy_string(tp),
	       tg3_bus_string(tp, str),
	       (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");

	for (i = 0; i < 6; i++)
		printk("%2.2x%c", dev->dev_addr[i],
		       i == 5 ? '\n' : ':');

	printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
	       "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
	       "TSOcap[%d] \n",
	       dev->name,
	       (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
	       (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
	       (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
	       (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
	       (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
	printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
	       dev->name, tp->dma_rwctrl,
	       (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
	        (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));

	netif_carrier_off(tp->dev);

	return 0;

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
11709
11710 static void __devexit tg3_remove_one(struct pci_dev *pdev)
11711 {
11712         struct net_device *dev = pci_get_drvdata(pdev);
11713
11714         if (dev) {
11715                 struct tg3 *tp = netdev_priv(dev);
11716
11717                 flush_scheduled_work();
11718                 unregister_netdev(dev);
11719                 if (tp->regs) {
11720                         iounmap(tp->regs);
11721                         tp->regs = NULL;
11722                 }
11723                 free_netdev(dev);
11724                 pci_release_regions(pdev);
11725                 pci_disable_device(pdev);
11726                 pci_set_drvdata(pdev, NULL);
11727         }
11728 }
11729
/* PM suspend hook: quiesce the NIC and drop it into the low-power
 * state chosen by the PM core.  Returns 0 on success; on failure the
 * chip is restarted so the interface stays usable, and the error is
 * propagated so the PM core aborts the suspend.
 */
static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* Interface is down: nothing to quiesce. */
	if (!netif_running(dev))
		return 0;

	/* Let any pending reset task finish before stopping the queues. */
	flush_scheduled_work();
	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	/* irq_sync=1: also synchronize with the interrupt handler. */
	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
	tg3_full_unlock(tp);

	err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
	if (err) {
		/* Power transition failed: bring the hardware back up so
		 * the interface keeps working.  err is still returned so
		 * the PM core knows the suspend did not happen.
		 */
		tg3_full_lock(tp, 0);

		tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
		if (tg3_restart_hw(tp, 1))
			goto out;

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);
	}

	return err;
}
11775
/* PM resume hook: restore PCI config space, power the chip back to
 * D0, reprogram it, and re-attach the interface.  Returns 0 on
 * success or a negative error from the power/restart steps.
 */
static int tg3_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* Interface was down at suspend time; leave it down. */
	if (!netif_running(dev))
		return 0;

	/* Config space snapshot was taken at probe time (pci_save_state). */
	pci_restore_state(tp->pdev);

	err = tg3_set_power_state(tp, PCI_D0);
	if (err)
		return err;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
	err = tg3_restart_hw(tp, 1);
	if (err)
		goto out;

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	return err;
}
11810
/* PCI driver glue: binds the probe/remove and power-management
 * entry points to the device IDs listed in tg3_pci_tbl.
 */
static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.suspend	= tg3_suspend,
	.resume		= tg3_resume
};
11819
11820 static int __init tg3_init(void)
11821 {
11822         return pci_module_init(&tg3_driver);
11823 }
11824
/* Module exit point: unbind the driver from all bound devices. */
static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}
11829
/* Wire the entry/exit functions into the module loader. */
module_init(tg3_init);
module_exit(tg3_cleanup);