[linux-2.6] drivers/net/tg3.c
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18 #include <linux/config.h>
19
20 #include <linux/module.h>
21 #include <linux/moduleparam.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/in.h>
28 #include <linux/init.h>
29 #include <linux/ioport.h>
30 #include <linux/pci.h>
31 #include <linux/netdevice.h>
32 #include <linux/etherdevice.h>
33 #include <linux/skbuff.h>
34 #include <linux/ethtool.h>
35 #include <linux/mii.h>
36 #include <linux/if_vlan.h>
37 #include <linux/ip.h>
38 #include <linux/tcp.h>
39 #include <linux/workqueue.h>
40 #include <linux/prefetch.h>
41 #include <linux/dma-mapping.h>
42
43 #include <net/checksum.h>
44
45 #include <asm/system.h>
46 #include <asm/io.h>
47 #include <asm/byteorder.h>
48 #include <asm/uaccess.h>
49
50 #ifdef CONFIG_SPARC64
51 #include <asm/idprom.h>
52 #include <asm/oplib.h>
53 #include <asm/pbm.h>
54 #endif
55
56 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
57 #define TG3_VLAN_TAG_USED 1
58 #else
59 #define TG3_VLAN_TAG_USED 0
60 #endif
61
62 #ifdef NETIF_F_TSO
63 #define TG3_TSO_SUPPORT 1
64 #else
65 #define TG3_TSO_SUPPORT 0
66 #endif
67
68 #include "tg3.h"
69
70 #define DRV_MODULE_NAME         "tg3"
71 #define PFX DRV_MODULE_NAME     ": "
72 #define DRV_MODULE_VERSION      "3.54"
73 #define DRV_MODULE_RELDATE      "Mar 23, 2006"
74
75 #define TG3_DEF_MAC_MODE        0
76 #define TG3_DEF_RX_MODE         0
77 #define TG3_DEF_TX_MODE         0
78 #define TG3_DEF_MSG_ENABLE        \
79         (NETIF_MSG_DRV          | \
80          NETIF_MSG_PROBE        | \
81          NETIF_MSG_LINK         | \
82          NETIF_MSG_TIMER        | \
83          NETIF_MSG_IFDOWN       | \
84          NETIF_MSG_IFUP         | \
85          NETIF_MSG_RX_ERR       | \
86          NETIF_MSG_TX_ERR)
87
88 /* length of time before we decide the hardware is borked,
89  * and dev->tx_timeout() should be called to fix the problem
90  */
91 #define TG3_TX_TIMEOUT                  (5 * HZ)
92
93 /* hardware minimum and maximum for a single frame's data payload */
94 #define TG3_MIN_MTU                     60
95 #define TG3_MAX_MTU(tp) \
96         ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
97
98 /* These numbers appear to be hard-coded in the NIC firmware.
99  * You can't change the ring sizes, but you can change where you place
100  * them in the NIC onboard memory.
101  */
102 #define TG3_RX_RING_SIZE                512
103 #define TG3_DEF_RX_RING_PENDING         200
104 #define TG3_RX_JUMBO_RING_SIZE          256
105 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
106
107 /* Do not place these n-ring-entries values into the tp struct itself;
108  * we really want to expose these constants to GCC so that modulo and
109  * related operations are done with shifts and masks instead of with
110  * hw multiply/modulo instructions.  Another solution would be to
111  * replace things like '% foo' with '& (foo - 1)'.
112  */
113 #define TG3_RX_RCB_RING_SIZE(tp)        \
114         ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)
115
116 #define TG3_TX_RING_SIZE                512
117 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
118
119 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
120                                  TG3_RX_RING_SIZE)
121 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
122                                  TG3_RX_JUMBO_RING_SIZE)
123 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
124                                    TG3_RX_RCB_RING_SIZE(tp))
125 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
126                                  TG3_TX_RING_SIZE)
127 #define TX_BUFFS_AVAIL(TP)                                              \
128         ((TP)->tx_pending -                                             \
129          (((TP)->tx_prod - (TP)->tx_cons) & (TG3_TX_RING_SIZE - 1)))
130 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
131
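/* Illustrative note (not from the original source): the ring sizes above
 * are powers of two, so an index modulo the ring size reduces to a
 * bitwise AND with (size - 1).  With TG3_TX_RING_SIZE == 512:
 *
 *	(510 + 1) % 512 == 511 == ((510 + 1) & 511)
 *	(511 + 1) % 512 ==   0 == ((511 + 1) & 511)
 *
 * which is exactly what NEXT_TX() computes, and what TX_BUFFS_AVAIL()
 * relies on when it masks the producer/consumer difference.
 */
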
132 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
133 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
134
135 /* minimum number of free TX descriptors required to wake up TX process */
136 #define TG3_TX_WAKEUP_THRESH            (TG3_TX_RING_SIZE / 4)
137
138 /* number of ETHTOOL_GSTATS u64's */
139 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
140
141 #define TG3_NUM_TEST            6
142
143 static char version[] __devinitdata =
144         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
145
146 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
147 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
148 MODULE_LICENSE("GPL");
149 MODULE_VERSION(DRV_MODULE_VERSION);
150
151 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
152 module_param(tg3_debug, int, 0);
153 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
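
/* Illustrative usage (an assumed example, using the standard NETIF_MSG_*
 * bit values): the debug mask can be set at module load time, e.g.
 *
 *	modprobe tg3 tg3_debug=0x00ff
 *
 * which selects the same message classes as TG3_DEF_MSG_ENABLE above,
 * while the default of -1 leaves TG3_DEF_MSG_ENABLE in effect.
 */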
154
155 static struct pci_device_id tg3_pci_tbl[] = {
156         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
157           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
158         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
159           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
160         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
161           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
162         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
163           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
164         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
165           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
166         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
167           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
168         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
169           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
170         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
171           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
172         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
173           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
174         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
175           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
176         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
177           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
178         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
179           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
180         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
181           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
182         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
183           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
184         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
185           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
186         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
187           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
188         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
189           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
190         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
191           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
192         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
193           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
194         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
195           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
196         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
197           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
198         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
199           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
200         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
201           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
202         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
203           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
204         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
205           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
206         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
207           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
208         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
209           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
210         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
211           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
212         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
213           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
214         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752,
215           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
216         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M,
217           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
218         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
219           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
220         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
221           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
222         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
223           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
224         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754,
225           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
226         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M,
227           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
228         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755,
229           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
230         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M,
231           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
232         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787,
233           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
234         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M,
235           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
236         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714,
237           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
238         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S,
239           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
240         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715,
241           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
242         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S,
243           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
244         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780,
245           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
246         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S,
247           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
248         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781,
249           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
250         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
251           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
252         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
253           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
254         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
255           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
256         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
257           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
258         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
259           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
260         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
261           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
262         { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
263           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
264         { 0, }
265 };
266
267 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
268
269 static struct {
270         const char string[ETH_GSTRING_LEN];
271 } ethtool_stats_keys[TG3_NUM_STATS] = {
272         { "rx_octets" },
273         { "rx_fragments" },
274         { "rx_ucast_packets" },
275         { "rx_mcast_packets" },
276         { "rx_bcast_packets" },
277         { "rx_fcs_errors" },
278         { "rx_align_errors" },
279         { "rx_xon_pause_rcvd" },
280         { "rx_xoff_pause_rcvd" },
281         { "rx_mac_ctrl_rcvd" },
282         { "rx_xoff_entered" },
283         { "rx_frame_too_long_errors" },
284         { "rx_jabbers" },
285         { "rx_undersize_packets" },
286         { "rx_in_length_errors" },
287         { "rx_out_length_errors" },
288         { "rx_64_or_less_octet_packets" },
289         { "rx_65_to_127_octet_packets" },
290         { "rx_128_to_255_octet_packets" },
291         { "rx_256_to_511_octet_packets" },
292         { "rx_512_to_1023_octet_packets" },
293         { "rx_1024_to_1522_octet_packets" },
294         { "rx_1523_to_2047_octet_packets" },
295         { "rx_2048_to_4095_octet_packets" },
296         { "rx_4096_to_8191_octet_packets" },
297         { "rx_8192_to_9022_octet_packets" },
298
299         { "tx_octets" },
300         { "tx_collisions" },
301
302         { "tx_xon_sent" },
303         { "tx_xoff_sent" },
304         { "tx_flow_control" },
305         { "tx_mac_errors" },
306         { "tx_single_collisions" },
307         { "tx_mult_collisions" },
308         { "tx_deferred" },
309         { "tx_excessive_collisions" },
310         { "tx_late_collisions" },
311         { "tx_collide_2times" },
312         { "tx_collide_3times" },
313         { "tx_collide_4times" },
314         { "tx_collide_5times" },
315         { "tx_collide_6times" },
316         { "tx_collide_7times" },
317         { "tx_collide_8times" },
318         { "tx_collide_9times" },
319         { "tx_collide_10times" },
320         { "tx_collide_11times" },
321         { "tx_collide_12times" },
322         { "tx_collide_13times" },
323         { "tx_collide_14times" },
324         { "tx_collide_15times" },
325         { "tx_ucast_packets" },
326         { "tx_mcast_packets" },
327         { "tx_bcast_packets" },
328         { "tx_carrier_sense_errors" },
329         { "tx_discards" },
330         { "tx_errors" },
331
332         { "dma_writeq_full" },
333         { "dma_write_prioq_full" },
334         { "rxbds_empty" },
335         { "rx_discards" },
336         { "rx_errors" },
337         { "rx_threshold_hit" },
338
339         { "dma_readq_full" },
340         { "dma_read_prioq_full" },
341         { "tx_comp_queue_full" },
342
343         { "ring_set_send_prod_index" },
344         { "ring_status_update" },
345         { "nic_irqs" },
346         { "nic_avoided_irqs" },
347         { "nic_tx_threshold_hit" }
348 };
349
350 static struct {
351         const char string[ETH_GSTRING_LEN];
352 } ethtool_test_keys[TG3_NUM_TEST] = {
353         { "nvram test     (online) " },
354         { "link test      (online) " },
355         { "register test  (offline)" },
356         { "memory test    (offline)" },
357         { "loopback test  (offline)" },
358         { "interrupt test (offline)" },
359 };
360
361 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
362 {
363         writel(val, tp->regs + off);
364 }
365
366 static u32 tg3_read32(struct tg3 *tp, u32 off)
367 {
368         return readl(tp->regs + off);
369 }
370
371 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
372 {
373         unsigned long flags;
374
375         spin_lock_irqsave(&tp->indirect_lock, flags);
376         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
377         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
378         spin_unlock_irqrestore(&tp->indirect_lock, flags);
379 }
380
381 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
382 {
383         writel(val, tp->regs + off);
384         readl(tp->regs + off);
385 }
386
387 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
388 {
389         unsigned long flags;
390         u32 val;
391
392         spin_lock_irqsave(&tp->indirect_lock, flags);
393         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
394         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
395         spin_unlock_irqrestore(&tp->indirect_lock, flags);
396         return val;
397 }
398
399 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
400 {
401         unsigned long flags;
402
403         if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
404                 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
405                                        TG3_64BIT_REG_LOW, val);
406                 return;
407         }
408         if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
409                 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
410                                        TG3_64BIT_REG_LOW, val);
411                 return;
412         }
413
414         spin_lock_irqsave(&tp->indirect_lock, flags);
415         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
416         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
417         spin_unlock_irqrestore(&tp->indirect_lock, flags);
418
419         /* In indirect mode when disabling interrupts, we also need
420          * to clear the interrupt bit in the GRC local ctrl register.
421          */
422         if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
423             (val == 0x1)) {
424                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
425                                        tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
426         }
427 }
428
429 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
430 {
431         unsigned long flags;
432         u32 val;
433
434         spin_lock_irqsave(&tp->indirect_lock, flags);
435         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
436         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
437         spin_unlock_irqrestore(&tp->indirect_lock, flags);
438         return val;
439 }
440
441 /* usec_wait specifies the wait time in usec when writing to certain registers
442  * where it is unsafe to read back the register without some delay.
443  * GRC_LOCAL_CTRL is one example, when the GPIOs are toggled to switch power.
444  * TG3PCI_CLOCK_CTRL is another example, when the clock frequencies are changed.
445  */
446 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
447 {
448         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
449             (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
450                 /* Non-posted methods */
451                 tp->write32(tp, off, val);
452         else {
453                 /* Posted method */
454                 tg3_write32(tp, off, val);
455                 if (usec_wait)
456                         udelay(usec_wait);
457                 tp->read32(tp, off);
458         }
459         /* Wait again after the read for the posted method to guarantee that
460          * the wait time is met.
461          */
462         if (usec_wait)
463                 udelay(usec_wait);
464 }
465
466 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
467 {
468         tp->write32_mbox(tp, off, val);
469         if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
470             !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
471                 tp->read32_mbox(tp, off);
472 }
473
474 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
475 {
476         void __iomem *mbox = tp->regs + off;
477         writel(val, mbox);
478         if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
479                 writel(val, mbox);
480         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
481                 readl(mbox);
482 }
483
484 #define tw32_mailbox(reg, val)  tp->write32_mbox(tp, reg, val)
485 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
486 #define tw32_rx_mbox(reg, val)  tp->write32_rx_mbox(tp, reg, val)
487 #define tw32_tx_mbox(reg, val)  tp->write32_tx_mbox(tp, reg, val)
488 #define tr32_mailbox(reg)       tp->read32_mbox(tp, reg)
489
490 #define tw32(reg,val)           tp->write32(tp, reg, val)
491 #define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val), 0)
492 #define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
493 #define tr32(reg)               tp->read32(tp, reg)
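
/* Illustrative sketch of how the accessors above are used; the register
 * names are only examples taken from elsewhere in this driver:
 *
 *	tw32(GRC_MODE, val);                      - write via selected accessor
 *	tw32_f(MAC_MODE, val);                    - write, then read back to flush
 *	tw32_wait_f(TG3PCI_CLOCK_CTRL, val, 40);  - write, flush, wait 40 usec
 *	val = tr32(MAC_STATUS);                   - register read
 *
 * Whether these become direct MMIO or indirect config-space accesses
 * depends on the tp->write32/tp->read32 hooks chosen at probe time.
 */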
494
495 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
496 {
497         unsigned long flags;
498
499         spin_lock_irqsave(&tp->indirect_lock, flags);
500         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
501         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
502
503         /* Always leave this as zero. */
504         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
505         spin_unlock_irqrestore(&tp->indirect_lock, flags);
506 }
507
508 static void tg3_write_mem_fast(struct tg3 *tp, u32 off, u32 val)
509 {
510         /* If no workaround is needed, write to mem space directly */
511         if (tp->write32 != tg3_write_indirect_reg32)
512                 tw32(NIC_SRAM_WIN_BASE + off, val);
513         else
514                 tg3_write_mem(tp, off, val);
515 }
516
517 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
518 {
519         unsigned long flags;
520
521         spin_lock_irqsave(&tp->indirect_lock, flags);
522         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
523         pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
524
525         /* Always leave this as zero. */
526         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
527         spin_unlock_irqrestore(&tp->indirect_lock, flags);
528 }
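
/* Illustrative example of the memory-window helpers above; the mailbox
 * and magic value mirror the poll loop used later in this file:
 *
 *	u32 val;
 *
 *	tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
 *	if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
 *		... firmware has posted the expected status ...
 *
 * Both helpers restore TG3PCI_MEM_WIN_BASE_ADDR to zero before returning,
 * so the window is always left in a known state.
 */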
529
530 static void tg3_disable_ints(struct tg3 *tp)
531 {
532         tw32(TG3PCI_MISC_HOST_CTRL,
533              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
534         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
535 }
536
537 static inline void tg3_cond_int(struct tg3 *tp)
538 {
539         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
540             (tp->hw_status->status & SD_STATUS_UPDATED))
541                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
542 }
543
544 static void tg3_enable_ints(struct tg3 *tp)
545 {
546         tp->irq_sync = 0;
547         wmb();
548
549         tw32(TG3PCI_MISC_HOST_CTRL,
550              (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
551         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
552                        (tp->last_tag << 24));
553         if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
554                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
555                                (tp->last_tag << 24));
556         tg3_cond_int(tp);
557 }
558
559 static inline unsigned int tg3_has_work(struct tg3 *tp)
560 {
561         struct tg3_hw_status *sblk = tp->hw_status;
562         unsigned int work_exists = 0;
563
564         /* check for phy events */
565         if (!(tp->tg3_flags &
566               (TG3_FLAG_USE_LINKCHG_REG |
567                TG3_FLAG_POLL_SERDES))) {
568                 if (sblk->status & SD_STATUS_LINK_CHG)
569                         work_exists = 1;
570         }
571         /* check for RX/TX work to do */
572         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
573             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
574                 work_exists = 1;
575
576         return work_exists;
577 }
578
579 /* tg3_restart_ints
580  *  Similar to tg3_enable_ints, but it accurately determines whether there
581  *  is new work pending and can return without flushing the PIO write
582  *  which re-enables interrupts.
583  */
584 static void tg3_restart_ints(struct tg3 *tp)
585 {
586         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
587                      tp->last_tag << 24);
588         mmiowb();
589
590         /* When doing tagged status, this work check is unnecessary.
591          * The last_tag we write above tells the chip which piece of
592          * work we've completed.
593          */
594         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
595             tg3_has_work(tp))
596                 tw32(HOSTCC_MODE, tp->coalesce_mode |
597                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
598 }
599
600 static inline void tg3_netif_stop(struct tg3 *tp)
601 {
602         tp->dev->trans_start = jiffies; /* prevent tx timeout */
603         netif_poll_disable(tp->dev);
604         netif_tx_disable(tp->dev);
605 }
606
607 static inline void tg3_netif_start(struct tg3 *tp)
608 {
609         netif_wake_queue(tp->dev);
610         /* NOTE: unconditional netif_wake_queue is only appropriate
611          * so long as all callers are assured to have free tx slots
612          * (such as after tg3_init_hw)
613          */
614         netif_poll_enable(tp->dev);
615         tp->hw_status->status |= SD_STATUS_UPDATED;
616         tg3_enable_ints(tp);
617 }
618
619 static void tg3_switch_clocks(struct tg3 *tp)
620 {
621         u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
622         u32 orig_clock_ctrl;
623
624         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
625                 return;
626
627         orig_clock_ctrl = clock_ctrl;
628         clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
629                        CLOCK_CTRL_CLKRUN_OENABLE |
630                        0x1f);
631         tp->pci_clock_ctrl = clock_ctrl;
632
633         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
634                 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
635                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
636                                     clock_ctrl | CLOCK_CTRL_625_CORE, 40);
637                 }
638         } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
639                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
640                             clock_ctrl |
641                             (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
642                             40);
643                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
644                             clock_ctrl | (CLOCK_CTRL_ALTCLK),
645                             40);
646         }
647         tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
648 }
649
650 #define PHY_BUSY_LOOPS  5000
651
652 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
653 {
654         u32 frame_val;
655         unsigned int loops;
656         int ret;
657
658         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
659                 tw32_f(MAC_MI_MODE,
660                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
661                 udelay(80);
662         }
663
664         *val = 0x0;
665
666         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
667                       MI_COM_PHY_ADDR_MASK);
668         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
669                       MI_COM_REG_ADDR_MASK);
670         frame_val |= (MI_COM_CMD_READ | MI_COM_START);
671         
672         tw32_f(MAC_MI_COM, frame_val);
673
674         loops = PHY_BUSY_LOOPS;
675         while (loops != 0) {
676                 udelay(10);
677                 frame_val = tr32(MAC_MI_COM);
678
679                 if ((frame_val & MI_COM_BUSY) == 0) {
680                         udelay(5);
681                         frame_val = tr32(MAC_MI_COM);
682                         break;
683                 }
684                 loops -= 1;
685         }
686
687         ret = -EBUSY;
688         if (loops != 0) {
689                 *val = frame_val & MI_COM_DATA_MASK;
690                 ret = 0;
691         }
692
693         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
694                 tw32_f(MAC_MI_MODE, tp->mi_mode);
695                 udelay(80);
696         }
697
698         return ret;
699 }
700
701 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
702 {
703         u32 frame_val;
704         unsigned int loops;
705         int ret;
706
707         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
708                 tw32_f(MAC_MI_MODE,
709                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
710                 udelay(80);
711         }
712
713         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
714                       MI_COM_PHY_ADDR_MASK);
715         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
716                       MI_COM_REG_ADDR_MASK);
717         frame_val |= (val & MI_COM_DATA_MASK);
718         frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
719         
720         tw32_f(MAC_MI_COM, frame_val);
721
722         loops = PHY_BUSY_LOOPS;
723         while (loops != 0) {
724                 udelay(10);
725                 frame_val = tr32(MAC_MI_COM);
726                 if ((frame_val & MI_COM_BUSY) == 0) {
727                         udelay(5);
728                         frame_val = tr32(MAC_MI_COM);
729                         break;
730                 }
731                 loops -= 1;
732         }
733
734         ret = -EBUSY;
735         if (loops != 0)
736                 ret = 0;
737
738         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
739                 tw32_f(MAC_MI_MODE, tp->mi_mode);
740                 udelay(80);
741         }
742
743         return ret;
744 }
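
/* Illustrative usage of tg3_readphy()/tg3_writephy(); the double BMSR
 * read mirrors the link-handling code in this driver, since the PHY
 * latches link-down events in that register:
 *
 *	u32 bmsr;
 *
 *	tg3_readphy(tp, MII_BMSR, &bmsr);
 *	if (!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS))
 *		... link is currently up ...
 */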
745
746 static void tg3_phy_set_wirespeed(struct tg3 *tp)
747 {
748         u32 val;
749
750         if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
751                 return;
752
753         if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
754             !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
755                 tg3_writephy(tp, MII_TG3_AUX_CTRL,
756                              (val | (1 << 15) | (1 << 4)));
757 }
758
759 static int tg3_bmcr_reset(struct tg3 *tp)
760 {
761         u32 phy_control;
762         int limit, err;
763
764         /* OK, reset it, and poll the BMCR_RESET bit until it
765          * clears or we time out.
766          */
767         phy_control = BMCR_RESET;
768         err = tg3_writephy(tp, MII_BMCR, phy_control);
769         if (err != 0)
770                 return -EBUSY;
771
772         limit = 5000;
773         while (limit--) {
774                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
775                 if (err != 0)
776                         return -EBUSY;
777
778                 if ((phy_control & BMCR_RESET) == 0) {
779                         udelay(40);
780                         break;
781                 }
782                 udelay(10);
783         }
784         if (limit <= 0)
785                 return -EBUSY;
786
787         return 0;
788 }
789
790 static int tg3_wait_macro_done(struct tg3 *tp)
791 {
792         int limit = 100;
793
794         while (limit--) {
795                 u32 tmp32;
796
797                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
798                         if ((tmp32 & 0x1000) == 0)
799                                 break;
800                 }
801         }
802         if (limit <= 0)
803                 return -EBUSY;
804
805         return 0;
806 }
807
808 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
809 {
810         static const u32 test_pat[4][6] = {
811         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
812         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
813         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
814         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
815         };
816         int chan;
817
818         for (chan = 0; chan < 4; chan++) {
819                 int i;
820
821                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
822                              (chan * 0x2000) | 0x0200);
823                 tg3_writephy(tp, 0x16, 0x0002);
824
825                 for (i = 0; i < 6; i++)
826                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
827                                      test_pat[chan][i]);
828
829                 tg3_writephy(tp, 0x16, 0x0202);
830                 if (tg3_wait_macro_done(tp)) {
831                         *resetp = 1;
832                         return -EBUSY;
833                 }
834
835                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
836                              (chan * 0x2000) | 0x0200);
837                 tg3_writephy(tp, 0x16, 0x0082);
838                 if (tg3_wait_macro_done(tp)) {
839                         *resetp = 1;
840                         return -EBUSY;
841                 }
842
843                 tg3_writephy(tp, 0x16, 0x0802);
844                 if (tg3_wait_macro_done(tp)) {
845                         *resetp = 1;
846                         return -EBUSY;
847                 }
848
849                 for (i = 0; i < 6; i += 2) {
850                         u32 low, high;
851
852                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
853                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
854                             tg3_wait_macro_done(tp)) {
855                                 *resetp = 1;
856                                 return -EBUSY;
857                         }
858                         low &= 0x7fff;
859                         high &= 0x000f;
860                         if (low != test_pat[chan][i] ||
861                             high != test_pat[chan][i+1]) {
862                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
863                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
864                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
865
866                                 return -EBUSY;
867                         }
868                 }
869         }
870
871         return 0;
872 }
873
874 static int tg3_phy_reset_chanpat(struct tg3 *tp)
875 {
876         int chan;
877
878         for (chan = 0; chan < 4; chan++) {
879                 int i;
880
881                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
882                              (chan * 0x2000) | 0x0200);
883                 tg3_writephy(tp, 0x16, 0x0002);
884                 for (i = 0; i < 6; i++)
885                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
886                 tg3_writephy(tp, 0x16, 0x0202);
887                 if (tg3_wait_macro_done(tp))
888                         return -EBUSY;
889         }
890
891         return 0;
892 }
893
894 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
895 {
896         u32 reg32, phy9_orig;
897         int retries, do_phy_reset, err;
898
899         retries = 10;
900         do_phy_reset = 1;
901         do {
902                 if (do_phy_reset) {
903                         err = tg3_bmcr_reset(tp);
904                         if (err)
905                                 return err;
906                         do_phy_reset = 0;
907                 }
908
909                 /* Disable transmitter and interrupt.  */
910                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
911                         continue;
912
913                 reg32 |= 0x3000;
914                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
915
916                 /* Set full-duplex, 1000 mbps.  */
917                 tg3_writephy(tp, MII_BMCR,
918                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
919
920                 /* Set to master mode.  */
921                 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
922                         continue;
923
924                 tg3_writephy(tp, MII_TG3_CTRL,
925                              (MII_TG3_CTRL_AS_MASTER |
926                               MII_TG3_CTRL_ENABLE_AS_MASTER));
927
928                 /* Enable SM_DSP_CLOCK and 6dB.  */
929                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
930
931                 /* Block the PHY control access.  */
932                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
933                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
934
935                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
936                 if (!err)
937                         break;
938         } while (--retries);
939
940         err = tg3_phy_reset_chanpat(tp);
941         if (err)
942                 return err;
943
944         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
945         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
946
947         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
948         tg3_writephy(tp, 0x16, 0x0000);
949
950         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
951             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
952                 /* Set Extended packet length bit for jumbo frames */
953                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
954         }
955         else {
956                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
957         }
958
959         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
960
961         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
962                 reg32 &= ~0x3000;
963                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
964         } else if (!err)
965                 err = -EBUSY;
966
967         return err;
968 }
969
970 /* This will reset the tigon3 PHY unconditionally; callers decide
971  * whether a reset is actually needed.
972  */
973 static int tg3_phy_reset(struct tg3 *tp)
974 {
975         u32 phy_status;
976         int err;
977
978         err  = tg3_readphy(tp, MII_BMSR, &phy_status);
979         err |= tg3_readphy(tp, MII_BMSR, &phy_status);
980         if (err != 0)
981                 return -EBUSY;
982
983         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
984             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
985             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
986                 err = tg3_phy_reset_5703_4_5(tp);
987                 if (err)
988                         return err;
989                 goto out;
990         }
991
992         err = tg3_bmcr_reset(tp);
993         if (err)
994                 return err;
995
996 out:
997         if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
998                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
999                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1000                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
1001                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1002                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
1003                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1004         }
1005         if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
1006                 tg3_writephy(tp, 0x1c, 0x8d68);
1007                 tg3_writephy(tp, 0x1c, 0x8d68);
1008         }
1009         if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
1010                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1011                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1012                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
1013                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1014                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
1015                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
1016                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
1017                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1018         }
1019         /* Set the Extended packet length bit (bit 14) on all chips
1020          * that support jumbo frames. */
1021         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1022                 /* Cannot do read-modify-write on 5401 */
1023                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1024         } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1025                 u32 phy_reg;
1026
1027                 /* Set bit 14 with read-modify-write to preserve other bits */
1028                 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
1029                     !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
1030                         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
1031         }
1032
1033         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
1034          * jumbo frame transmission.
1035          */
1036         if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1037                 u32 phy_reg;
1038
1039                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
1040                     tg3_writephy(tp, MII_TG3_EXT_CTRL,
1041                                  phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
1042         }
1043
1044         tg3_phy_set_wirespeed(tp);
1045         return 0;
1046 }
1047
1048 static void tg3_frob_aux_power(struct tg3 *tp)
1049 {
1050         struct tg3 *tp_peer = tp;
1051
1052         if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
1053                 return;
1054
1055         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
1056             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
1057                 struct net_device *dev_peer;
1058
1059                 dev_peer = pci_get_drvdata(tp->pdev_peer);
1060                 /* remove_one() may have been run on the peer. */
1061                 if (!dev_peer)
1062                         tp_peer = tp;
1063                 else
1064                         tp_peer = netdev_priv(dev_peer);
1065         }
1066
1067         if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1068             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
1069             (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1070             (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
1071                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1072                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1073                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1074                                     (GRC_LCLCTRL_GPIO_OE0 |
1075                                      GRC_LCLCTRL_GPIO_OE1 |
1076                                      GRC_LCLCTRL_GPIO_OE2 |
1077                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
1078                                      GRC_LCLCTRL_GPIO_OUTPUT1),
1079                                     100);
1080                 } else {
1081                         u32 no_gpio2;
1082                         u32 grc_local_ctrl = 0;
1083
1084                         if (tp_peer != tp &&
1085                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1086                                 return;
1087
1088                         /* Workaround to prevent overdrawing Amps. */
1089                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1090                             ASIC_REV_5714) {
1091                                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
1092                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1093                                             grc_local_ctrl, 100);
1094                         }
1095
1096                         /* On 5753 and variants, GPIO2 cannot be used. */
1097                         no_gpio2 = tp->nic_sram_data_cfg &
1098                                     NIC_SRAM_DATA_CFG_NO_GPIO2;
1099
1100                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
1101                                          GRC_LCLCTRL_GPIO_OE1 |
1102                                          GRC_LCLCTRL_GPIO_OE2 |
1103                                          GRC_LCLCTRL_GPIO_OUTPUT1 |
1104                                          GRC_LCLCTRL_GPIO_OUTPUT2;
1105                         if (no_gpio2) {
1106                                 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
1107                                                     GRC_LCLCTRL_GPIO_OUTPUT2);
1108                         }
1109                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1110                                                     grc_local_ctrl, 100);
1111
1112                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
1113
1114                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1115                                                     grc_local_ctrl, 100);
1116
1117                         if (!no_gpio2) {
1118                                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
1119                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1120                                             grc_local_ctrl, 100);
1121                         }
1122                 }
1123         } else {
1124                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
1125                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
1126                         if (tp_peer != tp &&
1127                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1128                                 return;
1129
1130                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1131                                     (GRC_LCLCTRL_GPIO_OE1 |
1132                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1133
1134                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1135                                     GRC_LCLCTRL_GPIO_OE1, 100);
1136
1137                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1138                                     (GRC_LCLCTRL_GPIO_OE1 |
1139                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1140                 }
1141         }
1142 }
1143
1144 static int tg3_setup_phy(struct tg3 *, int);
1145
1146 #define RESET_KIND_SHUTDOWN     0
1147 #define RESET_KIND_INIT         1
1148 #define RESET_KIND_SUSPEND      2
1149
1150 static void tg3_write_sig_post_reset(struct tg3 *, int);
1151 static int tg3_halt_cpu(struct tg3 *, u32);
1152 static int tg3_nvram_lock(struct tg3 *);
1153 static void tg3_nvram_unlock(struct tg3 *);
1154
1155 static void tg3_power_down_phy(struct tg3 *tp)
1156 {
1157         /* The PHY should not be powered down on some chips because
1158          * of bugs.
1159          */
1160         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1161             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1162             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
1163              (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
1164                 return;
1165         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
1166 }
1167
1168 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
1169 {
1170         u32 misc_host_ctrl;
1171         u16 power_control, power_caps;
1172         int pm = tp->pm_cap;
1173
1174         /* Make sure register accesses (indirect or otherwise)
1175          * will function correctly.
1176          */
1177         pci_write_config_dword(tp->pdev,
1178                                TG3PCI_MISC_HOST_CTRL,
1179                                tp->misc_host_ctrl);
1180
1181         pci_read_config_word(tp->pdev,
1182                              pm + PCI_PM_CTRL,
1183                              &power_control);
1184         power_control |= PCI_PM_CTRL_PME_STATUS;
1185         power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1186         switch (state) {
1187         case PCI_D0:
1188                 power_control |= 0;
1189                 pci_write_config_word(tp->pdev,
1190                                       pm + PCI_PM_CTRL,
1191                                       power_control);
1192                 udelay(100);    /* Delay after power state change */
1193
1194                 /* Switch out of Vaux if it is not a LOM */
1195                 if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
1196                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
1197
1198                 return 0;
1199
1200         case PCI_D1:
1201                 power_control |= 1;
1202                 break;
1203
1204         case PCI_D2:
1205                 power_control |= 2;
1206                 break;
1207
1208         case PCI_D3hot:
1209                 power_control |= 3;
1210                 break;
1211
1212         default:
1213                 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1214                        "requested.\n",
1215                        tp->dev->name, state);
1216                 return -EINVAL;
1217         }
1218
1219         power_control |= PCI_PM_CTRL_PME_ENABLE;
1220
1221         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1222         tw32(TG3PCI_MISC_HOST_CTRL,
1223              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1224
1225         if (tp->link_config.phy_is_low_power == 0) {
1226                 tp->link_config.phy_is_low_power = 1;
1227                 tp->link_config.orig_speed = tp->link_config.speed;
1228                 tp->link_config.orig_duplex = tp->link_config.duplex;
1229                 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1230         }
1231
1232         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
1233                 tp->link_config.speed = SPEED_10;
1234                 tp->link_config.duplex = DUPLEX_HALF;
1235                 tp->link_config.autoneg = AUTONEG_ENABLE;
1236                 tg3_setup_phy(tp, 0);
1237         }
1238
1239         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1240                 int i;
1241                 u32 val;
1242
1243                 for (i = 0; i < 200; i++) {
1244                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
1245                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1246                                 break;
1247                         msleep(1);
1248                 }
1249         }
1250         tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
1251                                              WOL_DRV_STATE_SHUTDOWN |
1252                                              WOL_DRV_WOL | WOL_SET_MAGIC_PKT);
1253
1254         pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1255
1256         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1257                 u32 mac_mode;
1258
1259                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1260                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1261                         udelay(40);
1262
1263                         mac_mode = MAC_MODE_PORT_MODE_MII;
1264
1265                         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
1266                             !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
1267                                 mac_mode |= MAC_MODE_LINK_POLARITY;
1268                 } else {
1269                         mac_mode = MAC_MODE_PORT_MODE_TBI;
1270                 }
1271
1272                 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1273                         tw32(MAC_LED_CTRL, tp->led_ctrl);
1274
1275                 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1276                      (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1277                         mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1278
1279                 tw32_f(MAC_MODE, mac_mode);
1280                 udelay(100);
1281
1282                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1283                 udelay(10);
1284         }
1285
1286         if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1287             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1288              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1289                 u32 base_val;
1290
1291                 base_val = tp->pci_clock_ctrl;
1292                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1293                              CLOCK_CTRL_TXCLK_DISABLE);
1294
1295                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
1296                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
1297         } else if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
1298                 /* do nothing */
1299         } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1300                      (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1301                 u32 newbits1, newbits2;
1302
1303                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1304                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1305                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1306                                     CLOCK_CTRL_TXCLK_DISABLE |
1307                                     CLOCK_CTRL_ALTCLK);
1308                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1309                 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1310                         newbits1 = CLOCK_CTRL_625_CORE;
1311                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1312                 } else {
1313                         newbits1 = CLOCK_CTRL_ALTCLK;
1314                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1315                 }
1316
1317                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
1318                             40);
1319
1320                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
1321                             40);
1322
1323                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1324                         u32 newbits3;
1325
1326                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1327                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1328                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1329                                             CLOCK_CTRL_TXCLK_DISABLE |
1330                                             CLOCK_CTRL_44MHZ_CORE);
1331                         } else {
1332                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1333                         }
1334
1335                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
1336                                     tp->pci_clock_ctrl | newbits3, 40);
1337                 }
1338         }
1339
1340         if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
1341             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1342                 /* Turn off the PHY */
1343                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1344                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1345                                      MII_TG3_EXT_CTRL_FORCE_LED_OFF);
1346                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
1347                         tg3_power_down_phy(tp);
1348                 }
1349         }
1350
1351         tg3_frob_aux_power(tp);
1352
1353         /* Workaround for unstable PLL clock */
1354         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1355             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1356                 u32 val = tr32(0x7d00);
1357
1358                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1359                 tw32(0x7d00, val);
1360                 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1361                         int err;
1362
1363                         err = tg3_nvram_lock(tp);
1364                         tg3_halt_cpu(tp, RX_CPU_BASE);
1365                         if (!err)
1366                                 tg3_nvram_unlock(tp);
1367                 }
1368         }
1369
1370         /* Finally, set the new power state. */
1371         pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1372         udelay(100);    /* Delay after power state change */
1373
1374         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1375
1376         return 0;
1377 }
1378
1379 static void tg3_link_report(struct tg3 *tp)
1380 {
1381         if (!netif_carrier_ok(tp->dev)) {
1382                 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1383         } else {
1384                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1385                        tp->dev->name,
1386                        (tp->link_config.active_speed == SPEED_1000 ?
1387                         1000 :
1388                         (tp->link_config.active_speed == SPEED_100 ?
1389                          100 : 10)),
1390                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1391                         "full" : "half"));
1392
1393                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1394                        "%s for RX.\n",
1395                        tp->dev->name,
1396                        (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1397                        (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1398         }
1399 }
1400
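/* Resolve RX/TX pause settings from the local and link-partner pause
 * advertisements, following the symmetric/asymmetric pause resolution of
 * IEEE 802.3 Annex 28B.  On 1000Base-X (MII_SERDES) links the 1000X pause
 * bits are first mapped onto their 1000Base-T equivalents so a single
 * resolution path covers both media.  Two illustrative outcomes (example
 * values only, not taken from real hardware):
 *
 *   local: PAUSE_CAP             remote: LPA_PAUSE_CAP   -> RX and TX pause
 *   local: PAUSE_CAP|PAUSE_ASYM  remote: LPA_PAUSE_ASYM  -> RX pause only
 *
 * MAC_RX_MODE/MAC_TX_MODE are only rewritten when the resolved setting
 * actually changes.
 */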
1401 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1402 {
1403         u32 new_tg3_flags = 0;
1404         u32 old_rx_mode = tp->rx_mode;
1405         u32 old_tx_mode = tp->tx_mode;
1406
1407         if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1408
1409                 /* Convert 1000BaseX flow control bits to 1000BaseT
1410                  * bits before resolving flow control.
1411                  */
1412                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
1413                         local_adv &= ~(ADVERTISE_PAUSE_CAP |
1414                                        ADVERTISE_PAUSE_ASYM);
1415                         remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1416
1417                         if (local_adv & ADVERTISE_1000XPAUSE)
1418                                 local_adv |= ADVERTISE_PAUSE_CAP;
1419                         if (local_adv & ADVERTISE_1000XPSE_ASYM)
1420                                 local_adv |= ADVERTISE_PAUSE_ASYM;
1421                         if (remote_adv & LPA_1000XPAUSE)
1422                                 remote_adv |= LPA_PAUSE_CAP;
1423                         if (remote_adv & LPA_1000XPAUSE_ASYM)
1424                                 remote_adv |= LPA_PAUSE_ASYM;
1425                 }
1426
1427                 if (local_adv & ADVERTISE_PAUSE_CAP) {
1428                         if (local_adv & ADVERTISE_PAUSE_ASYM) {
1429                                 if (remote_adv & LPA_PAUSE_CAP)
1430                                         new_tg3_flags |=
1431                                                 (TG3_FLAG_RX_PAUSE |
1432                                                 TG3_FLAG_TX_PAUSE);
1433                                 else if (remote_adv & LPA_PAUSE_ASYM)
1434                                         new_tg3_flags |=
1435                                                 (TG3_FLAG_RX_PAUSE);
1436                         } else {
1437                                 if (remote_adv & LPA_PAUSE_CAP)
1438                                         new_tg3_flags |=
1439                                                 (TG3_FLAG_RX_PAUSE |
1440                                                 TG3_FLAG_TX_PAUSE);
1441                         }
1442                 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1443                         if ((remote_adv & LPA_PAUSE_CAP) &&
1444                         (remote_adv & LPA_PAUSE_ASYM))
1445                                 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1446                 }
1447
1448                 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1449                 tp->tg3_flags |= new_tg3_flags;
1450         } else {
1451                 new_tg3_flags = tp->tg3_flags;
1452         }
1453
1454         if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1455                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1456         else
1457                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1458
1459         if (old_rx_mode != tp->rx_mode) {
1460                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1461         }
1462
1463         if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1464                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1465         else
1466                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1467
1468         if (old_tx_mode != tp->tx_mode) {
1469                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1470         }
1471 }
1472
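/* Decode the speed/duplex field of the Broadcom auxiliary status summary
 * register (MII_TG3_AUX_STAT) into generic SPEED_xxx/DUPLEX_xxx values.
 * Any encoding other than the six valid combinations is reported as
 * SPEED_INVALID/DUPLEX_INVALID.
 */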
1473 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1474 {
1475         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1476         case MII_TG3_AUX_STAT_10HALF:
1477                 *speed = SPEED_10;
1478                 *duplex = DUPLEX_HALF;
1479                 break;
1480
1481         case MII_TG3_AUX_STAT_10FULL:
1482                 *speed = SPEED_10;
1483                 *duplex = DUPLEX_FULL;
1484                 break;
1485
1486         case MII_TG3_AUX_STAT_100HALF:
1487                 *speed = SPEED_100;
1488                 *duplex = DUPLEX_HALF;
1489                 break;
1490
1491         case MII_TG3_AUX_STAT_100FULL:
1492                 *speed = SPEED_100;
1493                 *duplex = DUPLEX_FULL;
1494                 break;
1495
1496         case MII_TG3_AUX_STAT_1000HALF:
1497                 *speed = SPEED_1000;
1498                 *duplex = DUPLEX_HALF;
1499                 break;
1500
1501         case MII_TG3_AUX_STAT_1000FULL:
1502                 *speed = SPEED_1000;
1503                 *duplex = DUPLEX_FULL;
1504                 break;
1505
1506         default:
1507                 *speed = SPEED_INVALID;
1508                 *duplex = DUPLEX_INVALID;
1509                 break;
1510         }
1511 }
1512
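/* Program the copper PHY advertisement registers from link_config and
 * kick off (auto)negotiation.  Three cases are handled: low-power mode
 * (advertise 10Mb only, plus 100Mb when WOL needs it), autonegotiation
 * with no speed forced (advertise everything the board supports), and a
 * forced speed/duplex, where BMCR_LOOPBACK is written first so the link
 * is guaranteed to drop before the forced BMCR value is applied.
 */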
1513 static void tg3_phy_copper_begin(struct tg3 *tp)
1514 {
1515         u32 new_adv;
1516         int i;
1517
1518         if (tp->link_config.phy_is_low_power) {
1519                 /* Entering low power mode.  Disable gigabit and
1520                  * 100baseT advertisements.
1521                  */
1522                 tg3_writephy(tp, MII_TG3_CTRL, 0);
1523
1524                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1525                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1526                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1527                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1528
1529                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1530         } else if (tp->link_config.speed == SPEED_INVALID) {
1531                 tp->link_config.advertising =
1532                         (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
1533                          ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
1534                          ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
1535                          ADVERTISED_Autoneg | ADVERTISED_MII);
1536
1537                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1538                         tp->link_config.advertising &=
1539                                 ~(ADVERTISED_1000baseT_Half |
1540                                   ADVERTISED_1000baseT_Full);
1541
1542                 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1543                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1544                         new_adv |= ADVERTISE_10HALF;
1545                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1546                         new_adv |= ADVERTISE_10FULL;
1547                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1548                         new_adv |= ADVERTISE_100HALF;
1549                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1550                         new_adv |= ADVERTISE_100FULL;
1551                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1552
1553                 if (tp->link_config.advertising &
1554                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1555                         new_adv = 0;
1556                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1557                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1558                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1559                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1560                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1561                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1562                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1563                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1564                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1565                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1566                 } else {
1567                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1568                 }
1569         } else {
1570                 /* Asking for a specific link mode. */
1571                 if (tp->link_config.speed == SPEED_1000) {
1572                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1573                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1574
1575                         if (tp->link_config.duplex == DUPLEX_FULL)
1576                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1577                         else
1578                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1579                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1580                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1581                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1582                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1583                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1584                 } else {
1585                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1586
1587                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1588                         if (tp->link_config.speed == SPEED_100) {
1589                                 if (tp->link_config.duplex == DUPLEX_FULL)
1590                                         new_adv |= ADVERTISE_100FULL;
1591                                 else
1592                                         new_adv |= ADVERTISE_100HALF;
1593                         } else {
1594                                 if (tp->link_config.duplex == DUPLEX_FULL)
1595                                         new_adv |= ADVERTISE_10FULL;
1596                                 else
1597                                         new_adv |= ADVERTISE_10HALF;
1598                         }
1599                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1600                 }
1601         }
1602
1603         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1604             tp->link_config.speed != SPEED_INVALID) {
1605                 u32 bmcr, orig_bmcr;
1606
1607                 tp->link_config.active_speed = tp->link_config.speed;
1608                 tp->link_config.active_duplex = tp->link_config.duplex;
1609
1610                 bmcr = 0;
1611                 switch (tp->link_config.speed) {
1612                 default:
1613                 case SPEED_10:
1614                         break;
1615
1616                 case SPEED_100:
1617                         bmcr |= BMCR_SPEED100;
1618                         break;
1619
1620                 case SPEED_1000:
1621                         bmcr |= TG3_BMCR_SPEED1000;
1622                         break;
1623                 }
1624
1625                 if (tp->link_config.duplex == DUPLEX_FULL)
1626                         bmcr |= BMCR_FULLDPLX;
1627
1628                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1629                     (bmcr != orig_bmcr)) {
1630                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1631                         for (i = 0; i < 1500; i++) {
1632                                 u32 tmp;
1633
1634                                 udelay(10);
1635                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1636                                     tg3_readphy(tp, MII_BMSR, &tmp))
1637                                         continue;
1638                                 if (!(tmp & BMSR_LSTATUS)) {
1639                                         udelay(40);
1640                                         break;
1641                                 }
1642                         }
1643                         tg3_writephy(tp, MII_BMCR, bmcr);
1644                         udelay(40);
1645                 }
1646         } else {
1647                 tg3_writephy(tp, MII_BMCR,
1648                              BMCR_ANENABLE | BMCR_ANRESTART);
1649         }
1650 }
1651
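/* DSP fixups for the BCM5401 PHY.  The DSP coefficients are reached
 * indirectly with the two-step access used throughout this driver:
 *
 *      tg3_writephy(tp, MII_TG3_DSP_ADDRESS, <coefficient address>);
 *      tg3_writephy(tp, MII_TG3_DSP_RW_PORT, <value>);
 *
 * The specific addresses and values below are vendor-provided magic.
 */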
1652 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1653 {
1654         int err;
1655
1656         /* Turn off tap power management. */
1657         /* Set Extended packet length bit */
1658         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1659
1660         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1661         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1662
1663         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1664         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1665
1666         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1667         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1668
1669         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1670         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1671
1672         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1673         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1674
1675         udelay(40);
1676
1677         return err;
1678 }
1679
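/* Return 1 only if the PHY is currently advertising every 10/100 mode,
 * and both gigabit modes as well unless the board is 10/100-only; used
 * to detect a restricted advertisement left over from low-power mode so
 * that autonegotiation can be restarted.
 */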
1680 static int tg3_copper_is_advertising_all(struct tg3 *tp)
1681 {
1682         u32 adv_reg, all_mask;
1683
1684         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1685                 return 0;
1686
1687         all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1688                     ADVERTISE_100HALF | ADVERTISE_100FULL);
1689         if ((adv_reg & all_mask) != all_mask)
1690                 return 0;
1691         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1692                 u32 tg3_ctrl;
1693
1694                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1695                         return 0;
1696
1697                 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1698                             MII_TG3_CTRL_ADV_1000_FULL);
1699                 if ((tg3_ctrl & all_mask) != all_mask)
1700                         return 0;
1701         }
1702         return 1;
1703 }
1704
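/* Establish (or re-validate) the link on a copper PHY: apply per-chip
 * PHY workarounds, wait for link, decode the negotiated speed/duplex
 * from the aux status register, resolve flow control, and then bring
 * MAC_MODE and the link-change event source in line with the result.
 * Carrier changes are reported through tg3_link_report().
 */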
1705 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1706 {
1707         int current_link_up;
1708         u32 bmsr, dummy;
1709         u16 current_speed;
1710         u8 current_duplex;
1711         int i, err;
1712
1713         tw32(MAC_EVENT, 0);
1714
1715         tw32_f(MAC_STATUS,
1716              (MAC_STATUS_SYNC_CHANGED |
1717               MAC_STATUS_CFG_CHANGED |
1718               MAC_STATUS_MI_COMPLETION |
1719               MAC_STATUS_LNKSTATE_CHANGED));
1720         udelay(40);
1721
1722         tp->mi_mode = MAC_MI_MODE_BASE;
1723         tw32_f(MAC_MI_MODE, tp->mi_mode);
1724         udelay(80);
1725
1726         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1727
1728         /* Some third-party PHYs need to be reset on link going
1729          * down.
1730          */
1731         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1732              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1733              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1734             netif_carrier_ok(tp->dev)) {
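                /* BMSR latches link-down events: read it twice so the
                 * second read reflects the current link state rather
                 * than a stale latched failure.
                 */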
1735                 tg3_readphy(tp, MII_BMSR, &bmsr);
1736                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1737                     !(bmsr & BMSR_LSTATUS))
1738                         force_reset = 1;
1739         }
1740         if (force_reset)
1741                 tg3_phy_reset(tp);
1742
1743         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1744                 tg3_readphy(tp, MII_BMSR, &bmsr);
1745                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
1746                     !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1747                         bmsr = 0;
1748
1749                 if (!(bmsr & BMSR_LSTATUS)) {
1750                         err = tg3_init_5401phy_dsp(tp);
1751                         if (err)
1752                                 return err;
1753
1754                         tg3_readphy(tp, MII_BMSR, &bmsr);
1755                         for (i = 0; i < 1000; i++) {
1756                                 udelay(10);
1757                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1758                                     (bmsr & BMSR_LSTATUS)) {
1759                                         udelay(40);
1760                                         break;
1761                                 }
1762                         }
1763
1764                         if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1765                             !(bmsr & BMSR_LSTATUS) &&
1766                             tp->link_config.active_speed == SPEED_1000) {
1767                                 err = tg3_phy_reset(tp);
1768                                 if (!err)
1769                                         err = tg3_init_5401phy_dsp(tp);
1770                                 if (err)
1771                                         return err;
1772                         }
1773                 }
1774         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1775                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1776                 /* 5701 {A0,B0} CRC bug workaround */
1777                 tg3_writephy(tp, 0x15, 0x0a75);
1778                 tg3_writephy(tp, 0x1c, 0x8c68);
1779                 tg3_writephy(tp, 0x1c, 0x8d68);
1780                 tg3_writephy(tp, 0x1c, 0x8c68);
1781         }
1782
1783         /* Clear pending interrupts... */
1784         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1785         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1786
1787         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1788                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
1789         else
1790                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1791
1792         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1793             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1794                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1795                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1796                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1797                 else
1798                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1799         }
1800
1801         current_link_up = 0;
1802         current_speed = SPEED_INVALID;
1803         current_duplex = DUPLEX_INVALID;
1804
1805         if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
1806                 u32 val;
1807
1808                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
1809                 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
1810                 if (!(val & (1 << 10))) {
1811                         val |= (1 << 10);
1812                         tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
1813                         goto relink;
1814                 }
1815         }
1816
1817         bmsr = 0;
1818         for (i = 0; i < 100; i++) {
1819                 tg3_readphy(tp, MII_BMSR, &bmsr);
1820                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1821                     (bmsr & BMSR_LSTATUS))
1822                         break;
1823                 udelay(40);
1824         }
1825
1826         if (bmsr & BMSR_LSTATUS) {
1827                 u32 aux_stat, bmcr;
1828
1829                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1830                 for (i = 0; i < 2000; i++) {
1831                         udelay(10);
1832                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
1833                             aux_stat)
1834                                 break;
1835                 }
1836
1837                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
1838                                              &current_speed,
1839                                              &current_duplex);
1840
1841                 bmcr = 0;
1842                 for (i = 0; i < 200; i++) {
1843                         tg3_readphy(tp, MII_BMCR, &bmcr);
1844                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
1845                                 continue;
1846                         if (bmcr && bmcr != 0x7fff)
1847                                 break;
1848                         udelay(10);
1849                 }
1850
1851                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
1852                         if (bmcr & BMCR_ANENABLE) {
1853                                 current_link_up = 1;
1854
1855                                 /* Force autoneg restart if we are exiting
1856                                  * low power mode.
1857                                  */
1858                                 if (!tg3_copper_is_advertising_all(tp))
1859                                         current_link_up = 0;
1860                         } else {
1861                                 current_link_up = 0;
1862                         }
1863                 } else {
1864                         if (!(bmcr & BMCR_ANENABLE) &&
1865                             tp->link_config.speed == current_speed &&
1866                             tp->link_config.duplex == current_duplex) {
1867                                 current_link_up = 1;
1868                         } else {
1869                                 current_link_up = 0;
1870                         }
1871                 }
1872
1873                 tp->link_config.active_speed = current_speed;
1874                 tp->link_config.active_duplex = current_duplex;
1875         }
1876
1877         if (current_link_up == 1 &&
1878             (tp->link_config.active_duplex == DUPLEX_FULL) &&
1879             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
1880                 u32 local_adv, remote_adv;
1881
1882                 if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
1883                         local_adv = 0;
1884                 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1885
1886                 if (tg3_readphy(tp, MII_LPA, &remote_adv))
1887                         remote_adv = 0;
1888
1889                 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1890
1891                 /* If we are not advertising full pause capability,
1892                  * something is wrong.  Bring the link down and reconfigure.
1893                  */
1894                 if (local_adv != ADVERTISE_PAUSE_CAP) {
1895                         current_link_up = 0;
1896                 } else {
1897                         tg3_setup_flow_control(tp, local_adv, remote_adv);
1898                 }
1899         }
1900 relink:
1901         if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
1902                 u32 tmp;
1903
1904                 tg3_phy_copper_begin(tp);
1905
1906                 tg3_readphy(tp, MII_BMSR, &tmp);
1907                 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
1908                     (tmp & BMSR_LSTATUS))
1909                         current_link_up = 1;
1910         }
1911
1912         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
1913         if (current_link_up == 1) {
1914                 if (tp->link_config.active_speed == SPEED_100 ||
1915                     tp->link_config.active_speed == SPEED_10)
1916                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
1917                 else
1918                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1919         } else
1920                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1921
1922         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
1923         if (tp->link_config.active_duplex == DUPLEX_HALF)
1924                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
1925
1926         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1927         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
1928                 if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
1929                     (current_link_up == 1 &&
1930                      tp->link_config.active_speed == SPEED_10))
1931                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1932         } else {
1933                 if (current_link_up == 1)
1934                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1935         }
1936
1937         /* ??? Without this setting Netgear GA302T PHY does not
1938          * ??? send/receive packets...
1939          */
1940         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
1941             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
1942                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
1943                 tw32_f(MAC_MI_MODE, tp->mi_mode);
1944                 udelay(80);
1945         }
1946
1947         tw32_f(MAC_MODE, tp->mac_mode);
1948         udelay(40);
1949
1950         if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
1951                 /* Polled via timer. */
1952                 tw32_f(MAC_EVENT, 0);
1953         } else {
1954                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
1955         }
1956         udelay(40);
1957
1958         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
1959             current_link_up == 1 &&
1960             tp->link_config.active_speed == SPEED_1000 &&
1961             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
1962              (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
1963                 udelay(120);
1964                 tw32_f(MAC_STATUS,
1965                      (MAC_STATUS_SYNC_CHANGED |
1966                       MAC_STATUS_CFG_CHANGED));
1967                 udelay(40);
1968                 tg3_write_mem(tp,
1969                               NIC_SRAM_FIRMWARE_MBOX,
1970                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
1971         }
1972
1973         if (current_link_up != netif_carrier_ok(tp->dev)) {
1974                 if (current_link_up)
1975                         netif_carrier_on(tp->dev);
1976                 else
1977                         netif_carrier_off(tp->dev);
1978                 tg3_link_report(tp);
1979         }
1980
1981         return 0;
1982 }
1983
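/* Software 1000Base-X autonegotiation, used on fiber boards whose SERDES
 * does not run autoneg in hardware.  The states and MR_* flags below
 * mirror the IEEE 802.3 Clause 37 arbitration state diagram and its
 * management variables; tg3_fiber_aneg_smachine() is called repeatedly
 * from fiber_autoneg() until it reports ANEG_DONE or ANEG_FAILED.
 */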
1984 struct tg3_fiber_aneginfo {
1985         int state;
1986 #define ANEG_STATE_UNKNOWN              0
1987 #define ANEG_STATE_AN_ENABLE            1
1988 #define ANEG_STATE_RESTART_INIT         2
1989 #define ANEG_STATE_RESTART              3
1990 #define ANEG_STATE_DISABLE_LINK_OK      4
1991 #define ANEG_STATE_ABILITY_DETECT_INIT  5
1992 #define ANEG_STATE_ABILITY_DETECT       6
1993 #define ANEG_STATE_ACK_DETECT_INIT      7
1994 #define ANEG_STATE_ACK_DETECT           8
1995 #define ANEG_STATE_COMPLETE_ACK_INIT    9
1996 #define ANEG_STATE_COMPLETE_ACK         10
1997 #define ANEG_STATE_IDLE_DETECT_INIT     11
1998 #define ANEG_STATE_IDLE_DETECT          12
1999 #define ANEG_STATE_LINK_OK              13
2000 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
2001 #define ANEG_STATE_NEXT_PAGE_WAIT       15
2002
2003         u32 flags;
2004 #define MR_AN_ENABLE            0x00000001
2005 #define MR_RESTART_AN           0x00000002
2006 #define MR_AN_COMPLETE          0x00000004
2007 #define MR_PAGE_RX              0x00000008
2008 #define MR_NP_LOADED            0x00000010
2009 #define MR_TOGGLE_TX            0x00000020
2010 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
2011 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
2012 #define MR_LP_ADV_SYM_PAUSE     0x00000100
2013 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
2014 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
2015 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
2016 #define MR_LP_ADV_NEXT_PAGE     0x00001000
2017 #define MR_TOGGLE_RX            0x00002000
2018 #define MR_NP_RX                0x00004000
2019
2020 #define MR_LINK_OK              0x80000000
2021
2022         unsigned long link_time, cur_time;
2023
2024         u32 ability_match_cfg;
2025         int ability_match_count;
2026
2027         char ability_match, idle_match, ack_match;
2028
2029         u32 txconfig, rxconfig;
2030 #define ANEG_CFG_NP             0x00000080
2031 #define ANEG_CFG_ACK            0x00000040
2032 #define ANEG_CFG_RF2            0x00000020
2033 #define ANEG_CFG_RF1            0x00000010
2034 #define ANEG_CFG_PS2            0x00000001
2035 #define ANEG_CFG_PS1            0x00008000
2036 #define ANEG_CFG_HD             0x00004000
2037 #define ANEG_CFG_FD             0x00002000
2038 #define ANEG_CFG_INVAL          0x00001f06
2039
2040 };
2041 #define ANEG_OK         0
2042 #define ANEG_DONE       1
2043 #define ANEG_TIMER_ENAB 2
2044 #define ANEG_FAILED     -1
2045
2046 #define ANEG_STATE_SETTLE_TIME  10000
2047
2048 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2049                                    struct tg3_fiber_aneginfo *ap)
2050 {
2051         unsigned long delta;
2052         u32 rx_cfg_reg;
2053         int ret;
2054
2055         if (ap->state == ANEG_STATE_UNKNOWN) {
2056                 ap->rxconfig = 0;
2057                 ap->link_time = 0;
2058                 ap->cur_time = 0;
2059                 ap->ability_match_cfg = 0;
2060                 ap->ability_match_count = 0;
2061                 ap->ability_match = 0;
2062                 ap->idle_match = 0;
2063                 ap->ack_match = 0;
2064         }
2065         ap->cur_time++;
2066
2067         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2068                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2069
2070                 if (rx_cfg_reg != ap->ability_match_cfg) {
2071                         ap->ability_match_cfg = rx_cfg_reg;
2072                         ap->ability_match = 0;
2073                         ap->ability_match_count = 0;
2074                 } else {
2075                         if (++ap->ability_match_count > 1) {
2076                                 ap->ability_match = 1;
2077                                 ap->ability_match_cfg = rx_cfg_reg;
2078                         }
2079                 }
2080                 if (rx_cfg_reg & ANEG_CFG_ACK)
2081                         ap->ack_match = 1;
2082                 else
2083                         ap->ack_match = 0;
2084
2085                 ap->idle_match = 0;
2086         } else {
2087                 ap->idle_match = 1;
2088                 ap->ability_match_cfg = 0;
2089                 ap->ability_match_count = 0;
2090                 ap->ability_match = 0;
2091                 ap->ack_match = 0;
2092
2093                 rx_cfg_reg = 0;
2094         }
2095
2096         ap->rxconfig = rx_cfg_reg;
2097         ret = ANEG_OK;
2098
2099         switch (ap->state) {
2100         case ANEG_STATE_UNKNOWN:
2101                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2102                         ap->state = ANEG_STATE_AN_ENABLE;
2103
2104                 /* fallthru */
2105         case ANEG_STATE_AN_ENABLE:
2106                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2107                 if (ap->flags & MR_AN_ENABLE) {
2108                         ap->link_time = 0;
2109                         ap->cur_time = 0;
2110                         ap->ability_match_cfg = 0;
2111                         ap->ability_match_count = 0;
2112                         ap->ability_match = 0;
2113                         ap->idle_match = 0;
2114                         ap->ack_match = 0;
2115
2116                         ap->state = ANEG_STATE_RESTART_INIT;
2117                 } else {
2118                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
2119                 }
2120                 break;
2121
2122         case ANEG_STATE_RESTART_INIT:
2123                 ap->link_time = ap->cur_time;
2124                 ap->flags &= ~(MR_NP_LOADED);
2125                 ap->txconfig = 0;
2126                 tw32(MAC_TX_AUTO_NEG, 0);
2127                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2128                 tw32_f(MAC_MODE, tp->mac_mode);
2129                 udelay(40);
2130
2131                 ret = ANEG_TIMER_ENAB;
2132                 ap->state = ANEG_STATE_RESTART;
2133
2134                 /* fallthru */
2135         case ANEG_STATE_RESTART:
2136                 delta = ap->cur_time - ap->link_time;
2137                 if (delta > ANEG_STATE_SETTLE_TIME) {
2138                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2139                 } else {
2140                         ret = ANEG_TIMER_ENAB;
2141                 }
2142                 break;
2143
2144         case ANEG_STATE_DISABLE_LINK_OK:
2145                 ret = ANEG_DONE;
2146                 break;
2147
2148         case ANEG_STATE_ABILITY_DETECT_INIT:
2149                 ap->flags &= ~(MR_TOGGLE_TX);
2150                 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
2151                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2152                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2153                 tw32_f(MAC_MODE, tp->mac_mode);
2154                 udelay(40);
2155
2156                 ap->state = ANEG_STATE_ABILITY_DETECT;
2157                 break;
2158
2159         case ANEG_STATE_ABILITY_DETECT:
2160                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2161                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
2162                 }
2163                 break;
2164
2165         case ANEG_STATE_ACK_DETECT_INIT:
2166                 ap->txconfig |= ANEG_CFG_ACK;
2167                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2168                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2169                 tw32_f(MAC_MODE, tp->mac_mode);
2170                 udelay(40);
2171
2172                 ap->state = ANEG_STATE_ACK_DETECT;
2173
2174                 /* fallthru */
2175         case ANEG_STATE_ACK_DETECT:
2176                 if (ap->ack_match != 0) {
2177                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2178                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2179                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2180                         } else {
2181                                 ap->state = ANEG_STATE_AN_ENABLE;
2182                         }
2183                 } else if (ap->ability_match != 0 &&
2184                            ap->rxconfig == 0) {
2185                         ap->state = ANEG_STATE_AN_ENABLE;
2186                 }
2187                 break;
2188
2189         case ANEG_STATE_COMPLETE_ACK_INIT:
2190                 if (ap->rxconfig & ANEG_CFG_INVAL) {
2191                         ret = ANEG_FAILED;
2192                         break;
2193                 }
2194                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2195                                MR_LP_ADV_HALF_DUPLEX |
2196                                MR_LP_ADV_SYM_PAUSE |
2197                                MR_LP_ADV_ASYM_PAUSE |
2198                                MR_LP_ADV_REMOTE_FAULT1 |
2199                                MR_LP_ADV_REMOTE_FAULT2 |
2200                                MR_LP_ADV_NEXT_PAGE |
2201                                MR_TOGGLE_RX |
2202                                MR_NP_RX);
2203                 if (ap->rxconfig & ANEG_CFG_FD)
2204                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2205                 if (ap->rxconfig & ANEG_CFG_HD)
2206                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2207                 if (ap->rxconfig & ANEG_CFG_PS1)
2208                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
2209                 if (ap->rxconfig & ANEG_CFG_PS2)
2210                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2211                 if (ap->rxconfig & ANEG_CFG_RF1)
2212                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2213                 if (ap->rxconfig & ANEG_CFG_RF2)
2214                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2215                 if (ap->rxconfig & ANEG_CFG_NP)
2216                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
2217
2218                 ap->link_time = ap->cur_time;
2219
2220                 ap->flags ^= (MR_TOGGLE_TX);
2221                 if (ap->rxconfig & 0x0008)
2222                         ap->flags |= MR_TOGGLE_RX;
2223                 if (ap->rxconfig & ANEG_CFG_NP)
2224                         ap->flags |= MR_NP_RX;
2225                 ap->flags |= MR_PAGE_RX;
2226
2227                 ap->state = ANEG_STATE_COMPLETE_ACK;
2228                 ret = ANEG_TIMER_ENAB;
2229                 break;
2230
2231         case ANEG_STATE_COMPLETE_ACK:
2232                 if (ap->ability_match != 0 &&
2233                     ap->rxconfig == 0) {
2234                         ap->state = ANEG_STATE_AN_ENABLE;
2235                         break;
2236                 }
2237                 delta = ap->cur_time - ap->link_time;
2238                 if (delta > ANEG_STATE_SETTLE_TIME) {
2239                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2240                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2241                         } else {
2242                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2243                                     !(ap->flags & MR_NP_RX)) {
2244                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2245                                 } else {
2246                                         ret = ANEG_FAILED;
2247                                 }
2248                         }
2249                 }
2250                 break;
2251
2252         case ANEG_STATE_IDLE_DETECT_INIT:
2253                 ap->link_time = ap->cur_time;
2254                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2255                 tw32_f(MAC_MODE, tp->mac_mode);
2256                 udelay(40);
2257
2258                 ap->state = ANEG_STATE_IDLE_DETECT;
2259                 ret = ANEG_TIMER_ENAB;
2260                 break;
2261
2262         case ANEG_STATE_IDLE_DETECT:
2263                 if (ap->ability_match != 0 &&
2264                     ap->rxconfig == 0) {
2265                         ap->state = ANEG_STATE_AN_ENABLE;
2266                         break;
2267                 }
2268                 delta = ap->cur_time - ap->link_time;
2269                 if (delta > ANEG_STATE_SETTLE_TIME) {
2270                         /* XXX another gem from the Broadcom driver :( */
2271                         ap->state = ANEG_STATE_LINK_OK;
2272                 }
2273                 break;
2274
2275         case ANEG_STATE_LINK_OK:
2276                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2277                 ret = ANEG_DONE;
2278                 break;
2279
2280         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2281                 /* ??? unimplemented */
2282                 break;
2283
2284         case ANEG_STATE_NEXT_PAGE_WAIT:
2285                 /* ??? unimplemented */
2286                 break;
2287
2288         default:
2289                 ret = ANEG_FAILED;
2290                 break;
2291         }
2292
2293         return ret;
2294 }
2295
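/* Drive the software autoneg state machine to completion.  The MAC is
 * put in GMII port mode and told to send configuration code words, then
 * the state machine is polled about once a microsecond for up to ~195ms.
 * Returns 1 on success and passes the MR_* result flags back in *flags.
 */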
2296 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2297 {
2298         int res = 0;
2299         struct tg3_fiber_aneginfo aninfo;
2300         int status = ANEG_FAILED;
2301         unsigned int tick;
2302         u32 tmp;
2303
2304         tw32_f(MAC_TX_AUTO_NEG, 0);
2305
2306         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2307         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2308         udelay(40);
2309
2310         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2311         udelay(40);
2312
2313         memset(&aninfo, 0, sizeof(aninfo));
2314         aninfo.flags |= MR_AN_ENABLE;
2315         aninfo.state = ANEG_STATE_UNKNOWN;
2316         aninfo.cur_time = 0;
2317         tick = 0;
2318         while (++tick < 195000) {
2319                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2320                 if (status == ANEG_DONE || status == ANEG_FAILED)
2321                         break;
2322
2323                 udelay(1);
2324         }
2325
2326         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2327         tw32_f(MAC_MODE, tp->mac_mode);
2328         udelay(40);
2329
2330         *flags = aninfo.flags;
2331
2332         if (status == ANEG_DONE &&
2333             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2334                              MR_LP_ADV_FULL_DUPLEX)))
2335                 res = 1;
2336
2337         return res;
2338 }
2339
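/* Bring the external BCM8002 SerDes PHY into a known state: set the PLL
 * lock range, soft-reset it, select the PMA/channel-1 register bank and
 * toggle POR.  The register numbers and values are vendor-specific; the
 * busy-wait loops stand in for a proper sleep (see the XXX notes).
 */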
2340 static void tg3_init_bcm8002(struct tg3 *tp)
2341 {
2342         u32 mac_status = tr32(MAC_STATUS);
2343         int i;
2344
2345         /* Reset when initializing for the first time or when we have a link. */
2346         if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2347             !(mac_status & MAC_STATUS_PCS_SYNCED))
2348                 return;
2349
2350         /* Set PLL lock range. */
2351         tg3_writephy(tp, 0x16, 0x8007);
2352
2353         /* SW reset */
2354         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2355
2356         /* Wait for reset to complete. */
2357         /* XXX schedule_timeout() ... */
2358         for (i = 0; i < 500; i++)
2359                 udelay(10);
2360
2361         /* Config mode; select PMA/Ch 1 regs. */
2362         tg3_writephy(tp, 0x10, 0x8411);
2363
2364         /* Enable auto-lock and comdet, select txclk for tx. */
2365         tg3_writephy(tp, 0x11, 0x0a10);
2366
2367         tg3_writephy(tp, 0x18, 0x00a0);
2368         tg3_writephy(tp, 0x16, 0x41ff);
2369
2370         /* Assert and deassert POR. */
2371         tg3_writephy(tp, 0x13, 0x0400);
2372         udelay(40);
2373         tg3_writephy(tp, 0x13, 0x0000);
2374
2375         tg3_writephy(tp, 0x11, 0x0a50);
2376         udelay(40);
2377         tg3_writephy(tp, 0x11, 0x0a10);
2378
2379         /* Wait for signal to stabilize */
2380         /* XXX schedule_timeout() ... */
2381         for (i = 0; i < 15000; i++)
2382                 udelay(10);
2383
2384         /* Deselect the channel register so we can read the PHYID
2385          * later.
2386          */
2387         tg3_writephy(tp, 0x10, 0x8011);
2388 }
2389
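/* Fiber link setup when the SERDES does autonegotiation in hardware.
 * The expected SG_DIG_CTRL value enables autoneg and advertises both
 * symmetric and asymmetric pause; if the register does not already hold
 * it, it is (re)written and the PHY is marked just-initted, which lets
 * the next poll tolerate an incomplete negotiation instead of falling
 * back immediately.  Should autoneg not complete while PCS sync is
 * present and no configuration code words are being received, the link
 * is accepted via parallel detection.  Returns nonzero when the link
 * should be considered up.
 */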
2390 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2391 {
2392         u32 sg_dig_ctrl, sg_dig_status;
2393         u32 serdes_cfg, expected_sg_dig_ctrl;
2394         int workaround, port_a;
2395         int current_link_up;
2396
2397         serdes_cfg = 0;
2398         expected_sg_dig_ctrl = 0;
2399         workaround = 0;
2400         port_a = 1;
2401         current_link_up = 0;
2402
2403         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2404             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2405                 workaround = 1;
2406                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2407                         port_a = 0;
2408
2409                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2410                 /* preserve bits 20-23 for voltage regulator */
2411                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2412         }
2413
2414         sg_dig_ctrl = tr32(SG_DIG_CTRL);
2415
2416         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2417                 if (sg_dig_ctrl & (1 << 31)) {
2418                         if (workaround) {
2419                                 u32 val = serdes_cfg;
2420
2421                                 if (port_a)
2422                                         val |= 0xc010000;
2423                                 else
2424                                         val |= 0x4010000;
2425                                 tw32_f(MAC_SERDES_CFG, val);
2426                         }
2427                         tw32_f(SG_DIG_CTRL, 0x01388400);
2428                 }
2429                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2430                         tg3_setup_flow_control(tp, 0, 0);
2431                         current_link_up = 1;
2432                 }
2433                 goto out;
2434         }
2435
2436         /* Want auto-negotiation.  */
2437         expected_sg_dig_ctrl = 0x81388400;
2438
2439         /* Pause capability */
2440         expected_sg_dig_ctrl |= (1 << 11);
2441
2442         /* Asymmetric pause */
2443         expected_sg_dig_ctrl |= (1 << 12);
2444
2445         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2446                 if (workaround)
2447                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2448                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2449                 udelay(5);
2450                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2451
2452                 tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2453         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2454                                  MAC_STATUS_SIGNAL_DET)) {
2455                 int i;
2456
2457                 /* Give it time to negotiate (~200ms) */
2458                 for (i = 0; i < 40000; i++) {
2459                         sg_dig_status = tr32(SG_DIG_STATUS);
2460                         if (sg_dig_status & (0x3))
2461                                 break;
2462                         udelay(5);
2463                 }
2464                 mac_status = tr32(MAC_STATUS);
2465
2466                 if ((sg_dig_status & (1 << 1)) &&
2467                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
2468                         u32 local_adv, remote_adv;
2469
2470                         local_adv = ADVERTISE_PAUSE_CAP;
2471                         remote_adv = 0;
2472                         if (sg_dig_status & (1 << 19))
2473                                 remote_adv |= LPA_PAUSE_CAP;
2474                         if (sg_dig_status & (1 << 20))
2475                                 remote_adv |= LPA_PAUSE_ASYM;
2476
2477                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2478                         current_link_up = 1;
2479                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2480                 } else if (!(sg_dig_status & (1 << 1))) {
2481                         if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
2482                                 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2483                         else {
2484                                 if (workaround) {
2485                                         u32 val = serdes_cfg;
2486
2487                                         if (port_a)
2488                                                 val |= 0xc010000;
2489                                         else
2490                                                 val |= 0x4010000;
2491
2492                                         tw32_f(MAC_SERDES_CFG, val);
2493                                 }
2494
2495                                 tw32_f(SG_DIG_CTRL, 0x01388400);
2496                                 udelay(40);
2497
2498                                 /* Link parallel detection - the link is up
2499                                  * only if we have PCS_SYNC and are not
2500                                  * receiving config code words. */
2501                                 mac_status = tr32(MAC_STATUS);
2502                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2503                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
2504                                         tg3_setup_flow_control(tp, 0, 0);
2505                                         current_link_up = 1;
2506                                 }
2507                         }
2508                 }
2509         }
2510
2511 out:
2512         return current_link_up;
2513 }
2514
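/* Fiber link setup without hardware SERDES autoneg: either run the
 * software Clause 37 state machine above, or, when autonegotiation is
 * disabled, simply force a 1000Mb/s full-duplex link.  Returns nonzero
 * when the link should be considered up.
 */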
2515 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2516 {
2517         int current_link_up = 0;
2518
2519         if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
2520                 tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
2521                 goto out;
2522         }
2523
2524         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2525                 u32 flags;
2526                 int i;
2527
2528                 if (fiber_autoneg(tp, &flags)) {
2529                         u32 local_adv, remote_adv;
2530
2531                         local_adv = ADVERTISE_PAUSE_CAP;
2532                         remote_adv = 0;
2533                         if (flags & MR_LP_ADV_SYM_PAUSE)
2534                                 remote_adv |= LPA_PAUSE_CAP;
2535                         if (flags & MR_LP_ADV_ASYM_PAUSE)
2536                                 remote_adv |= LPA_PAUSE_ASYM;
2537
2538                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2539
2540                         tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2541                         current_link_up = 1;
2542                 }
2543                 for (i = 0; i < 30; i++) {
2544                         udelay(20);
2545                         tw32_f(MAC_STATUS,
2546                                (MAC_STATUS_SYNC_CHANGED |
2547                                 MAC_STATUS_CFG_CHANGED));
2548                         udelay(40);
2549                         if ((tr32(MAC_STATUS) &
2550                              (MAC_STATUS_SYNC_CHANGED |
2551                               MAC_STATUS_CFG_CHANGED)) == 0)
2552                                 break;
2553                 }
2554
2555                 mac_status = tr32(MAC_STATUS);
2556                 if (current_link_up == 0 &&
2557                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
2558                     !(mac_status & MAC_STATUS_RCVD_CFG))
2559                         current_link_up = 1;
2560         } else {
2561                 /* Forcing 1000FD link up. */
2562                 current_link_up = 1;
2563                 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2564
2565                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2566                 udelay(40);
2567         }
2568
2569 out:
2570         return current_link_up;
2571 }
2572
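/* Top-level link setup for TBI-attached fiber boards.  If the link is
 * already up and its status bits are clean the function bails out early;
 * otherwise the MAC is switched to TBI port mode, the external BCM8002
 * PHY is initialized when present, and one of the two helpers above is
 * used to bring the link up.  Carrier changes and any pause/speed/duplex
 * change are then reported.
 */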
2573 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2574 {
2575         u32 orig_pause_cfg;
2576         u16 orig_active_speed;
2577         u8 orig_active_duplex;
2578         u32 mac_status;
2579         int current_link_up;
2580         int i;
2581
2582         orig_pause_cfg =
2583                 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2584                                   TG3_FLAG_TX_PAUSE));
2585         orig_active_speed = tp->link_config.active_speed;
2586         orig_active_duplex = tp->link_config.active_duplex;
2587
2588         if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2589             netif_carrier_ok(tp->dev) &&
2590             (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
2591                 mac_status = tr32(MAC_STATUS);
2592                 mac_status &= (MAC_STATUS_PCS_SYNCED |
2593                                MAC_STATUS_SIGNAL_DET |
2594                                MAC_STATUS_CFG_CHANGED |
2595                                MAC_STATUS_RCVD_CFG);
2596                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
2597                                    MAC_STATUS_SIGNAL_DET)) {
2598                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2599                                             MAC_STATUS_CFG_CHANGED));
2600                         return 0;
2601                 }
2602         }
2603
2604         tw32_f(MAC_TX_AUTO_NEG, 0);
2605
2606         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2607         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2608         tw32_f(MAC_MODE, tp->mac_mode);
2609         udelay(40);
2610
2611         if (tp->phy_id == PHY_ID_BCM8002)
2612                 tg3_init_bcm8002(tp);
2613
2614         /* Enable link change event even when serdes polling.  */
2615         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2616         udelay(40);
2617
2618         current_link_up = 0;
2619         mac_status = tr32(MAC_STATUS);
2620
2621         if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
2622                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
2623         else
2624                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2625
2626         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2627         tw32_f(MAC_MODE, tp->mac_mode);
2628         udelay(40);
2629
2630         tp->hw_status->status =
2631                 (SD_STATUS_UPDATED |
2632                  (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2633
2634         for (i = 0; i < 100; i++) {
2635                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2636                                     MAC_STATUS_CFG_CHANGED));
2637                 udelay(5);
2638                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
2639                                          MAC_STATUS_CFG_CHANGED)) == 0)
2640                         break;
2641         }
2642
2643         mac_status = tr32(MAC_STATUS);
2644         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
2645                 current_link_up = 0;
2646                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2647                         tw32_f(MAC_MODE, (tp->mac_mode |
2648                                           MAC_MODE_SEND_CONFIGS));
2649                         udelay(1);
2650                         tw32_f(MAC_MODE, tp->mac_mode);
2651                 }
2652         }
2653
2654         if (current_link_up == 1) {
2655                 tp->link_config.active_speed = SPEED_1000;
2656                 tp->link_config.active_duplex = DUPLEX_FULL;
2657                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2658                                     LED_CTRL_LNKLED_OVERRIDE |
2659                                     LED_CTRL_1000MBPS_ON));
2660         } else {
2661                 tp->link_config.active_speed = SPEED_INVALID;
2662                 tp->link_config.active_duplex = DUPLEX_INVALID;
2663                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2664                                     LED_CTRL_LNKLED_OVERRIDE |
2665                                     LED_CTRL_TRAFFIC_OVERRIDE));
2666         }
2667
2668         if (current_link_up != netif_carrier_ok(tp->dev)) {
2669                 if (current_link_up)
2670                         netif_carrier_on(tp->dev);
2671                 else
2672                         netif_carrier_off(tp->dev);
2673                 tg3_link_report(tp);
2674         } else {
2675                 u32 now_pause_cfg =
2676                         tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2677                                          TG3_FLAG_TX_PAUSE);
2678                 if (orig_pause_cfg != now_pause_cfg ||
2679                     orig_active_speed != tp->link_config.active_speed ||
2680                     orig_active_duplex != tp->link_config.active_duplex)
2681                         tg3_link_report(tp);
2682         }
2683
2684         return 0;
2685 }
2686
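/* Link setup for SERDES devices that are managed through the MII
 * interface (for example the 5714/5780-class fiber parts).  Autoneg
 * uses the 1000Base-X advertisement bits much like the copper path,
 * and on the 5714 the MAC transmit status supplies the link-up
 * indication in place of the BMSR link bit.
 */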
2687 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
2688 {
2689         int current_link_up, err = 0;
2690         u32 bmsr, bmcr;
2691         u16 current_speed;
2692         u8 current_duplex;
2693
2694         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2695         tw32_f(MAC_MODE, tp->mac_mode);
2696         udelay(40);
2697
2698         tw32(MAC_EVENT, 0);
2699
2700         tw32_f(MAC_STATUS,
2701              (MAC_STATUS_SYNC_CHANGED |
2702               MAC_STATUS_CFG_CHANGED |
2703               MAC_STATUS_MI_COMPLETION |
2704               MAC_STATUS_LNKSTATE_CHANGED));
2705         udelay(40);
2706
2707         if (force_reset)
2708                 tg3_phy_reset(tp);
2709
2710         current_link_up = 0;
2711         current_speed = SPEED_INVALID;
2712         current_duplex = DUPLEX_INVALID;
2713
2714         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2715         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2716         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2717                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2718                         bmsr |= BMSR_LSTATUS;
2719                 else
2720                         bmsr &= ~BMSR_LSTATUS;
2721         }
2722
2723         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
2724
2725         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
2726             (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2727                 /* do nothing, just check for link up at the end */
2728         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2729                 u32 adv, new_adv;
2730
2731                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2732                 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
2733                                   ADVERTISE_1000XPAUSE |
2734                                   ADVERTISE_1000XPSE_ASYM |
2735                                   ADVERTISE_SLCT);
2736
2737                 /* Always advertise symmetric PAUSE just like copper */
2738                 new_adv |= ADVERTISE_1000XPAUSE;
2739
2740                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2741                         new_adv |= ADVERTISE_1000XHALF;
2742                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2743                         new_adv |= ADVERTISE_1000XFULL;
2744
2745                 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
2746                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
2747                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
2748                         tg3_writephy(tp, MII_BMCR, bmcr);
2749
2750                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2751                         tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2752                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2753
2754                         return err;
2755                 }
2756         } else {
2757                 u32 new_bmcr;
2758
2759                 bmcr &= ~BMCR_SPEED1000;
2760                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
2761
2762                 if (tp->link_config.duplex == DUPLEX_FULL)
2763                         new_bmcr |= BMCR_FULLDPLX;
2764
2765                 if (new_bmcr != bmcr) {
2766                         /* BMCR_SPEED1000 is a reserved bit that needs
2767                          * to be set on write.
2768                          */
2769                         new_bmcr |= BMCR_SPEED1000;
2770
2771                         /* Force a linkdown */
2772                         if (netif_carrier_ok(tp->dev)) {
2773                                 u32 adv;
2774
2775                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2776                                 adv &= ~(ADVERTISE_1000XFULL |
2777                                          ADVERTISE_1000XHALF |
2778                                          ADVERTISE_SLCT);
2779                                 tg3_writephy(tp, MII_ADVERTISE, adv);
2780                                 tg3_writephy(tp, MII_BMCR, bmcr |
2781                                                            BMCR_ANRESTART |
2782                                                            BMCR_ANENABLE);
2783                                 udelay(10);
2784                                 netif_carrier_off(tp->dev);
2785                         }
2786                         tg3_writephy(tp, MII_BMCR, new_bmcr);
2787                         bmcr = new_bmcr;
2788                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2789                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2790                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2791                             ASIC_REV_5714) {
2792                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2793                                         bmsr |= BMSR_LSTATUS;
2794                                 else
2795                                         bmsr &= ~BMSR_LSTATUS;
2796                         }
2797                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2798                 }
2799         }
2800
2801         if (bmsr & BMSR_LSTATUS) {
2802                 current_speed = SPEED_1000;
2803                 current_link_up = 1;
2804                 if (bmcr & BMCR_FULLDPLX)
2805                         current_duplex = DUPLEX_FULL;
2806                 else
2807                         current_duplex = DUPLEX_HALF;
2808
2809                 if (bmcr & BMCR_ANENABLE) {
2810                         u32 local_adv, remote_adv, common;
2811
2812                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
2813                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
2814                         common = local_adv & remote_adv;
2815                         if (common & (ADVERTISE_1000XHALF |
2816                                       ADVERTISE_1000XFULL)) {
2817                                 if (common & ADVERTISE_1000XFULL)
2818                                         current_duplex = DUPLEX_FULL;
2819                                 else
2820                                         current_duplex = DUPLEX_HALF;
2821
2822                                 tg3_setup_flow_control(tp, local_adv,
2823                                                        remote_adv);
2824                         }
2825                         else
2826                                 current_link_up = 0;
2827                 }
2828         }
2829
2830         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2831         if (tp->link_config.active_duplex == DUPLEX_HALF)
2832                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2833
2834         tw32_f(MAC_MODE, tp->mac_mode);
2835         udelay(40);
2836
2837         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2838
2839         tp->link_config.active_speed = current_speed;
2840         tp->link_config.active_duplex = current_duplex;
2841
2842         if (current_link_up != netif_carrier_ok(tp->dev)) {
2843                 if (current_link_up)
2844                         netif_carrier_on(tp->dev);
2845                 else {
2846                         netif_carrier_off(tp->dev);
2847                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2848                 }
2849                 tg3_link_report(tp);
2850         }
2851         return err;
2852 }
2853
2854 static void tg3_serdes_parallel_detect(struct tg3 *tp)
2855 {
2856         if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED) {
2857                 /* Give autoneg time to complete. */
2858                 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2859                 return;
2860         }
2861         if (!netif_carrier_ok(tp->dev) &&
2862             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
2863                 u32 bmcr;
2864
2865                 tg3_readphy(tp, MII_BMCR, &bmcr);
2866                 if (bmcr & BMCR_ANENABLE) {
2867                         u32 phy1, phy2;
2868
2869                         /* Select shadow register 0x1f */
2870                         tg3_writephy(tp, 0x1c, 0x7c00);
2871                         tg3_readphy(tp, 0x1c, &phy1);
2872
2873                         /* Select expansion interrupt status register */
2874                         tg3_writephy(tp, 0x17, 0x0f01);
2875                         tg3_readphy(tp, 0x15, &phy2);
2876                         tg3_readphy(tp, 0x15, &phy2);
2877
2878                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
2879                                 /* We have signal detect and are not receiving
2880                                  * config code words, so the link is up by
2881                                  * parallel detection.
2882                                  */
2883
2884                                 bmcr &= ~BMCR_ANENABLE;
2885                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
2886                                 tg3_writephy(tp, MII_BMCR, bmcr);
2887                                 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
2888                         }
2889                 }
2890         }
2891         else if (netif_carrier_ok(tp->dev) &&
2892                  (tp->link_config.autoneg == AUTONEG_ENABLE) &&
2893                  (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2894                 u32 phy2;
2895
2896                 /* Select expansion interrupt status register */
2897                 tg3_writephy(tp, 0x17, 0x0f01);
2898                 tg3_readphy(tp, 0x15, &phy2);
2899                 if (phy2 & 0x20) {
2900                         u32 bmcr;
2901
2902                         /* Config code words received, turn on autoneg. */
2903                         tg3_readphy(tp, MII_BMCR, &bmcr);
2904                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
2905
2906                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2907
2908                 }
2909         }
2910 }
2911
2912 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2913 {
2914         int err;
2915
2916         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2917                 err = tg3_setup_fiber_phy(tp, force_reset);
2918         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
2919                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
2920         } else {
2921                 err = tg3_setup_copper_phy(tp, force_reset);
2922         }
2923
2924         if (tp->link_config.active_speed == SPEED_1000 &&
2925             tp->link_config.active_duplex == DUPLEX_HALF)
2926                 tw32(MAC_TX_LENGTHS,
2927                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2928                       (6 << TX_LENGTHS_IPG_SHIFT) |
2929                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2930         else
2931                 tw32(MAC_TX_LENGTHS,
2932                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2933                       (6 << TX_LENGTHS_IPG_SHIFT) |
2934                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2935
2936         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2937                 if (netif_carrier_ok(tp->dev)) {
2938                         tw32(HOSTCC_STAT_COAL_TICKS,
2939                              tp->coal.stats_block_coalesce_usecs);
2940                 } else {
2941                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
2942                 }
2943         }
2944
2945         return err;
2946 }
2947
2948 /* Tigon3 never reports partial packet sends.  So we do not
2949  * need special logic to handle SKBs that have not had all
2950  * of their frags sent yet, like SunGEM does.
2951  */
2952 static void tg3_tx(struct tg3 *tp)
2953 {
2954         u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
2955         u32 sw_idx = tp->tx_cons;
2956
2957         while (sw_idx != hw_idx) {
2958                 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
2959                 struct sk_buff *skb = ri->skb;
2960                 int i;
2961
2962                 if (unlikely(skb == NULL))
2963                         BUG();
2964
2965                 pci_unmap_single(tp->pdev,
2966                                  pci_unmap_addr(ri, mapping),
2967                                  skb_headlen(skb),
2968                                  PCI_DMA_TODEVICE);
2969
2970                 ri->skb = NULL;
2971
2972                 sw_idx = NEXT_TX(sw_idx);
2973
2974                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2975                         if (unlikely(sw_idx == hw_idx))
2976                                 BUG();
2977
2978                         ri = &tp->tx_buffers[sw_idx];
2979                         if (unlikely(ri->skb != NULL))
2980                                 BUG();
2981
2982                         pci_unmap_page(tp->pdev,
2983                                        pci_unmap_addr(ri, mapping),
2984                                        skb_shinfo(skb)->frags[i].size,
2985                                        PCI_DMA_TODEVICE);
2986
2987                         sw_idx = NEXT_TX(sw_idx);
2988                 }
2989
2990                 dev_kfree_skb(skb);
2991         }
2992
2993         tp->tx_cons = sw_idx;
2994
2995         if (unlikely(netif_queue_stopped(tp->dev))) {
2996                 spin_lock(&tp->tx_lock);
2997                 if (netif_queue_stopped(tp->dev) &&
2998                     (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
2999                         netif_wake_queue(tp->dev);
3000                 spin_unlock(&tp->tx_lock);
3001         }
3002 }
3003
3004 /* Returns size of skb allocated or < 0 on error.
3005  *
3006  * We only need to fill in the address because the other members
3007  * of the RX descriptor are invariant, see tg3_init_rings.
3008  *
3009  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
3010  * posting buffers we only dirty the first cache line of the RX
3011  * descriptor (containing the address).  Whereas for the RX status
3012  * buffers the cpu only reads the last cacheline of the RX descriptor
3013  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3014  */
3015 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3016                             int src_idx, u32 dest_idx_unmasked)
3017 {
3018         struct tg3_rx_buffer_desc *desc;
3019         struct ring_info *map, *src_map;
3020         struct sk_buff *skb;
3021         dma_addr_t mapping;
3022         int skb_size, dest_idx;
3023
3024         src_map = NULL;
3025         switch (opaque_key) {
3026         case RXD_OPAQUE_RING_STD:
3027                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3028                 desc = &tp->rx_std[dest_idx];
3029                 map = &tp->rx_std_buffers[dest_idx];
3030                 if (src_idx >= 0)
3031                         src_map = &tp->rx_std_buffers[src_idx];
3032                 skb_size = tp->rx_pkt_buf_sz;
3033                 break;
3034
3035         case RXD_OPAQUE_RING_JUMBO:
3036                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3037                 desc = &tp->rx_jumbo[dest_idx];
3038                 map = &tp->rx_jumbo_buffers[dest_idx];
3039                 if (src_idx >= 0)
3040                         src_map = &tp->rx_jumbo_buffers[src_idx];
3041                 skb_size = RX_JUMBO_PKT_BUF_SZ;
3042                 break;
3043
3044         default:
3045                 return -EINVAL;
3046         }
3047
3048         /* Do not overwrite any of the map or rp information
3049          * until we are sure we can commit to a new buffer.
3050          *
3051          * Callers depend upon this behavior and assume that
3052          * we leave everything unchanged if we fail.
3053          */
3054         skb = dev_alloc_skb(skb_size);
3055         if (skb == NULL)
3056                 return -ENOMEM;
3057
3058         skb->dev = tp->dev;
3059         skb_reserve(skb, tp->rx_offset);
3060
3061         mapping = pci_map_single(tp->pdev, skb->data,
3062                                  skb_size - tp->rx_offset,
3063                                  PCI_DMA_FROMDEVICE);
3064
3065         map->skb = skb;
3066         pci_unmap_addr_set(map, mapping, mapping);
3067
3068         if (src_map != NULL)
3069                 src_map->skb = NULL;
3070
3071         desc->addr_hi = ((u64)mapping >> 32);
3072         desc->addr_lo = ((u64)mapping & 0xffffffff);
3073
3074         return skb_size;
3075 }
3076
3077 /* We only need to move the address over because the other
3078  * members of the RX descriptor are invariant.  See notes above
3079  * tg3_alloc_rx_skb for full details.
3080  */
3081 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3082                            int src_idx, u32 dest_idx_unmasked)
3083 {
3084         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3085         struct ring_info *src_map, *dest_map;
3086         int dest_idx;
3087
3088         switch (opaque_key) {
3089         case RXD_OPAQUE_RING_STD:
3090                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3091                 dest_desc = &tp->rx_std[dest_idx];
3092                 dest_map = &tp->rx_std_buffers[dest_idx];
3093                 src_desc = &tp->rx_std[src_idx];
3094                 src_map = &tp->rx_std_buffers[src_idx];
3095                 break;
3096
3097         case RXD_OPAQUE_RING_JUMBO:
3098                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3099                 dest_desc = &tp->rx_jumbo[dest_idx];
3100                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3101                 src_desc = &tp->rx_jumbo[src_idx];
3102                 src_map = &tp->rx_jumbo_buffers[src_idx];
3103                 break;
3104
3105         default:
3106                 return;
3107         }
3108
3109         dest_map->skb = src_map->skb;
3110         pci_unmap_addr_set(dest_map, mapping,
3111                            pci_unmap_addr(src_map, mapping));
3112         dest_desc->addr_hi = src_desc->addr_hi;
3113         dest_desc->addr_lo = src_desc->addr_lo;
3114
3115         src_map->skb = NULL;
3116 }
3117
3118 #if TG3_VLAN_TAG_USED
3119 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
3120 {
3121         return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
3122 }
3123 #endif
3124
3125 /* The RX ring scheme is composed of multiple rings which post fresh
3126  * buffers to the chip, and one special ring the chip uses to report
3127  * status back to the host.
3128  *
3129  * The special ring reports the status of received packets to the
3130  * host.  The chip does not write into the original descriptor the
3131  * RX buffer was obtained from.  The chip simply takes the original
3132  * descriptor as provided by the host, updates the status and length
3133  * field, then writes this into the next status ring entry.
3134  *
3135  * Each ring the host uses to post buffers to the chip is described
3136  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
3137  * it is first placed into the on-chip ram.  When the packet's length
3138  * is known, the chip walks down the TG3_BDINFO entries to select the ring.
3139  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
3140  * whose MAXLEN accommodates the new packet's length is chosen.
3141  *
3142  * The "separate ring for rx status" scheme may sound queer, but it makes
3143  * sense from a cache coherency perspective.  If only the host writes
3144  * to the buffer post rings, and only the chip writes to the rx status
3145  * rings, then cache lines never move beyond shared-modified state.
3146  * If both the host and chip were to write into the same ring, cache line
3147  * eviction could occur since both entities want it in an exclusive state.
3148  */
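/* Illustrative sketch only (not driver code): a rough host-side view of
 * the MAXLEN-based ring selection the comment above describes.  The
 * struct and function names below are hypothetical; on real hardware
 * this walk is performed by the chip over the TG3_BDINFO entries in
 * SRAM, not by the driver.
 */
#if 0
struct example_bdinfo {
        u32     maxlen;         /* largest frame this posting ring can hold */
        u32     ring;           /* e.g. RXD_OPAQUE_RING_STD or _JUMBO */
};

static u32 example_pick_rx_ring(const struct example_bdinfo *bd,
                                int nr_rings, u32 frame_len)
{
        int i;

        /* First TG3_BDINFO whose MAXLEN covers the frame is chosen. */
        for (i = 0; i < nr_rings; i++)
                if (frame_len <= bd[i].maxlen)
                        return bd[i].ring;
        return bd[nr_rings - 1].ring;
}
#endif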
3149 static int tg3_rx(struct tg3 *tp, int budget)
3150 {
3151         u32 work_mask;
3152         u32 sw_idx = tp->rx_rcb_ptr;
3153         u16 hw_idx;
3154         int received;
3155
3156         hw_idx = tp->hw_status->idx[0].rx_producer;
3157         /*
3158          * We need to order the read of hw_idx and the read of
3159          * the opaque cookie.
3160          */
3161         rmb();
3162         work_mask = 0;
3163         received = 0;
3164         while (sw_idx != hw_idx && budget > 0) {
3165                 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
3166                 unsigned int len;
3167                 struct sk_buff *skb;
3168                 dma_addr_t dma_addr;
3169                 u32 opaque_key, desc_idx, *post_ptr;
3170
3171                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
3172                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
3173                 if (opaque_key == RXD_OPAQUE_RING_STD) {
3174                         dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
3175                                                   mapping);
3176                         skb = tp->rx_std_buffers[desc_idx].skb;
3177                         post_ptr = &tp->rx_std_ptr;
3178                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
3179                         dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
3180                                                   mapping);
3181                         skb = tp->rx_jumbo_buffers[desc_idx].skb;
3182                         post_ptr = &tp->rx_jumbo_ptr;
3183                 }
3184                 else {
3185                         goto next_pkt_nopost;
3186                 }
3187
3188                 work_mask |= opaque_key;
3189
3190                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
3191                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
3192                 drop_it:
3193                         tg3_recycle_rx(tp, opaque_key,
3194                                        desc_idx, *post_ptr);
3195                 drop_it_no_recycle:
3196                         /* Other statistics are tracked by the card. */
3197                         tp->net_stats.rx_dropped++;
3198                         goto next_pkt;
3199                 }
3200
3201                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
3202
3203                 if (len > RX_COPY_THRESHOLD &&
3204                     tp->rx_offset == 2
3205                     /* rx_offset != 2 iff this is a 5701 card running
3206                      * in PCI-X mode [see tg3_get_invariants()] */
3207                     ) {
3208                         int skb_size;
3209
3210                         skb_size = tg3_alloc_rx_skb(tp, opaque_key,
3211                                                     desc_idx, *post_ptr);
3212                         if (skb_size < 0)
3213                                 goto drop_it;
3214
3215                         pci_unmap_single(tp->pdev, dma_addr,
3216                                          skb_size - tp->rx_offset,
3217                                          PCI_DMA_FROMDEVICE);
3218
3219                         skb_put(skb, len);
3220                 } else {
3221                         struct sk_buff *copy_skb;
3222
3223                         tg3_recycle_rx(tp, opaque_key,
3224                                        desc_idx, *post_ptr);
3225
3226                         copy_skb = dev_alloc_skb(len + 2);
3227                         if (copy_skb == NULL)
3228                                 goto drop_it_no_recycle;
3229
3230                         copy_skb->dev = tp->dev;
3231                         skb_reserve(copy_skb, 2);
3232                         skb_put(copy_skb, len);
3233                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3234                         memcpy(copy_skb->data, skb->data, len);
3235                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3236
3237                         /* We'll reuse the original ring buffer. */
3238                         skb = copy_skb;
3239                 }
3240
3241                 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
3242                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
3243                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
3244                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
3245                         skb->ip_summed = CHECKSUM_UNNECESSARY;
3246                 else
3247                         skb->ip_summed = CHECKSUM_NONE;
3248
3249                 skb->protocol = eth_type_trans(skb, tp->dev);
3250 #if TG3_VLAN_TAG_USED
3251                 if (tp->vlgrp != NULL &&
3252                     desc->type_flags & RXD_FLAG_VLAN) {
3253                         tg3_vlan_rx(tp, skb,
3254                                     desc->err_vlan & RXD_VLAN_MASK);
3255                 } else
3256 #endif
3257                         netif_receive_skb(skb);
3258
3259                 tp->dev->last_rx = jiffies;
3260                 received++;
3261                 budget--;
3262
3263 next_pkt:
3264                 (*post_ptr)++;
3265 next_pkt_nopost:
3266                 sw_idx++;
3267                 sw_idx %= TG3_RX_RCB_RING_SIZE(tp);
3268
3269                 /* Refresh hw_idx to see if there is new work */
3270                 if (sw_idx == hw_idx) {
3271                         hw_idx = tp->hw_status->idx[0].rx_producer;
3272                         rmb();
3273                 }
3274         }
3275
3276         /* ACK the status ring. */
3277         tp->rx_rcb_ptr = sw_idx;
3278         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
3279
3280         /* Refill RX ring(s). */
3281         if (work_mask & RXD_OPAQUE_RING_STD) {
3282                 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
3283                 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
3284                              sw_idx);
3285         }
3286         if (work_mask & RXD_OPAQUE_RING_JUMBO) {
3287                 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
3288                 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
3289                              sw_idx);
3290         }
3291         mmiowb();
3292
3293         return received;
3294 }
3295
3296 static int tg3_poll(struct net_device *netdev, int *budget)
3297 {
3298         struct tg3 *tp = netdev_priv(netdev);
3299         struct tg3_hw_status *sblk = tp->hw_status;
3300         int done;
3301
3302         /* handle link change and other phy events */
3303         if (!(tp->tg3_flags &
3304               (TG3_FLAG_USE_LINKCHG_REG |
3305                TG3_FLAG_POLL_SERDES))) {
3306                 if (sblk->status & SD_STATUS_LINK_CHG) {
3307                         sblk->status = SD_STATUS_UPDATED |
3308                                 (sblk->status & ~SD_STATUS_LINK_CHG);
3309                         spin_lock(&tp->lock);
3310                         tg3_setup_phy(tp, 0);
3311                         spin_unlock(&tp->lock);
3312                 }
3313         }
3314
3315         /* run TX completion thread */
3316         if (sblk->idx[0].tx_consumer != tp->tx_cons) {
3317                 tg3_tx(tp);
3318         }
3319
3320         /* run RX thread, within the bounds set by NAPI.
3321          * All RX "locking" is done by ensuring outside
3322          * code synchronizes with dev->poll()
3323          */
3324         if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
3325                 int orig_budget = *budget;
3326                 int work_done;
3327
3328                 if (orig_budget > netdev->quota)
3329                         orig_budget = netdev->quota;
3330
3331                 work_done = tg3_rx(tp, orig_budget);
3332
3333                 *budget -= work_done;
3334                 netdev->quota -= work_done;
3335         }
3336
3337         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
3338                 tp->last_tag = sblk->status_tag;
3339                 rmb();
3340         } else
3341                 sblk->status &= ~SD_STATUS_UPDATED;
3342
3343         /* if no more work, tell net stack and NIC we're done */
3344         done = !tg3_has_work(tp);
3345         if (done) {
3346                 netif_rx_complete(netdev);
3347                 tg3_restart_ints(tp);
3348         }
3349
3350         return (done ? 0 : 1);
3351 }
3352
3353 static void tg3_irq_quiesce(struct tg3 *tp)
3354 {
3355         BUG_ON(tp->irq_sync);
3356
3357         tp->irq_sync = 1;
3358         smp_mb();
3359
3360         synchronize_irq(tp->pdev->irq);
3361 }
3362
3363 static inline int tg3_irq_sync(struct tg3 *tp)
3364 {
3365         return tp->irq_sync;
3366 }
3367
3368 /* Fully shutdown all tg3 driver activity elsewhere in the system.
3369  * If irq_sync is non-zero, then the IRQ handler must be synchronized
3370  * with as well.  Most of the time, this is not necessary except when
3371  * shutting down the device.
3372  */
3373 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
3374 {
3375         if (irq_sync)
3376                 tg3_irq_quiesce(tp);
3377         spin_lock_bh(&tp->lock);
3378         spin_lock(&tp->tx_lock);
3379 }
3380
3381 static inline void tg3_full_unlock(struct tg3 *tp)
3382 {
3383         spin_unlock(&tp->tx_lock);
3384         spin_unlock_bh(&tp->lock);
3385 }
3386
3387 /* One-shot MSI handler - Chip automatically disables interrupt
3388  * after sending MSI so driver doesn't have to do it.
3389  */
3390 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id, struct pt_regs *regs)
3391 {
3392         struct net_device *dev = dev_id;
3393         struct tg3 *tp = netdev_priv(dev);
3394
3395         prefetch(tp->hw_status);
3396         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3397
3398         if (likely(!tg3_irq_sync(tp)))
3399                 netif_rx_schedule(dev);         /* schedule NAPI poll */
3400
3401         return IRQ_HANDLED;
3402 }
3403
3404 /* MSI ISR - No need to check for interrupt sharing and no need to
3405  * flush status block and interrupt mailbox. PCI ordering rules
3406  * guarantee that MSI will arrive after the status block.
3407  */
3408 static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
3409 {
3410         struct net_device *dev = dev_id;
3411         struct tg3 *tp = netdev_priv(dev);
3412
3413         prefetch(tp->hw_status);
3414         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3415         /*
3416          * Writing any value to intr-mbox-0 clears PCI INTA# and
3417          * chip-internal interrupt pending events.
3418          * Writing non-zero to intr-mbox-0 additionally tells the
3419          * NIC to stop sending us irqs, engaging "in-intr-handler"
3420          * event coalescing.
3421          */
3422         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
3423         if (likely(!tg3_irq_sync(tp)))
3424                 netif_rx_schedule(dev);         /* schedule NAPI poll */
3425
3426         return IRQ_RETVAL(1);
3427 }
3428
3429 static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
3430 {
3431         struct net_device *dev = dev_id;
3432         struct tg3 *tp = netdev_priv(dev);
3433         struct tg3_hw_status *sblk = tp->hw_status;
3434         unsigned int handled = 1;
3435
3436         /* In INTx mode, it is possible for the interrupt to arrive at
3437          * the CPU before the status block posted for it has reached memory.
3438          * Reading the PCI State register will confirm whether the
3439          * interrupt is ours and will flush the status block.
3440          */
3441         if ((sblk->status & SD_STATUS_UPDATED) ||
3442             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3443                 /*
3444                  * Writing any value to intr-mbox-0 clears PCI INTA# and
3445                  * chip-internal interrupt pending events.
3446                  * Writing non-zero to intr-mbox-0 additionally tells the
3447                  * NIC to stop sending us irqs, engaging "in-intr-handler"
3448                  * event coalescing.
3449                  */
3450                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3451                              0x00000001);
3452                 if (tg3_irq_sync(tp))
3453                         goto out;
3454                 sblk->status &= ~SD_STATUS_UPDATED;
3455                 if (likely(tg3_has_work(tp))) {
3456                         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3457                         netif_rx_schedule(dev);         /* schedule NAPI poll */
3458                 } else {
3459                         /* No work, shared interrupt perhaps?  re-enable
3460                          * interrupts, and flush that PCI write
3461                          */
3462                         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3463                                 0x00000000);
3464                 }
3465         } else {        /* shared interrupt */
3466                 handled = 0;
3467         }
3468 out:
3469         return IRQ_RETVAL(handled);
3470 }
3471
3472 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
3473 {
3474         struct net_device *dev = dev_id;
3475         struct tg3 *tp = netdev_priv(dev);
3476         struct tg3_hw_status *sblk = tp->hw_status;
3477         unsigned int handled = 1;
3478
3479         /* In INTx mode, it is possible for the interrupt to arrive at
3480          * the CPU before the status block posted for it has reached memory.
3481          * Reading the PCI State register will confirm whether the
3482          * interrupt is ours and will flush the status block.
3483          */
3484         if ((sblk->status_tag != tp->last_tag) ||
3485             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3486                 /*
3487                  * writing any value to intr-mbox-0 clears PCI INTA# and
3488                  * chip-internal interrupt pending events.
3489                  * writing non-zero to intr-mbox-0 additionally tells the
3490                  * NIC to stop sending us irqs, engaging "in-intr-handler"
3491                  * event coalescing.
3492                  */
3493                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3494                              0x00000001);
3495                 if (tg3_irq_sync(tp))
3496                         goto out;
3497                 if (netif_rx_schedule_prep(dev)) {
3498                         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3499                         /* Update last_tag to mark that this status has been
3500                          * seen. Because interrupt may be shared, we may be
3501                          * racing with tg3_poll(), so only update last_tag
3502                          * if tg3_poll() is not scheduled.
3503                          */
3504                         tp->last_tag = sblk->status_tag;
3505                         __netif_rx_schedule(dev);
3506                 }
3507         } else {        /* shared interrupt */
3508                 handled = 0;
3509         }
3510 out:
3511         return IRQ_RETVAL(handled);
3512 }
3513
3514 /* ISR for interrupt test */
3515 static irqreturn_t tg3_test_isr(int irq, void *dev_id,
3516                 struct pt_regs *regs)
3517 {
3518         struct net_device *dev = dev_id;
3519         struct tg3 *tp = netdev_priv(dev);
3520         struct tg3_hw_status *sblk = tp->hw_status;
3521
3522         if ((sblk->status & SD_STATUS_UPDATED) ||
3523             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3524                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3525                              0x00000001);
3526                 return IRQ_RETVAL(1);
3527         }
3528         return IRQ_RETVAL(0);
3529 }
3530
3531 static int tg3_init_hw(struct tg3 *);
3532 static int tg3_halt(struct tg3 *, int, int);
3533
3534 #ifdef CONFIG_NET_POLL_CONTROLLER
3535 static void tg3_poll_controller(struct net_device *dev)
3536 {
3537         struct tg3 *tp = netdev_priv(dev);
3538
3539         tg3_interrupt(tp->pdev->irq, dev, NULL);
3540 }
3541 #endif
3542
3543 static void tg3_reset_task(void *_data)
3544 {
3545         struct tg3 *tp = _data;
3546         unsigned int restart_timer;
3547
3548         tg3_full_lock(tp, 0);
3549         tp->tg3_flags |= TG3_FLAG_IN_RESET_TASK;
3550
3551         if (!netif_running(tp->dev)) {
3552                 tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
3553                 tg3_full_unlock(tp);
3554                 return;
3555         }
3556
3557         tg3_full_unlock(tp);
3558
3559         tg3_netif_stop(tp);
3560
3561         tg3_full_lock(tp, 1);
3562
3563         restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
3564         tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
3565
3566         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
3567         tg3_init_hw(tp);
3568
3569         tg3_netif_start(tp);
3570
3571         if (restart_timer)
3572                 mod_timer(&tp->timer, jiffies + 1);
3573
3574         tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
3575
3576         tg3_full_unlock(tp);
3577 }
3578
3579 static void tg3_tx_timeout(struct net_device *dev)
3580 {
3581         struct tg3 *tp = netdev_priv(dev);
3582
3583         printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
3584                dev->name);
3585
3586         schedule_work(&tp->reset_task);
3587 }
3588
3589 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
3590 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3591 {
3592         u32 base = (u32) mapping & 0xffffffff;
3593
3594         return ((base > 0xffffdcc0) &&
3595                 (base + len + 8 < base));
3596 }
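/* Worked example (illustrative only, not driver code): with
 * base = 0xffffff00 and len = 0x200, the 32-bit sum base + len + 8
 * wraps to 0x00000108, which is less than base, so the test above
 * reports that the buffer straddles a 4 GB boundary and the caller
 * falls back to tigon3_dma_hwbug_workaround().
 */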
3597
3598 /* Test for DMA addresses > 40-bit */
3599 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
3600                                           int len)
3601 {
3602 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
3603         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
3604                 return (((u64) mapping + len) > DMA_40BIT_MASK);
3605         return 0;
3606 #else
3607         return 0;
3608 #endif
3609 }
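/* Worked example (illustrative only, not driver code): this check is
 * compiled in only for CONFIG_HIGHMEM on 64-bit kernels and applies to
 * 5780-class devices.  A mapping of 0xfffffff000 with len = 0x2000
 * sums to 0x10000001000, which exceeds DMA_40BIT_MASK, so the caller
 * sets would_hit_hwbug and reroutes the packet through
 * tigon3_dma_hwbug_workaround().
 */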
3610
3611 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3612
3613 /* Workaround 4GB and 40-bit hardware DMA bugs. */
3614 static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
3615                                        u32 last_plus_one, u32 *start,
3616                                        u32 base_flags, u32 mss)
3617 {
3618         struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
3619         dma_addr_t new_addr = 0;
3620         u32 entry = *start;
3621         int i, ret = 0;
3622
3623         if (!new_skb) {
3624                 ret = -1;
3625         } else {
3626                 /* New SKB is guaranteed to be linear. */
3627                 entry = *start;
3628                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
3629                                           PCI_DMA_TODEVICE);
3630                 /* Make sure new skb does not cross any 4G boundaries.
3631                  * Drop the packet if it does.
3632                  */
3633                 if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
3634                         ret = -1;
3635                         dev_kfree_skb(new_skb);
3636                         new_skb = NULL;
3637                 } else {
3638                         tg3_set_txd(tp, entry, new_addr, new_skb->len,
3639                                     base_flags, 1 | (mss << 1));
3640                         *start = NEXT_TX(entry);
3641                 }
3642         }
3643
3644         /* Now clean up the sw ring entries. */
3645         i = 0;
3646         while (entry != last_plus_one) {
3647                 int len;
3648
3649                 if (i == 0)
3650                         len = skb_headlen(skb);
3651                 else
3652                         len = skb_shinfo(skb)->frags[i-1].size;
3653                 pci_unmap_single(tp->pdev,
3654                                  pci_unmap_addr(&tp->tx_buffers[entry], mapping),
3655                                  len, PCI_DMA_TODEVICE);
3656                 if (i == 0) {
3657                         tp->tx_buffers[entry].skb = new_skb;
3658                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
3659                 } else {
3660                         tp->tx_buffers[entry].skb = NULL;
3661                 }
3662                 entry = NEXT_TX(entry);
3663                 i++;
3664         }
3665
3666         dev_kfree_skb(skb);
3667
3668         return ret;
3669 }
3670
3671 static void tg3_set_txd(struct tg3 *tp, int entry,
3672                         dma_addr_t mapping, int len, u32 flags,
3673                         u32 mss_and_is_end)
3674 {
3675         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3676         int is_end = (mss_and_is_end & 0x1);
3677         u32 mss = (mss_and_is_end >> 1);
3678         u32 vlan_tag = 0;
3679
3680         if (is_end)
3681                 flags |= TXD_FLAG_END;
3682         if (flags & TXD_FLAG_VLAN) {
3683                 vlan_tag = flags >> 16;
3684                 flags &= 0xffff;
3685         }
3686         vlan_tag |= (mss << TXD_MSS_SHIFT);
3687
3688         txd->addr_hi = ((u64) mapping >> 32);
3689         txd->addr_lo = ((u64) mapping & 0xffffffff);
3690         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3691         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3692 }
3693
3694 /* hard_start_xmit for devices that don't have any bugs and
3695  * support TG3_FLG2_HW_TSO_2 only.
3696  */
3697 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3698 {
3699         struct tg3 *tp = netdev_priv(dev);
3700         dma_addr_t mapping;
3701         u32 len, entry, base_flags, mss;
3702
3703         len = skb_headlen(skb);
3704
3705         /* No BH disabling for tx_lock here.  We are running in BH disabled
3706          * context and TX reclaim runs via tp->poll inside of a software
3707          * interrupt.  Furthermore, IRQ processing runs lockless so we have
3708          * no IRQ context deadlocks to worry about either.  Rejoice!
3709          */
3710         if (!spin_trylock(&tp->tx_lock))
3711                 return NETDEV_TX_LOCKED;
3712
3713         if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3714                 if (!netif_queue_stopped(dev)) {
3715                         netif_stop_queue(dev);
3716
3717                         /* This is a hard error, log it. */
3718                         printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
3719                                "queue awake!\n", dev->name);
3720                 }
3721                 spin_unlock(&tp->tx_lock);
3722                 return NETDEV_TX_BUSY;
3723         }
3724
3725         entry = tp->tx_prod;
3726         base_flags = 0;
3727 #if TG3_TSO_SUPPORT != 0
3728         mss = 0;
3729         if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3730             (mss = skb_shinfo(skb)->tso_size) != 0) {
3731                 int tcp_opt_len, ip_tcp_len;
3732
3733                 if (skb_header_cloned(skb) &&
3734                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3735                         dev_kfree_skb(skb);
3736                         goto out_unlock;
3737                 }
3738
3739                 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3740                 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3741
3742                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3743                                TXD_FLAG_CPU_POST_DMA);
3744
3745                 skb->nh.iph->check = 0;
3746                 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
3747
3748                 skb->h.th->check = 0;
3749
3750                 mss |= (ip_tcp_len + tcp_opt_len) << 9;
3751         }
3752         else if (skb->ip_summed == CHECKSUM_HW)
3753                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3754 #else
3755         mss = 0;
3756         if (skb->ip_summed == CHECKSUM_HW)
3757                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3758 #endif
3759 #if TG3_VLAN_TAG_USED
3760         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3761                 base_flags |= (TXD_FLAG_VLAN |
3762                                (vlan_tx_tag_get(skb) << 16));
3763 #endif
3764
3765         /* Queue skb data, a.k.a. the main skb fragment. */
3766         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3767
3768         tp->tx_buffers[entry].skb = skb;
3769         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3770
3771         tg3_set_txd(tp, entry, mapping, len, base_flags,
3772                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3773
3774         entry = NEXT_TX(entry);
3775
3776         /* Now loop through additional data fragments, and queue them. */
3777         if (skb_shinfo(skb)->nr_frags > 0) {
3778                 unsigned int i, last;
3779
3780                 last = skb_shinfo(skb)->nr_frags - 1;
3781                 for (i = 0; i <= last; i++) {
3782                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3783
3784                         len = frag->size;
3785                         mapping = pci_map_page(tp->pdev,
3786                                                frag->page,
3787                                                frag->page_offset,
3788                                                len, PCI_DMA_TODEVICE);
3789
3790                         tp->tx_buffers[entry].skb = NULL;
3791                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3792
3793                         tg3_set_txd(tp, entry, mapping, len,
3794                                     base_flags, (i == last) | (mss << 1));
3795
3796                         entry = NEXT_TX(entry);
3797                 }
3798         }
3799
3800         /* Packets are ready, update Tx producer idx local and on card. */
3801         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3802
3803         tp->tx_prod = entry;
3804         if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1)) {
3805                 netif_stop_queue(dev);
3806                 if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
3807                         netif_wake_queue(tp->dev);
3808         }
3809
3810 out_unlock:
3811         mmiowb();
3812         spin_unlock(&tp->tx_lock);
3813
3814         dev->trans_start = jiffies;
3815
3816         return NETDEV_TX_OK;
3817 }
3818
3819 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
3820  * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
3821  */
3822 static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
3823 {
3824         struct tg3 *tp = netdev_priv(dev);
3825         dma_addr_t mapping;
3826         u32 len, entry, base_flags, mss;
3827         int would_hit_hwbug;
3828
3829         len = skb_headlen(skb);
3830
3831         /* No BH disabling for tx_lock here.  We are running in BH disabled
3832          * context and TX reclaim runs via tp->poll inside of a software
3833          * interrupt.  Furthermore, IRQ processing runs lockless so we have
3834          * no IRQ context deadlocks to worry about either.  Rejoice!
3835          */
3836         if (!spin_trylock(&tp->tx_lock))
3837                 return NETDEV_TX_LOCKED;
3838
3839         if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3840                 if (!netif_queue_stopped(dev)) {
3841                         netif_stop_queue(dev);
3842
3843                         /* This is a hard error, log it. */
3844                         printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
3845                                "queue awake!\n", dev->name);
3846                 }
3847                 spin_unlock(&tp->tx_lock);
3848                 return NETDEV_TX_BUSY;
3849         }
3850
3851         entry = tp->tx_prod;
3852         base_flags = 0;
3853         if (skb->ip_summed == CHECKSUM_HW)
3854                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3855 #if TG3_TSO_SUPPORT != 0
3856         mss = 0;
3857         if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3858             (mss = skb_shinfo(skb)->tso_size) != 0) {
3859                 int tcp_opt_len, ip_tcp_len;
3860
3861                 if (skb_header_cloned(skb) &&
3862                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3863                         dev_kfree_skb(skb);
3864                         goto out_unlock;
3865                 }
3866
3867                 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3868                 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3869
3870                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3871                                TXD_FLAG_CPU_POST_DMA);
3872
3873                 skb->nh.iph->check = 0;
3874                 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
3875                 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
3876                         skb->h.th->check = 0;
3877                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
3878                 }
3879                 else {
3880                         skb->h.th->check =
3881                                 ~csum_tcpudp_magic(skb->nh.iph->saddr,
3882                                                    skb->nh.iph->daddr,
3883                                                    0, IPPROTO_TCP, 0);
3884                 }
3885
3886                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
3887                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
3888                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3889                                 int tsflags;
3890
3891                                 tsflags = ((skb->nh.iph->ihl - 5) +
3892                                            (tcp_opt_len >> 2));
3893                                 mss |= (tsflags << 11);
3894                         }
3895                 } else {
3896                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3897                                 int tsflags;
3898
3899                                 tsflags = ((skb->nh.iph->ihl - 5) +
3900                                            (tcp_opt_len >> 2));
3901                                 base_flags |= tsflags << 12;
3902                         }
3903                 }
3904         }
3905 #else
3906         mss = 0;
3907 #endif
3908 #if TG3_VLAN_TAG_USED
3909         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3910                 base_flags |= (TXD_FLAG_VLAN |
3911                                (vlan_tx_tag_get(skb) << 16));
3912 #endif
3913
3914         /* Queue skb data, a.k.a. the main skb fragment. */
3915         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3916
3917         tp->tx_buffers[entry].skb = skb;
3918         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3919
3920         would_hit_hwbug = 0;
3921
3922         if (tg3_4g_overflow_test(mapping, len))
3923                 would_hit_hwbug = 1;
3924
3925         tg3_set_txd(tp, entry, mapping, len, base_flags,
3926                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3927
3928         entry = NEXT_TX(entry);
3929
3930         /* Now loop through additional data fragments, and queue them. */
3931         if (skb_shinfo(skb)->nr_frags > 0) {
3932                 unsigned int i, last;
3933
3934                 last = skb_shinfo(skb)->nr_frags - 1;
3935                 for (i = 0; i <= last; i++) {
3936                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3937
3938                         len = frag->size;
3939                         mapping = pci_map_page(tp->pdev,
3940                                                frag->page,
3941                                                frag->page_offset,
3942                                                len, PCI_DMA_TODEVICE);
3943
3944                         tp->tx_buffers[entry].skb = NULL;
3945                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3946
3947                         if (tg3_4g_overflow_test(mapping, len))
3948                                 would_hit_hwbug = 1;
3949
3950                         if (tg3_40bit_overflow_test(tp, mapping, len))
3951                                 would_hit_hwbug = 1;
3952
3953                         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
3954                                 tg3_set_txd(tp, entry, mapping, len,
3955                                             base_flags, (i == last)|(mss << 1));
3956                         else
3957                                 tg3_set_txd(tp, entry, mapping, len,
3958                                             base_flags, (i == last));
3959
3960                         entry = NEXT_TX(entry);
3961                 }
3962         }
3963
3964         if (would_hit_hwbug) {
3965                 u32 last_plus_one = entry;
3966                 u32 start;
3967
3968                 start = entry - 1 - skb_shinfo(skb)->nr_frags;
3969                 start &= (TG3_TX_RING_SIZE - 1);
3970
3971                 /* If the workaround fails due to memory/mapping
3972                  * failure, silently drop this packet.
3973                  */
3974                 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
3975                                                 &start, base_flags, mss))
3976                         goto out_unlock;
3977
3978                 entry = start;
3979         }
3980
3981         /* Packets are ready, update Tx producer idx local and on card. */
3982         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3983
3984         tp->tx_prod = entry;
3985         if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1)) {
3986                 netif_stop_queue(dev);
3987                 if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
3988                         netif_wake_queue(tp->dev);
3989         }
3990
3991 out_unlock:
3992         mmiowb();
3993         spin_unlock(&tp->tx_lock);
3994
3995         dev->trans_start = jiffies;
3996
3997         return NETDEV_TX_OK;
3998 }
3999
4000 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
4001                                int new_mtu)
4002 {
4003         dev->mtu = new_mtu;
4004
4005         if (new_mtu > ETH_DATA_LEN) {
4006                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4007                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
4008                         ethtool_op_set_tso(dev, 0);
4009                 }
4010                 else
4011                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
4012         } else {
4013                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
4014                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
4015                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
4016         }
4017 }
4018
4019 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
4020 {
4021         struct tg3 *tp = netdev_priv(dev);
4022
4023         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
4024                 return -EINVAL;
4025
4026         if (!netif_running(dev)) {
4027                 /* We'll just catch it later when the
4028                  * device is up'd.
4029                  */
4030                 tg3_set_mtu(dev, tp, new_mtu);
4031                 return 0;
4032         }
4033
4034         tg3_netif_stop(tp);
4035
4036         tg3_full_lock(tp, 1);
4037
4038         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4039
4040         tg3_set_mtu(dev, tp, new_mtu);
4041
4042         tg3_init_hw(tp);
4043
4044         tg3_netif_start(tp);
4045
4046         tg3_full_unlock(tp);
4047
4048         return 0;
4049 }
4050
4051 /* Free up pending packets in all rx/tx rings.
4052  *
4053  * The chip has been shut down and the driver detached from
4054  * the networking, so no interrupts or new tx packets will
4055  * the networking stack, so no interrupts or new tx packets will
4056  * in an interrupt context and thus may sleep.
4057  */
4058 static void tg3_free_rings(struct tg3 *tp)
4059 {
4060         struct ring_info *rxp;
4061         int i;
4062
4063         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4064                 rxp = &tp->rx_std_buffers[i];
4065
4066                 if (rxp->skb == NULL)
4067                         continue;
4068                 pci_unmap_single(tp->pdev,
4069                                  pci_unmap_addr(rxp, mapping),
4070                                  tp->rx_pkt_buf_sz - tp->rx_offset,
4071                                  PCI_DMA_FROMDEVICE);
4072                 dev_kfree_skb_any(rxp->skb);
4073                 rxp->skb = NULL;
4074         }
4075
4076         for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4077                 rxp = &tp->rx_jumbo_buffers[i];
4078
4079                 if (rxp->skb == NULL)
4080                         continue;
4081                 pci_unmap_single(tp->pdev,
4082                                  pci_unmap_addr(rxp, mapping),
4083                                  RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
4084                                  PCI_DMA_FROMDEVICE);
4085                 dev_kfree_skb_any(rxp->skb);
4086                 rxp->skb = NULL;
4087         }
4088
4089         for (i = 0; i < TG3_TX_RING_SIZE; ) {
4090                 struct tx_ring_info *txp;
4091                 struct sk_buff *skb;
4092                 int j;
4093
4094                 txp = &tp->tx_buffers[i];
4095                 skb = txp->skb;
4096
4097                 if (skb == NULL) {
4098                         i++;
4099                         continue;
4100                 }
4101
4102                 pci_unmap_single(tp->pdev,
4103                                  pci_unmap_addr(txp, mapping),
4104                                  skb_headlen(skb),
4105                                  PCI_DMA_TODEVICE);
4106                 txp->skb = NULL;
4107
4108                 i++;
4109
4110                 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
4111                         txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
4112                         pci_unmap_page(tp->pdev,
4113                                        pci_unmap_addr(txp, mapping),
4114                                        skb_shinfo(skb)->frags[j].size,
4115                                        PCI_DMA_TODEVICE);
4116                         i++;
4117                 }
4118
4119                 dev_kfree_skb_any(skb);
4120         }
4121 }
4122
4123 /* Initialize tx/rx rings for packet processing.
4124  *
4125  * The chip has been shut down and the driver detached from
4126  * the networking core, so no interrupts or new tx packets will
4127  * arrive in the driver.  tp->{tx,}lock are held, so we
4128  * may not sleep.
4129  */
4130 static void tg3_init_rings(struct tg3 *tp)
4131 {
4132         u32 i;
4133
4134         /* Free up all the SKBs. */
4135         tg3_free_rings(tp);
4136
4137         /* Zero out all descriptors. */
4138         memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
4139         memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
4140         memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
4141         memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
4142
4143         tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
4144         if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
4145             (tp->dev->mtu > ETH_DATA_LEN))
4146                 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
4147
4148         /* Initialize invariants of the rings; we only set this
4149          * stuff once.  This works because the card does not
4150          * write into the rx buffer posting rings.
4151          */
4152         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4153                 struct tg3_rx_buffer_desc *rxd;
4154
4155                 rxd = &tp->rx_std[i];
4156                 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
4157                         << RXD_LEN_SHIFT;
4158                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
4159                 rxd->opaque = (RXD_OPAQUE_RING_STD |
4160                                (i << RXD_OPAQUE_INDEX_SHIFT));
4161         }
4162
4163         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4164                 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4165                         struct tg3_rx_buffer_desc *rxd;
4166
4167                         rxd = &tp->rx_jumbo[i];
4168                         rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
4169                                 << RXD_LEN_SHIFT;
4170                         rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
4171                                 RXD_FLAG_JUMBO;
4172                         rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
4173                                (i << RXD_OPAQUE_INDEX_SHIFT));
4174                 }
4175         }
4176
4177         /* Now allocate fresh SKBs for each rx ring. */
4178         for (i = 0; i < tp->rx_pending; i++) {
4179                 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
4180                                      -1, i) < 0)
4181                         break;
4182         }
4183
4184         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4185                 for (i = 0; i < tp->rx_jumbo_pending; i++) {
4186                         if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
4187                                              -1, i) < 0)
4188                                 break;
4189                 }
4190         }
4191 }
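
/* A minimal sketch of the invariants the loop above programs into one
 * standard rx descriptor, assuming the RXD_* constants from tg3.h.  The
 * helper name is hypothetical and not part of the driver.
 */
static void tg3_example_init_std_rxd(struct tg3 *tp, u32 i)
{
        struct tg3_rx_buffer_desc *rxd = &tp->rx_std[i];

        /* Usable payload: buffer size minus the rx offset and a 64-byte
         * slop area, placed in the length field of the descriptor.
         */
        rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
                << RXD_LEN_SHIFT;
        /* A single-descriptor packet only needs the END flag. */
        rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
        /* Opaque cookie telling us which ring and index to refill later. */
        rxd->opaque = (RXD_OPAQUE_RING_STD |
                       (i << RXD_OPAQUE_INDEX_SHIFT));
}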
4192
4193 /*
4194  * Must not be invoked with interrupt sources disabled and
4195  * the hardware shut down.
4196  */
4197 static void tg3_free_consistent(struct tg3 *tp)
4198 {
4199         kfree(tp->rx_std_buffers);
4200         tp->rx_std_buffers = NULL;
4201         if (tp->rx_std) {
4202                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
4203                                     tp->rx_std, tp->rx_std_mapping);
4204                 tp->rx_std = NULL;
4205         }
4206         if (tp->rx_jumbo) {
4207                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4208                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
4209                 tp->rx_jumbo = NULL;
4210         }
4211         if (tp->rx_rcb) {
4212                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4213                                     tp->rx_rcb, tp->rx_rcb_mapping);
4214                 tp->rx_rcb = NULL;
4215         }
4216         if (tp->tx_ring) {
4217                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
4218                         tp->tx_ring, tp->tx_desc_mapping);
4219                 tp->tx_ring = NULL;
4220         }
4221         if (tp->hw_status) {
4222                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
4223                                     tp->hw_status, tp->status_mapping);
4224                 tp->hw_status = NULL;
4225         }
4226         if (tp->hw_stats) {
4227                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
4228                                     tp->hw_stats, tp->stats_mapping);
4229                 tp->hw_stats = NULL;
4230         }
4231 }
4232
4233 /*
4234  * Must not be invoked with interrupt sources disabled and
4235  * the hardware shut down.  Can sleep.
4236  */
4237 static int tg3_alloc_consistent(struct tg3 *tp)
4238 {
4239         tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
4240                                       (TG3_RX_RING_SIZE +
4241                                        TG3_RX_JUMBO_RING_SIZE)) +
4242                                      (sizeof(struct tx_ring_info) *
4243                                       TG3_TX_RING_SIZE),
4244                                      GFP_KERNEL);
4245         if (!tp->rx_std_buffers)
4246                 return -ENOMEM;
4247
4248         memset(tp->rx_std_buffers, 0,
4249                (sizeof(struct ring_info) *
4250                 (TG3_RX_RING_SIZE +
4251                  TG3_RX_JUMBO_RING_SIZE)) +
4252                (sizeof(struct tx_ring_info) *
4253                 TG3_TX_RING_SIZE));
4254
4255         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
4256         tp->tx_buffers = (struct tx_ring_info *)
4257                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
4258
4259         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
4260                                           &tp->rx_std_mapping);
4261         if (!tp->rx_std)
4262                 goto err_out;
4263
4264         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4265                                             &tp->rx_jumbo_mapping);
4266
4267         if (!tp->rx_jumbo)
4268                 goto err_out;
4269
4270         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4271                                           &tp->rx_rcb_mapping);
4272         if (!tp->rx_rcb)
4273                 goto err_out;
4274
4275         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
4276                                            &tp->tx_desc_mapping);
4277         if (!tp->tx_ring)
4278                 goto err_out;
4279
4280         tp->hw_status = pci_alloc_consistent(tp->pdev,
4281                                              TG3_HW_STATUS_SIZE,
4282                                              &tp->status_mapping);
4283         if (!tp->hw_status)
4284                 goto err_out;
4285
4286         tp->hw_stats = pci_alloc_consistent(tp->pdev,
4287                                             sizeof(struct tg3_hw_stats),
4288                                             &tp->stats_mapping);
4289         if (!tp->hw_stats)
4290                 goto err_out;
4291
4292         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4293         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4294
4295         return 0;
4296
4297 err_out:
4298         tg3_free_consistent(tp);
4299         return -ENOMEM;
4300 }
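
/* A minimal sketch of the coherent-DMA pairing used above, assuming the
 * 2.6 pci_alloc_consistent()/pci_free_consistent() API.  The helper names
 * are hypothetical and not part of the driver.
 */
static void *tg3_example_alloc_ring(struct tg3 *tp, size_t bytes,
                                    dma_addr_t *mapping)
{
        /* Returns a CPU virtual address and fills *mapping with the bus
         * address the chip must be programmed with; both name the same
         * coherent block of memory.
         */
        return pci_alloc_consistent(tp->pdev, bytes, mapping);
}

static void tg3_example_free_ring(struct tg3 *tp, size_t bytes,
                                  void *cpu_addr, dma_addr_t mapping)
{
        /* Mirrors the NULL checks in tg3_free_consistent(): only rings
         * that were actually allocated are handed back.
         */
        if (cpu_addr)
                pci_free_consistent(tp->pdev, bytes, cpu_addr, mapping);
}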
4301
4302 #define MAX_WAIT_CNT 1000
4303
4304 /* To stop a block, clear the enable bit and poll till it
4305  * clears.  tp->lock is held.
4306  */
4307 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
4308 {
4309         unsigned int i;
4310         u32 val;
4311
4312         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4313                 switch (ofs) {
4314                 case RCVLSC_MODE:
4315                 case DMAC_MODE:
4316                 case MBFREE_MODE:
4317                 case BUFMGR_MODE:
4318                 case MEMARB_MODE:
4319                         /* We can't enable/disable these bits of the
4320                          * 5705/5750, just say success.
4321                          */
4322                         return 0;
4323
4324                 default:
4325                         break;
4326                 }
4327         }
4328
4329         val = tr32(ofs);
4330         val &= ~enable_bit;
4331         tw32_f(ofs, val);
4332
4333         for (i = 0; i < MAX_WAIT_CNT; i++) {
4334                 udelay(100);
4335                 val = tr32(ofs);
4336                 if ((val & enable_bit) == 0)
4337                         break;
4338         }
4339
4340         if (i == MAX_WAIT_CNT && !silent) {
4341                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4342                        "ofs=%lx enable_bit=%x\n",
4343                        ofs, enable_bit);
4344                 return -ENODEV;
4345         }
4346
4347         return 0;
4348 }
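
/* Usage sketch, assuming the register names already used later in this
 * file: stopping the read-DMA engine reduces to "clear the enable bit
 * and poll", with the silent flag suppressing the timeout printk.  The
 * wrapper name is hypothetical.
 */
static int tg3_example_stop_rdma(struct tg3 *tp, int silent)
{
        /* tp->lock must be held, as for every tg3_stop_block() call. */
        return tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
}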
4349
4350 /* tp->lock is held. */
4351 static int tg3_abort_hw(struct tg3 *tp, int silent)
4352 {
4353         int i, err;
4354
4355         tg3_disable_ints(tp);
4356
4357         tp->rx_mode &= ~RX_MODE_ENABLE;
4358         tw32_f(MAC_RX_MODE, tp->rx_mode);
4359         udelay(10);
4360
4361         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
4362         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
4363         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
4364         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
4365         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
4366         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
4367
4368         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
4369         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
4370         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
4371         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
4372         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
4373         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
4374         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
4375
4376         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
4377         tw32_f(MAC_MODE, tp->mac_mode);
4378         udelay(40);
4379
4380         tp->tx_mode &= ~TX_MODE_ENABLE;
4381         tw32_f(MAC_TX_MODE, tp->tx_mode);
4382
4383         for (i = 0; i < MAX_WAIT_CNT; i++) {
4384                 udelay(100);
4385                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
4386                         break;
4387         }
4388         if (i >= MAX_WAIT_CNT) {
4389                 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
4390                        "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
4391                        tp->dev->name, tr32(MAC_TX_MODE));
4392                 err |= -ENODEV;
4393         }
4394
4395         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
4396         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
4397         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
4398
4399         tw32(FTQ_RESET, 0xffffffff);
4400         tw32(FTQ_RESET, 0x00000000);
4401
4402         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
4403         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
4404
4405         if (tp->hw_status)
4406                 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4407         if (tp->hw_stats)
4408                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4409
4410         return err;
4411 }
4412
4413 /* tp->lock is held. */
4414 static int tg3_nvram_lock(struct tg3 *tp)
4415 {
4416         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4417                 int i;
4418
4419                 if (tp->nvram_lock_cnt == 0) {
4420                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4421                         for (i = 0; i < 8000; i++) {
4422                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4423                                         break;
4424                                 udelay(20);
4425                         }
4426                         if (i == 8000) {
4427                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
4428                                 return -ENODEV;
4429                         }
4430                 }
4431                 tp->nvram_lock_cnt++;
4432         }
4433         return 0;
4434 }
4435
4436 /* tp->lock is held. */
4437 static void tg3_nvram_unlock(struct tg3 *tp)
4438 {
4439         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4440                 if (tp->nvram_lock_cnt > 0)
4441                         tp->nvram_lock_cnt--;
4442                 if (tp->nvram_lock_cnt == 0)
4443                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4444         }
4445 }
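
/* Usage sketch: tg3_nvram_lock() requests the hardware arbitration
 * semaphore (SWARB_REQ_SET1) and waits for the grant, and every
 * successful lock is balanced by tg3_nvram_unlock() unless a chip reset
 * is about to clear the arbitration anyway (see tg3_chip_reset() below).
 * Called with tp->lock held; the helper name is hypothetical.
 */
static int tg3_example_with_nvram(struct tg3 *tp)
{
        int err;

        err = tg3_nvram_lock(tp);
        if (err)
                return err;     /* arbitration grant never arrived */

        /* ... access NVRAM registers here ... */

        tg3_nvram_unlock(tp);   /* drop refcount; last unlock clears REQ */
        return 0;
}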
4446
4447 /* tp->lock is held. */
4448 static void tg3_enable_nvram_access(struct tg3 *tp)
4449 {
4450         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4451             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4452                 u32 nvaccess = tr32(NVRAM_ACCESS);
4453
4454                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4455         }
4456 }
4457
4458 /* tp->lock is held. */
4459 static void tg3_disable_nvram_access(struct tg3 *tp)
4460 {
4461         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4462             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4463                 u32 nvaccess = tr32(NVRAM_ACCESS);
4464
4465                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4466         }
4467 }
4468
4469 /* tp->lock is held. */
4470 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4471 {
4472         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
4473                 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4474                               NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
4475
4476         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4477                 switch (kind) {
4478                 case RESET_KIND_INIT:
4479                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4480                                       DRV_STATE_START);
4481                         break;
4482
4483                 case RESET_KIND_SHUTDOWN:
4484                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4485                                       DRV_STATE_UNLOAD);
4486                         break;
4487
4488                 case RESET_KIND_SUSPEND:
4489                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4490                                       DRV_STATE_SUSPEND);
4491                         break;
4492
4493                 default:
4494                         break;
4495                 }
4496         }
4497 }
4498
4499 /* tp->lock is held. */
4500 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4501 {
4502         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4503                 switch (kind) {
4504                 case RESET_KIND_INIT:
4505                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4506                                       DRV_STATE_START_DONE);
4507                         break;
4508
4509                 case RESET_KIND_SHUTDOWN:
4510                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4511                                       DRV_STATE_UNLOAD_DONE);
4512                         break;
4513
4514                 default:
4515                         break;
4516                 }
4517         }
4518 }
4519
4520 /* tp->lock is held. */
4521 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
4522 {
4523         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4524                 switch (kind) {
4525                 case RESET_KIND_INIT:
4526                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4527                                       DRV_STATE_START);
4528                         break;
4529
4530                 case RESET_KIND_SHUTDOWN:
4531                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4532                                       DRV_STATE_UNLOAD);
4533                         break;
4534
4535                 case RESET_KIND_SUSPEND:
4536                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4537                                       DRV_STATE_SUSPEND);
4538                         break;
4539
4540                 default:
4541                         break;
4542                 }
4543         }
4544 }
4545
4546 static void tg3_stop_fw(struct tg3 *);
4547
4548 /* tp->lock is held. */
4549 static int tg3_chip_reset(struct tg3 *tp)
4550 {
4551         u32 val;
4552         void (*write_op)(struct tg3 *, u32, u32);
4553         int i;
4554
4555         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
4556                 tg3_nvram_lock(tp);
4557                 /* No matching tg3_nvram_unlock() after this because
4558                  * chip reset below will undo the nvram lock.
4559                  */
4560                 tp->nvram_lock_cnt = 0;
4561         }
4562
4563         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
4564             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
4565             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
4566                 tw32(GRC_FASTBOOT_PC, 0);
4567
4568         /*
4569          * We must avoid the readl() that normally takes place.
4570          * It locks machines, causes machine checks, and other
4571          * fun things.  So, temporarily disable the 5701
4572          * hardware workaround, while we do the reset.
4573          */
4574         write_op = tp->write32;
4575         if (write_op == tg3_write_flush_reg32)
4576                 tp->write32 = tg3_write32;
4577
4578         /* do the reset */
4579         val = GRC_MISC_CFG_CORECLK_RESET;
4580
4581         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4582                 if (tr32(0x7e2c) == 0x60) {
4583                         tw32(0x7e2c, 0x20);
4584                 }
4585                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4586                         tw32(GRC_MISC_CFG, (1 << 29));
4587                         val |= (1 << 29);
4588                 }
4589         }
4590
4591         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4592                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
4593         tw32(GRC_MISC_CFG, val);
4594
4595         /* restore 5701 hardware bug workaround write method */
4596         tp->write32 = write_op;
4597
4598         /* Unfortunately, we have to delay before the PCI read back.
4599          * Some 575X chips will not even respond to a PCI cfg access
4600          * when the reset command is given to the chip.
4601          *
4602          * How do these hardware designers expect things to work
4603          * properly if the PCI write is posted for a long period
4604          * of time?  It is always necessary to have some method by
4605          * which a register read back can occur to push out the
4606          * write that does the reset.
4607          *
4608          * For most tg3 variants the trick below has worked.
4609          * Ho hum...
4610          */
4611         udelay(120);
4612
4613         /* Flush PCI posted writes.  The normal MMIO registers
4614          * are inaccessible at this time, so this is the only
4615          * way to do this reliably (actually, this is no longer
4616          * the case, see above).  I tried to use indirect
4617          * register read/write but this upset some 5701 variants.
4618          */
4619         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
4620
4621         udelay(120);
4622
4623         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4624                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
4625                         int i;
4626                         u32 cfg_val;
4627
4628                         /* Wait for link training to complete.  */
4629                         for (i = 0; i < 5000; i++)
4630                                 udelay(100);
4631
4632                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
4633                         pci_write_config_dword(tp->pdev, 0xc4,
4634                                                cfg_val | (1 << 15));
4635                 }
4636                 /* Set PCIE max payload size and clear error status.  */
4637                 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
4638         }
4639
4640         /* Re-enable indirect register accesses. */
4641         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
4642                                tp->misc_host_ctrl);
4643
4644         /* Set MAX PCI retry to zero. */
4645         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
4646         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
4647             (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
4648                 val |= PCISTATE_RETRY_SAME_DMA;
4649         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
4650
4651         pci_restore_state(tp->pdev);
4652
4653         /* Make sure PCI-X relaxed ordering bit is clear. */
4654         pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
4655         val &= ~PCIX_CAPS_RELAXED_ORDERING;
4656         pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
4657
4658         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4659                 u32 val;
4660
4661                 /* Chip reset on 5780 will reset MSI enable bit,
4662                  * so we need to restore it.
4663                  */
4664                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
4665                         u16 ctrl;
4666
4667                         pci_read_config_word(tp->pdev,
4668                                              tp->msi_cap + PCI_MSI_FLAGS,
4669                                              &ctrl);
4670                         pci_write_config_word(tp->pdev,
4671                                               tp->msi_cap + PCI_MSI_FLAGS,
4672                                               ctrl | PCI_MSI_FLAGS_ENABLE);
4673                         val = tr32(MSGINT_MODE);
4674                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
4675                 }
4676
4677                 val = tr32(MEMARB_MODE);
4678                 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
4679
4680         } else
4681                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
4682
4683         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
4684                 tg3_stop_fw(tp);
4685                 tw32(0x5000, 0x400);
4686         }
4687
4688         tw32(GRC_MODE, tp->grc_mode);
4689
4690         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
4691                 u32 val = tr32(0xc4);
4692
4693                 tw32(0xc4, val | (1 << 15));
4694         }
4695
4696         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
4697             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4698                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
4699                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
4700                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
4701                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
4702         }
4703
4704         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
4705                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
4706                 tw32_f(MAC_MODE, tp->mac_mode);
4707         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
4708                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
4709                 tw32_f(MAC_MODE, tp->mac_mode);
4710         } else
4711                 tw32_f(MAC_MODE, 0);
4712         udelay(40);
4713
4714         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
4715                 /* Wait for firmware initialization to complete. */
4716                 for (i = 0; i < 100000; i++) {
4717                         tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
4718                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4719                                 break;
4720                         udelay(10);
4721                 }
4722                 if (i >= 100000) {
4723                         printk(KERN_ERR PFX "tg3_reset_hw timed out for %s, "
4724                                "firmware will not restart magic=%08x\n",
4725                                tp->dev->name, val);
4726                         return -ENODEV;
4727                 }
4728         }
4729
4730         if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
4731             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4732                 u32 val = tr32(0x7c00);
4733
4734                 tw32(0x7c00, val | (1 << 25));
4735         }
4736
4737         /* Reprobe ASF enable state.  */
4738         tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
4739         tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
4740         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
4741         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
4742                 u32 nic_cfg;
4743
4744                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
4745                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
4746                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
4747                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
4748                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
4749                 }
4750         }
4751
4752         return 0;
4753 }
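
/* A minimal sketch of the posted-write flush trick the comments in
 * tg3_chip_reset() describe: after the reset command is written, a PCI
 * config space read (which the chip still answers) is issued purely to
 * push the posted write out to the device.  PCI_COMMAND is just a
 * convenient, always-present register; the helper name is hypothetical.
 */
static void tg3_example_flush_posted_write(struct tg3 *tp)
{
        u32 val;

        pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
        /* The value read back is irrelevant; only the read transaction
         * ordering behind the earlier posted write matters.
         */
}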
4754
4755 /* tp->lock is held. */
4756 static void tg3_stop_fw(struct tg3 *tp)
4757 {
4758         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4759                 u32 val;
4760                 int i;
4761
4762                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
4763                 val = tr32(GRC_RX_CPU_EVENT);
4764                 val |= (1 << 14);
4765                 tw32(GRC_RX_CPU_EVENT, val);
4766
4767                 /* Wait for RX cpu to ACK the event.  */
4768                 for (i = 0; i < 100; i++) {
4769                         if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
4770                                 break;
4771                         udelay(1);
4772                 }
4773         }
4774 }
4775
4776 /* tp->lock is held. */
4777 static int tg3_halt(struct tg3 *tp, int kind, int silent)
4778 {
4779         int err;
4780
4781         tg3_stop_fw(tp);
4782
4783         tg3_write_sig_pre_reset(tp, kind);
4784
4785         tg3_abort_hw(tp, silent);
4786         err = tg3_chip_reset(tp);
4787
4788         tg3_write_sig_legacy(tp, kind);
4789         tg3_write_sig_post_reset(tp, kind);
4790
4791         if (err)
4792                 return err;
4793
4794         return 0;
4795 }
4796
4797 #define TG3_FW_RELEASE_MAJOR    0x0
4798 #define TG3_FW_RELASE_MINOR     0x0
4799 #define TG3_FW_RELEASE_FIX      0x0
4800 #define TG3_FW_START_ADDR       0x08000000
4801 #define TG3_FW_TEXT_ADDR        0x08000000
4802 #define TG3_FW_TEXT_LEN         0x9c0
4803 #define TG3_FW_RODATA_ADDR      0x080009c0
4804 #define TG3_FW_RODATA_LEN       0x60
4805 #define TG3_FW_DATA_ADDR        0x08000a40
4806 #define TG3_FW_DATA_LEN         0x20
4807 #define TG3_FW_SBSS_ADDR        0x08000a60
4808 #define TG3_FW_SBSS_LEN         0xc
4809 #define TG3_FW_BSS_ADDR         0x08000a70
4810 #define TG3_FW_BSS_LEN          0x10
4811
4812 static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
4813         0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
4814         0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
4815         0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
4816         0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
4817         0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
4818         0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
4819         0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
4820         0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
4821         0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
4822         0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
4823         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
4824         0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
4825         0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
4826         0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
4827         0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
4828         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4829         0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
4830         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
4831         0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
4832         0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4833         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
4834         0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
4835         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4836         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4837         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4838         0, 0, 0, 0, 0, 0,
4839         0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
4840         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4841         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4842         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4843         0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
4844         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
4845         0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
4846         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
4847         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4848         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4849         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
4850         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4851         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4852         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4853         0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
4854         0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
4855         0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
4856         0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
4857         0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
4858         0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
4859         0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
4860         0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
4861         0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
4862         0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
4863         0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
4864         0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
4865         0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
4866         0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
4867         0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
4868         0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
4869         0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
4870         0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
4871         0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
4872         0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
4873         0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
4874         0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
4875         0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
4876         0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
4877         0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
4878         0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
4879         0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
4880         0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
4881         0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
4882         0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
4883         0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
4884         0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
4885         0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
4886         0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
4887         0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
4888         0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
4889         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
4890         0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
4891         0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
4892         0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
4893         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
4894         0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
4895         0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
4896         0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
4897         0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
4898         0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
4899         0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
4900         0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
4901         0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
4902         0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
4903         0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
4904 };
4905
4906 static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
4907         0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
4908         0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
4909         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4910         0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
4911         0x00000000
4912 };
4913
4914 #if 0 /* All zeros, don't eat up space with it. */
4915 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
4916         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4917         0x00000000, 0x00000000, 0x00000000, 0x00000000
4918 };
4919 #endif
4920
4921 #define RX_CPU_SCRATCH_BASE     0x30000
4922 #define RX_CPU_SCRATCH_SIZE     0x04000
4923 #define TX_CPU_SCRATCH_BASE     0x34000
4924 #define TX_CPU_SCRATCH_SIZE     0x04000
4925
4926 /* tp->lock is held. */
4927 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
4928 {
4929         int i;
4930
4931         if (offset == TX_CPU_BASE &&
4932             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
4933                 BUG();
4934
4935         if (offset == RX_CPU_BASE) {
4936                 for (i = 0; i < 10000; i++) {
4937                         tw32(offset + CPU_STATE, 0xffffffff);
4938                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4939                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4940                                 break;
4941                 }
4942
4943                 tw32(offset + CPU_STATE, 0xffffffff);
4944                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
4945                 udelay(10);
4946         } else {
4947                 for (i = 0; i < 10000; i++) {
4948                         tw32(offset + CPU_STATE, 0xffffffff);
4949                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4950                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4951                                 break;
4952                 }
4953         }
4954
4955         if (i >= 10000) {
4956                 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
4957                        "and %s CPU\n",
4958                        tp->dev->name,
4959                        (offset == RX_CPU_BASE ? "RX" : "TX"));
4960                 return -ENODEV;
4961         }
4962
4963         /* Clear firmware's nvram arbitration. */
4964         if (tp->tg3_flags & TG3_FLAG_NVRAM)
4965                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
4966         return 0;
4967 }
4968
4969 struct fw_info {
4970         unsigned int text_base;
4971         unsigned int text_len;
4972         u32 *text_data;
4973         unsigned int rodata_base;
4974         unsigned int rodata_len;
4975         u32 *rodata_data;
4976         unsigned int data_base;
4977         unsigned int data_len;
4978         u32 *data_data;
4979 };
4980
4981 /* tp->lock is held. */
4982 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
4983                                  int cpu_scratch_size, struct fw_info *info)
4984 {
4985         int err, lock_err, i;
4986         void (*write_op)(struct tg3 *, u32, u32);
4987
4988         if (cpu_base == TX_CPU_BASE &&
4989             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
4990                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
4991                        "TX cpu firmware on %s which is 5705.\n",
4992                        tp->dev->name);
4993                 return -EINVAL;
4994         }
4995
4996         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4997                 write_op = tg3_write_mem;
4998         else
4999                 write_op = tg3_write_indirect_reg32;
5000
5001         /* It is possible that bootcode is still loading at this point.
5002          * Get the nvram lock before halting the cpu.
5003          */
5004         lock_err = tg3_nvram_lock(tp);
5005         err = tg3_halt_cpu(tp, cpu_base);
5006         if (!lock_err)
5007                 tg3_nvram_unlock(tp);
5008         if (err)
5009                 goto out;
5010
5011         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
5012                 write_op(tp, cpu_scratch_base + i, 0);
5013         tw32(cpu_base + CPU_STATE, 0xffffffff);
5014         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
5015         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
5016                 write_op(tp, (cpu_scratch_base +
5017                               (info->text_base & 0xffff) +
5018                               (i * sizeof(u32))),
5019                          (info->text_data ?
5020                           info->text_data[i] : 0));
5021         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
5022                 write_op(tp, (cpu_scratch_base +
5023                               (info->rodata_base & 0xffff) +
5024                               (i * sizeof(u32))),
5025                          (info->rodata_data ?
5026                           info->rodata_data[i] : 0));
5027         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
5028                 write_op(tp, (cpu_scratch_base +
5029                               (info->data_base & 0xffff) +
5030                               (i * sizeof(u32))),
5031                          (info->data_data ?
5032                           info->data_data[i] : 0));
5033
5034         err = 0;
5035
5036 out:
5037         return err;
5038 }
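
/* A minimal sketch of the section-copy loops above: each firmware
 * section lands at (base & 0xffff) inside the CPU's scratch RAM and is
 * copied one 32-bit word at a time, with a NULL data pointer standing in
 * for an all-zero section.  The helper name is hypothetical.
 */
static void tg3_example_load_section(struct tg3 *tp,
                                     void (*write_op)(struct tg3 *, u32, u32),
                                     u32 cpu_scratch_base, u32 base,
                                     u32 len, u32 *data)
{
        u32 i;

        for (i = 0; i < (len / sizeof(u32)); i++)
                write_op(tp, cpu_scratch_base + (base & 0xffff) +
                             (i * sizeof(u32)),
                         data ? data[i] : 0);
}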
5039
5040 /* tp->lock is held. */
5041 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
5042 {
5043         struct fw_info info;
5044         int err, i;
5045
5046         info.text_base = TG3_FW_TEXT_ADDR;
5047         info.text_len = TG3_FW_TEXT_LEN;
5048         info.text_data = &tg3FwText[0];
5049         info.rodata_base = TG3_FW_RODATA_ADDR;
5050         info.rodata_len = TG3_FW_RODATA_LEN;
5051         info.rodata_data = &tg3FwRodata[0];
5052         info.data_base = TG3_FW_DATA_ADDR;
5053         info.data_len = TG3_FW_DATA_LEN;
5054         info.data_data = NULL;
5055
5056         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
5057                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
5058                                     &info);
5059         if (err)
5060                 return err;
5061
5062         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
5063                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
5064                                     &info);
5065         if (err)
5066                 return err;
5067
5068         /* Now start up only the RX cpu. */
5069         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5070         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5071
5072         for (i = 0; i < 5; i++) {
5073                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
5074                         break;
5075                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5076                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
5077                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5078                 udelay(1000);
5079         }
5080         if (i >= 5) {
5081                 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
5082                        "to set RX CPU PC, is %08x should be %08x\n",
5083                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
5084                        TG3_FW_TEXT_ADDR);
5085                 return -ENODEV;
5086         }
5087         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5088         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
5089
5090         return 0;
5091 }
5092
5093 #if TG3_TSO_SUPPORT != 0
5094
5095 #define TG3_TSO_FW_RELEASE_MAJOR        0x1
5096 #define TG3_TSO_FW_RELASE_MINOR         0x6
5097 #define TG3_TSO_FW_RELEASE_FIX          0x0
5098 #define TG3_TSO_FW_START_ADDR           0x08000000
5099 #define TG3_TSO_FW_TEXT_ADDR            0x08000000
5100 #define TG3_TSO_FW_TEXT_LEN             0x1aa0
5101 #define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
5102 #define TG3_TSO_FW_RODATA_LEN           0x60
5103 #define TG3_TSO_FW_DATA_ADDR            0x08001b20
5104 #define TG3_TSO_FW_DATA_LEN             0x30
5105 #define TG3_TSO_FW_SBSS_ADDR            0x08001b50
5106 #define TG3_TSO_FW_SBSS_LEN             0x2c
5107 #define TG3_TSO_FW_BSS_ADDR             0x08001b80
5108 #define TG3_TSO_FW_BSS_LEN              0x894
5109
5110 static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
5111         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
5112         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
5113         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5114         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
5115         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
5116         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
5117         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
5118         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
5119         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
5120         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
5121         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
5122         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
5123         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
5124         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
5125         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
5126         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
5127         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
5128         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
5129         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5130         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
5131         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
5132         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
5133         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
5134         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
5135         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
5136         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
5137         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
5138         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
5139         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
5140         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5141         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
5142         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
5143         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
5144         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
5145         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
5146         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
5147         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
5148         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
5149         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5150         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
5151         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
5152         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
5153         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
5154         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
5155         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
5156         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
5157         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
5158         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5159         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
5160         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5161         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
5162         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
5163         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
5164         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
5165         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
5166         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
5167         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
5168         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
5169         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
5170         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
5171         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
5172         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
5173         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
5174         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
5175         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
5176         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
5177         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
5178         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
5179         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
5180         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
5181         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
5182         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
5183         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
5184         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
5185         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
5186         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
5187         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
5188         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
5189         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
5190         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
5191         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
5192         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
5193         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
5194         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
5195         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
5196         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
5197         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
5198         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
5199         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
5200         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
5201         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
5202         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
5203         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
5204         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
5205         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
5206         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
5207         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
5208         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
5209         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
5210         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
5211         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
5212         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
5213         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
5214         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
5215         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
5216         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
5217         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
5218         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
5219         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
5220         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
5221         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
5222         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
5223         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
5224         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
5225         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
5226         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
5227         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
5228         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
5229         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
5230         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
5231         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
5232         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
5233         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
5234         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
5235         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
5236         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
5237         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
5238         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
5239         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
5240         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
5241         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
5242         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
5243         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
5244         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
5245         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
5246         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
5247         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
5248         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
5249         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5250         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
5251         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
5252         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
5253         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
5254         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
5255         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
5256         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
5257         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
5258         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
5259         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
5260         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
5261         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
5262         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
5263         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
5264         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
5265         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
5266         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
5267         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
5268         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
5269         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
5270         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
5271         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
5272         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
5273         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
5274         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
5275         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
5276         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
5277         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
5278         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
5279         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
5280         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
5281         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
5282         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
5283         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
5284         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
5285         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
5286         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
5287         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
5288         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
5289         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
5290         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
5291         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
5292         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
5293         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
5294         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
5295         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
5296         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
5297         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
5298         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
5299         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
5300         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
5301         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
5302         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
5303         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
5304         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
5305         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
5306         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
5307         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
5308         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
5309         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
5310         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
5311         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
5312         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
5313         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
5314         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
5315         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
5316         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
5317         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
5318         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
5319         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
5320         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
5321         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
5322         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
5323         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
5324         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
5325         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
5326         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
5327         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
5328         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
5329         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
5330         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
5331         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5332         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
5333         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
5334         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
5335         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
5336         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
5337         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
5338         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
5339         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
5340         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
5341         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
5342         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
5343         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
5344         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
5345         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
5346         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
5347         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
5348         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5349         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5350         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5351         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5352         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5353         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5354         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5355         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5356         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5357         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5358         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5359         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5360         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5361         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5362         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5363         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5364         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5365         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5366         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5367         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5368         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5369         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5370         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5371         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
5372         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
5373         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
5374         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
5375         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5376         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
5377         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
5378         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
5379         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
5380         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
5381         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
5382         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
5383         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
5384         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
5385         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
5386         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
5387         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
5388         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
5389         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
5390         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
5391         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
5392         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
5393         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
5394         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
5395 };
5396
5397 static u32 tg3TsoFwRodata[] = {
5398         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5399         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
5400         0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
5401         0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
5402         0x00000000,
5403 };
5404
5405 static u32 tg3TsoFwData[] = {
5406         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
5407         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5408         0x00000000,
5409 };
5410
5411 /* 5705 needs a special version of the TSO firmware.  */
5412 #define TG3_TSO5_FW_RELEASE_MAJOR       0x1
5413 #define TG3_TSO5_FW_RELEASE_MINOR       0x2
5414 #define TG3_TSO5_FW_RELEASE_FIX         0x0
5415 #define TG3_TSO5_FW_START_ADDR          0x00010000
5416 #define TG3_TSO5_FW_TEXT_ADDR           0x00010000
5417 #define TG3_TSO5_FW_TEXT_LEN            0xe90
5418 #define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
5419 #define TG3_TSO5_FW_RODATA_LEN          0x50
5420 #define TG3_TSO5_FW_DATA_ADDR           0x00010f00
5421 #define TG3_TSO5_FW_DATA_LEN            0x20
5422 #define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
5423 #define TG3_TSO5_FW_SBSS_LEN            0x28
5424 #define TG3_TSO5_FW_BSS_ADDR            0x00010f50
5425 #define TG3_TSO5_FW_BSS_LEN             0x88
5426
5427 static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
5428         0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
5429         0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
5430         0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5431         0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
5432         0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
5433         0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
5434         0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5435         0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
5436         0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
5437         0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
5438         0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
5439         0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
5440         0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
5441         0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
5442         0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
5443         0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
5444         0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
5445         0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
5446         0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
5447         0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
5448         0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
5449         0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
5450         0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
5451         0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
5452         0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
5453         0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
5454         0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
5455         0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
5456         0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
5457         0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
5458         0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5459         0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
5460         0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
5461         0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
5462         0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
5463         0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
5464         0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
5465         0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
5466         0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
5467         0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
5468         0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
5469         0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
5470         0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
5471         0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
5472         0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
5473         0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
5474         0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
5475         0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
5476         0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
5477         0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
5478         0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
5479         0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
5480         0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
5481         0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
5482         0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
5483         0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
5484         0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
5485         0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
5486         0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
5487         0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
5488         0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
5489         0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
5490         0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
5491         0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
5492         0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
5493         0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
5494         0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5495         0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
5496         0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
5497         0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
5498         0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
5499         0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
5500         0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
5501         0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
5502         0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
5503         0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
5504         0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
5505         0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
5506         0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
5507         0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
5508         0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
5509         0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
5510         0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
5511         0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
5512         0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
5513         0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
5514         0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
5515         0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
5516         0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
5517         0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
5518         0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
5519         0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
5520         0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
5521         0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
5522         0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
5523         0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
5524         0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
5525         0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
5526         0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
5527         0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
5528         0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
5529         0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
5530         0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
5531         0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
5532         0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
5533         0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
5534         0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5535         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5536         0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
5537         0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
5538         0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
5539         0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
5540         0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
5541         0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
5542         0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
5543         0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
5544         0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
5545         0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5546         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5547         0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
5548         0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
5549         0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
5550         0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
5551         0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5552         0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
5553         0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
5554         0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
5555         0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
5556         0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
5557         0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
5558         0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
5559         0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
5560         0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
5561         0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
5562         0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
5563         0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
5564         0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
5565         0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
5566         0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
5567         0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
5568         0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
5569         0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
5570         0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
5571         0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
5572         0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
5573         0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
5574         0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
5575         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5576         0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
5577         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
5578         0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
5579         0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5580         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
5581         0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
5582         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5583         0x00000000, 0x00000000, 0x00000000,
5584 };
5585
5586 static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
5587         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5588         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
5589         0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
5590         0x00000000, 0x00000000, 0x00000000,
5591 };
5592
5593 static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
5594         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
5595         0x00000000, 0x00000000, 0x00000000,
5596 };
5597
5598 /* tp->lock is held. */
5599 static int tg3_load_tso_firmware(struct tg3 *tp)
5600 {
5601         struct fw_info info;
5602         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
5603         int err, i;
5604
5605         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5606                 return 0;
5607
5608         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5609                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
5610                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
5611                 info.text_data = &tg3Tso5FwText[0];
5612                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
5613                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
5614                 info.rodata_data = &tg3Tso5FwRodata[0];
5615                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
5616                 info.data_len = TG3_TSO5_FW_DATA_LEN;
5617                 info.data_data = &tg3Tso5FwData[0];
5618                 cpu_base = RX_CPU_BASE;
5619                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
5620                 cpu_scratch_size = (info.text_len +
5621                                     info.rodata_len +
5622                                     info.data_len +
5623                                     TG3_TSO5_FW_SBSS_LEN +
5624                                     TG3_TSO5_FW_BSS_LEN);
5625         } else {
5626                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
5627                 info.text_len = TG3_TSO_FW_TEXT_LEN;
5628                 info.text_data = &tg3TsoFwText[0];
5629                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
5630                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
5631                 info.rodata_data = &tg3TsoFwRodata[0];
5632                 info.data_base = TG3_TSO_FW_DATA_ADDR;
5633                 info.data_len = TG3_TSO_FW_DATA_LEN;
5634                 info.data_data = &tg3TsoFwData[0];
5635                 cpu_base = TX_CPU_BASE;
5636                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
5637                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
5638         }
5639
5640         err = tg3_load_firmware_cpu(tp, cpu_base,
5641                                     cpu_scratch_base, cpu_scratch_size,
5642                                     &info);
5643         if (err)
5644                 return err;
5645
5646         /* Now startup the cpu. */
5647         tw32(cpu_base + CPU_STATE, 0xffffffff);
5648         tw32_f(cpu_base + CPU_PC,    info.text_base);
5649
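             /* Poll for the new PC value to take effect; if it does not,
              * halt the CPU, rewrite the PC and retry, up to five times.
              */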
5650         for (i = 0; i < 5; i++) {
5651                 if (tr32(cpu_base + CPU_PC) == info.text_base)
5652                         break;
5653                 tw32(cpu_base + CPU_STATE, 0xffffffff);
5654                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
5655                 tw32_f(cpu_base + CPU_PC,    info.text_base);
5656                 udelay(1000);
5657         }
5658         if (i >= 5) {
5659                 printk(KERN_ERR PFX "tg3_load_tso_firmware fails to set "
5660                        "CPU PC for %s: is %08x, should be %08x\n",
5661                        tp->dev->name, tr32(cpu_base + CPU_PC),
5662                        info.text_base);
5663                 return -ENODEV;
5664         }
5665         tw32(cpu_base + CPU_STATE, 0xffffffff);
5666         tw32_f(cpu_base + CPU_MODE,  0x00000000);
5667         return 0;
5668 }
5669
5670 #endif /* TG3_TSO_SUPPORT != 0 */
5671
5672 /* tp->lock is held. */
5673 static void __tg3_set_mac_addr(struct tg3 *tp)
5674 {
5675         u32 addr_high, addr_low;
5676         int i;
5677
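             /* The address is split into a 2-byte high word and a 4-byte low
              * word, and the same value is written to all four MAC address
              * slots.
              */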
5678         addr_high = ((tp->dev->dev_addr[0] << 8) |
5679                      tp->dev->dev_addr[1]);
5680         addr_low = ((tp->dev->dev_addr[2] << 24) |
5681                     (tp->dev->dev_addr[3] << 16) |
5682                     (tp->dev->dev_addr[4] <<  8) |
5683                     (tp->dev->dev_addr[5] <<  0));
5684         for (i = 0; i < 4; i++) {
5685                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
5686                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
5687         }
5688
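             /* 5703 and 5704 have twelve extended address slots; mirror the
              * address into each of them.
              */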
5689         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
5690             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5691                 for (i = 0; i < 12; i++) {
5692                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
5693                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
5694                 }
5695         }
5696
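             /* Derive the transmit backoff seed from a sum of the address
              * bytes.
              */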
5697         addr_high = (tp->dev->dev_addr[0] +
5698                      tp->dev->dev_addr[1] +
5699                      tp->dev->dev_addr[2] +
5700                      tp->dev->dev_addr[3] +
5701                      tp->dev->dev_addr[4] +
5702                      tp->dev->dev_addr[5]) &
5703                 TX_BACKOFF_SEED_MASK;
5704         tw32(MAC_TX_BACKOFF_SEED, addr_high);
5705 }
5706
5707 static int tg3_set_mac_addr(struct net_device *dev, void *p)
5708 {
5709         struct tg3 *tp = netdev_priv(dev);
5710         struct sockaddr *addr = p;
5711
5712         if (!is_valid_ether_addr(addr->sa_data))
5713                 return -EINVAL;
5714
5715         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5716
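             /* If the interface is down, only the software copy is updated;
              * __tg3_set_mac_addr() programs the hardware during the next
              * initialization (see tg3_reset_hw).
              */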
5717         if (!netif_running(dev))
5718                 return 0;
5719
5720         spin_lock_bh(&tp->lock);
5721         __tg3_set_mac_addr(tp);
5722         spin_unlock_bh(&tp->lock);
5723
5724         return 0;
5725 }
5726
5727 /* tp->lock is held. */
5728 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
5729                            dma_addr_t mapping, u32 maxlen_flags,
5730                            u32 nic_addr)
5731 {
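             /* A BDINFO block in NIC SRAM describes one ring: the 64-bit
              * host DMA address, a maxlen/flags word and, on pre-5705 chips,
              * the ring's location in NIC memory.
              */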
5732         tg3_write_mem(tp,
5733                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
5734                       ((u64) mapping >> 32));
5735         tg3_write_mem(tp,
5736                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
5737                       ((u64) mapping & 0xffffffff));
5738         tg3_write_mem(tp,
5739                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
5740                        maxlen_flags);
5741
5742         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5743                 tg3_write_mem(tp,
5744                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
5745                               nic_addr);
5746 }
5747
5748 static void __tg3_set_rx_mode(struct net_device *);
5749 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
5750 {
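             /* Program the host coalescing tick and frame-count thresholds.
              * The irq tick registers and the statistics-block tick register
              * are only written on pre-5705 chips (no TG3_FLG2_5705_PLUS).
              */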
5751         tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
5752         tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
5753         tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
5754         tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
5755         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5756                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
5757                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
5758         }
5759         tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
5760         tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
5761         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5762                 u32 val = ec->stats_block_coalesce_usecs;
5763
5764                 if (!netif_carrier_ok(tp->dev))
5765                         val = 0;
5766
5767                 tw32(HOSTCC_STAT_COAL_TICKS, val);
5768         }
5769 }
5770
5771 /* tp->lock is held. */
5772 static int tg3_reset_hw(struct tg3 *tp)
5773 {
5774         u32 val, rdmac_mode;
5775         int i, err, limit;
5776
5777         tg3_disable_ints(tp);
5778
5779         tg3_stop_fw(tp);
5780
5781         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
5782
5783         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
5784                 tg3_abort_hw(tp, 1);
5785         }
5786
5787         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
5788                 tg3_phy_reset(tp);
5789
5790         err = tg3_chip_reset(tp);
5791         if (err)
5792                 return err;
5793
5794         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
5795
5796         /* This works around an issue with Athlon chipsets on
5797          * B3 tigon3 silicon.  This bit has no effect on any
5798          * other revision.  But do not set this on PCI Express
5799          * chips.
5800          */
5801         if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
5802                 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
5803         tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5804
5805         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5806             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
5807                 val = tr32(TG3PCI_PCISTATE);
5808                 val |= PCISTATE_RETRY_SAME_DMA;
5809                 tw32(TG3PCI_PCISTATE, val);
5810         }
5811
5812         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
5813                 /* Enable some hw fixes.  */
5814                 val = tr32(TG3PCI_MSI_DATA);
5815                 val |= (1 << 26) | (1 << 28) | (1 << 29);
5816                 tw32(TG3PCI_MSI_DATA, val);
5817         }
5818
5819         /* Descriptor ring init may make accesses to the
5820          * NIC SRAM area to setup the TX descriptors, so we
5821          * can only do this after the hardware has been
5822          * successfully reset.
5823          */
5824         tg3_init_rings(tp);
5825
5826         /* This value is determined during the probe time DMA
5827          * engine test, tg3_test_dma.
5828          */
5829         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
5830
5831         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
5832                           GRC_MODE_4X_NIC_SEND_RINGS |
5833                           GRC_MODE_NO_TX_PHDR_CSUM |
5834                           GRC_MODE_NO_RX_PHDR_CSUM);
5835         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
5836         if (tp->tg3_flags & TG3_FLAG_NO_TX_PSEUDO_CSUM)
5837                 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
5838         if (tp->tg3_flags & TG3_FLAG_NO_RX_PSEUDO_CSUM)
5839                 tp->grc_mode |= GRC_MODE_NO_RX_PHDR_CSUM;
5840
5841         tw32(GRC_MODE,
5842              tp->grc_mode |
5843              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
5844
5845         /* Set up the timer prescaler register.  The clock is always 66 MHz. */
5846         val = tr32(GRC_MISC_CFG);
5847         val &= ~0xff;
5848         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
5849         tw32(GRC_MISC_CFG, val);
5850
5851         /* Initialize MBUF/DESC pool. */
5852         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
5853                 /* Do nothing.  */
5854         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
5855                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
5856                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
5857                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
5858                 else
5859                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
5860                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
5861                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
5862         }
5863 #if TG3_TSO_SUPPORT != 0
5864         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5865                 int fw_len;
5866
5867                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
5868                           TG3_TSO5_FW_RODATA_LEN +
5869                           TG3_TSO5_FW_DATA_LEN +
5870                           TG3_TSO5_FW_SBSS_LEN +
5871                           TG3_TSO5_FW_BSS_LEN);
5872                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
5873                 tw32(BUFMGR_MB_POOL_ADDR,
5874                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
5875                 tw32(BUFMGR_MB_POOL_SIZE,
5876                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
5877         }
5878 #endif
5879
5880         if (tp->dev->mtu <= ETH_DATA_LEN) {
5881                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5882                      tp->bufmgr_config.mbuf_read_dma_low_water);
5883                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5884                      tp->bufmgr_config.mbuf_mac_rx_low_water);
5885                 tw32(BUFMGR_MB_HIGH_WATER,
5886                      tp->bufmgr_config.mbuf_high_water);
5887         } else {
5888                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5889                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
5890                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5891                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
5892                 tw32(BUFMGR_MB_HIGH_WATER,
5893                      tp->bufmgr_config.mbuf_high_water_jumbo);
5894         }
5895         tw32(BUFMGR_DMA_LOW_WATER,
5896              tp->bufmgr_config.dma_low_water);
5897         tw32(BUFMGR_DMA_HIGH_WATER,
5898              tp->bufmgr_config.dma_high_water);
5899
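             /* Enable the buffer manager and poll (up to 2000 * 10us) for
              * the enable bit to stick.
              */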
5900         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
5901         for (i = 0; i < 2000; i++) {
5902                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
5903                         break;
5904                 udelay(10);
5905         }
5906         if (i >= 2000) {
5907                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
5908                        tp->dev->name);
5909                 return -ENODEV;
5910         }
5911
5912         /* Setup replenish threshold. */
5913         tw32(RCVBDI_STD_THRESH, tp->rx_pending / 8);
5914
5915         /* Initialize TG3_BDINFO's at:
5916          *  RCVDBDI_STD_BD:     standard eth size rx ring
5917          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
5918          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
5919          *
5920          * like so:
5921          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
5922          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
5923          *                              ring attribute flags
5924          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
5925          *
5926          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
5927          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
5928          *
5929          * The size of each ring is fixed in the firmware, but the location is
5930          * configurable.
5931          */
5932         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5933              ((u64) tp->rx_std_mapping >> 32));
5934         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5935              ((u64) tp->rx_std_mapping & 0xffffffff));
5936         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
5937              NIC_SRAM_RX_BUFFER_DESC);
5938
5939         /* Don't even try to program the JUMBO/MINI buffer descriptor
5940          * configs on 5705.
5941          */
5942         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5943                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5944                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
5945         } else {
5946                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5947                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5948
5949                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
5950                      BDINFO_FLAGS_DISABLED);
5951
5952                 /* Setup replenish threshold. */
5953                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
5954
5955                 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
5956                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5957                              ((u64) tp->rx_jumbo_mapping >> 32));
5958                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5959                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
5960                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5961                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5962                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
5963                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
5964                 } else {
5965                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5966                              BDINFO_FLAGS_DISABLED);
5967                 }
5968
5969         }
5970
5971         /* There is only one send ring on 5705/5750; no need to explicitly
5972          * disable the others.
5973          */
5974         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5975                 /* Clear out send RCB ring in SRAM. */
5976                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
5977                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5978                                       BDINFO_FLAGS_DISABLED);
5979         }
5980
5981         tp->tx_prod = 0;
5982         tp->tx_cons = 0;
5983         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5984         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5985
5986         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
5987                        tp->tx_desc_mapping,
5988                        (TG3_TX_RING_SIZE <<
5989                         BDINFO_FLAGS_MAXLEN_SHIFT),
5990                        NIC_SRAM_TX_BUFFER_DESC);
5991
5992         /* There is only one receive return ring on 5705/5750; no need
5993          * to explicitly disable the others.
5994          */
5995         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5996                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
5997                      i += TG3_BDINFO_SIZE) {
5998                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5999                                       BDINFO_FLAGS_DISABLED);
6000                 }
6001         }
6002
6003         tp->rx_rcb_ptr = 0;
6004         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
6005
6006         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
6007                        tp->rx_rcb_mapping,
6008                        (TG3_RX_RCB_RING_SIZE(tp) <<
6009                         BDINFO_FLAGS_MAXLEN_SHIFT),
6010                        0);
6011
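             /* Post the initial standard and (if enabled) jumbo receive
              * producer indices.
              */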
6012         tp->rx_std_ptr = tp->rx_pending;
6013         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
6014                      tp->rx_std_ptr);
6015
6016         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
6017                                                 tp->rx_jumbo_pending : 0;
6018         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
6019                      tp->rx_jumbo_ptr);
6020
6021         /* Initialize MAC address and backoff seed. */
6022         __tg3_set_mac_addr(tp);
6023
6024         /* MTU + ethernet header + 4-byte FCS + optional 4-byte VLAN tag */
6025         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
6026
6027         /* The slot time is changed by tg3_setup_phy if we
6028          * run at gigabit with half duplex.
6029          */
6030         tw32(MAC_TX_LENGTHS,
6031              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6032              (6 << TX_LENGTHS_IPG_SHIFT) |
6033              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6034
6035         /* Receive rules. */
6036         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
6037         tw32(RCVLPC_CONFIG, 0x0181);
6038
6039         /* Calculate the RDMAC_MODE setting early; we need it to determine
6040          * the RCVLPC_STATS_ENABLE mask.
6041          */
6042         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
6043                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
6044                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
6045                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
6046                       RDMAC_MODE_LNGREAD_ENAB);
6047         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
6048                 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
6049
6050         /* If statement applies to 5705 and 5750 PCI devices only */
6051         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6052              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6053             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
6054                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
6055                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6056                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6057                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
6058                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6059                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
6060                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6061                 }
6062         }
6063
6064         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6065                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6066
6067 #if TG3_TSO_SUPPORT != 0
6068         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6069                 rdmac_mode |= (1 << 27);
6070 #endif
6071
6072         /* Receive/send statistics. */
6073         if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
6074             (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6075                 val = tr32(RCVLPC_STATS_ENABLE);
6076                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
6077                 tw32(RCVLPC_STATS_ENABLE, val);
6078         } else {
6079                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
6080         }
6081         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
6082         tw32(SNDDATAI_STATSENAB, 0xffffff);
6083         tw32(SNDDATAI_STATSCTRL,
6084              (SNDDATAI_SCTRL_ENABLE |
6085               SNDDATAI_SCTRL_FASTUPD));
6086
6087         /* Setup host coalescing engine. */
6088         tw32(HOSTCC_MODE, 0);
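             /* Wait for the enable bit to clear before reprogramming the
              * coalescing parameters.
              */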
6089         for (i = 0; i < 2000; i++) {
6090                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
6091                         break;
6092                 udelay(10);
6093         }
6094
6095         __tg3_set_coalesce(tp, &tp->coal);
6096
6097         /* set status block DMA address */
6098         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6099              ((u64) tp->status_mapping >> 32));
6100         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6101              ((u64) tp->status_mapping & 0xffffffff));
6102
6103         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6104                 /* Status/statistics block address.  See tg3_timer,
6105                  * the tg3_periodic_fetch_stats call there, and
6106                  * tg3_get_stats to see how this works for 5705/5750 chips.
6107                  */
6108                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6109                      ((u64) tp->stats_mapping >> 32));
6110                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6111                      ((u64) tp->stats_mapping & 0xffffffff));
6112                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
6113                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
6114         }
6115
6116         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
6117
6118         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
6119         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
6120         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6121                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
6122
6123         /* Clear statistics/status block in chip, and status block in ram. */
6124         for (i = NIC_SRAM_STATS_BLK;
6125              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
6126              i += sizeof(u32)) {
6127                 tg3_write_mem(tp, i, 0);
6128                 udelay(40);
6129         }
6130         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
6131
6132         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6133                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
6134                 /* reset to prevent losing 1st rx packet intermittently */
6135                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6136                 udelay(10);
6137         }
6138
6139         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
6140                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
6141         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
6142         udelay(40);
6143
6144         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
6145          * If TG3_FLAG_EEPROM_WRITE_PROT is set, we should read the
6146          * register to preserve the GPIO settings for LOMs. The GPIOs,
6147          * whether used as inputs or outputs, are set by boot code after
6148          * reset.
6149          */
6150         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
6151                 u32 gpio_mask;
6152
6153                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE2 |
6154                             GRC_LCLCTRL_GPIO_OUTPUT0 | GRC_LCLCTRL_GPIO_OUTPUT2;
6155
6156                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
6157                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
6158                                      GRC_LCLCTRL_GPIO_OUTPUT3;
6159
6160                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6161                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
6162
6163                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
6164
6165                 /* GPIO1 must be driven high for eeprom write protect */
6166                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
6167                                        GRC_LCLCTRL_GPIO_OUTPUT1);
6168         }
6169         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6170         udelay(100);
6171
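             /* Zero the interrupt mailbox and the driver's last status tag. */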
6172         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
6173         tp->last_tag = 0;
6174
6175         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6176                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
6177                 udelay(40);
6178         }
6179
6180         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
6181                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
6182                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
6183                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
6184                WDMAC_MODE_LNGREAD_ENAB);
6185
6186         /* If statement applies to 5705 and 5750 PCI devices only */
6187         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6188              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6189             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6190                 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
6191                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6192                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6193                         /* nothing */
6194                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6195                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
6196                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
6197                         val |= WDMAC_MODE_RX_ACCEL;
6198                 }
6199         }
6200
6201         /* Enable host coalescing bug fix */
6202         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
6203             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787))
6204                 val |= (1 << 29);
6205
6206         tw32_f(WDMAC_MODE, val);
6207         udelay(40);
6208
6209         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
6210                 val = tr32(TG3PCI_X_CAPS);
6211                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
6212                         val &= ~PCIX_CAPS_BURST_MASK;
6213                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6214                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6215                         val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
6216                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6217                         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
6218                                 val |= (tp->split_mode_max_reqs <<
6219                                         PCIX_CAPS_SPLIT_SHIFT);
6220                 }
6221                 tw32(TG3PCI_X_CAPS, val);
6222         }
6223
6224         tw32_f(RDMAC_MODE, rdmac_mode);
6225         udelay(40);
6226
6227         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
6228         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6229                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
6230         tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
6231         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
6232         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
6233         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
6234         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
6235 #if TG3_TSO_SUPPORT != 0
6236         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6237                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
6238 #endif
6239         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
6240         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
6241
6242         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
6243                 err = tg3_load_5701_a0_firmware_fix(tp);
6244                 if (err)
6245                         return err;
6246         }
6247
6248 #if TG3_TSO_SUPPORT != 0
6249         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6250                 err = tg3_load_tso_firmware(tp);
6251                 if (err)
6252                         return err;
6253         }
6254 #endif
6255
6256         tp->tx_mode = TX_MODE_ENABLE;
6257         tw32_f(MAC_TX_MODE, tp->tx_mode);
6258         udelay(100);
6259
6260         tp->rx_mode = RX_MODE_ENABLE;
6261         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6262                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
6263
6264         tw32_f(MAC_RX_MODE, tp->rx_mode);
6265         udelay(10);
6266
6267         if (tp->link_config.phy_is_low_power) {
6268                 tp->link_config.phy_is_low_power = 0;
6269                 tp->link_config.speed = tp->link_config.orig_speed;
6270                 tp->link_config.duplex = tp->link_config.orig_duplex;
6271                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
6272         }
6273
6274         tp->mi_mode = MAC_MI_MODE_BASE;
6275         tw32_f(MAC_MI_MODE, tp->mi_mode);
6276         udelay(80);
6277
6278         tw32(MAC_LED_CTRL, tp->led_ctrl);
6279
6280         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
6281         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6282                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6283                 udelay(10);
6284         }
6285         tw32_f(MAC_RX_MODE, tp->rx_mode);
6286         udelay(10);
6287
6288         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6289                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
6290                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
6291                         /* Set drive transmission level to 1.2V, but only
6292                          * if the signal pre-emphasis bit is not set.  */
6293                         val = tr32(MAC_SERDES_CFG);
6294                         val &= 0xfffff000;
6295                         val |= 0x880;
6296                         tw32(MAC_SERDES_CFG, val);
6297                 }
6298                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
6299                         tw32(MAC_SERDES_CFG, 0x616000);
6300         }
6301
6302         /* Prevent chip from dropping frames when flow control
6303          * is enabled.
6304          */
6305         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
6306
6307         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
6308             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6309                 /* Use hardware link auto-negotiation */
6310                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
6311         }
6312
6313         if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
6314             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
6315                 u32 tmp;
6316
6317                 tmp = tr32(SERDES_RX_CTRL);
6318                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
6319                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
6320                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
6321                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6322         }
6323
6324         err = tg3_setup_phy(tp, 1);
6325         if (err)
6326                 return err;
6327
6328         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6329                 u32 tmp;
6330
6331                 /* Clear CRC stats. */
6332                 if (!tg3_readphy(tp, 0x1e, &tmp)) {
6333                         tg3_writephy(tp, 0x1e, tmp | 0x8000);
6334                         tg3_readphy(tp, 0x14, &tmp);
6335                 }
6336         }
6337
6338         __tg3_set_rx_mode(tp->dev);
6339
6340         /* Initialize receive rules. */
6341         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
6342         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
6343         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
6344         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
6345
6346         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6347             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6348                 limit = 8;
6349         else
6350                 limit = 16;
6351         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
6352                 limit -= 4;
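        /* The switch below falls through deliberately: starting at 'limit'
         * it clears every receive rule from (limit - 1) down to rule 4,
         * leaving rules 0/1 (programmed above) and 2/3 untouched.  When ASF
         * is enabled the last four rules are skipped as well, presumably
         * because the firmware uses them.
         */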
6353         switch (limit) {
6354         case 16:
6355                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
6356         case 15:
6357                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
6358         case 14:
6359                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
6360         case 13:
6361                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
6362         case 12:
6363                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
6364         case 11:
6365                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
6366         case 10:
6367                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
6368         case 9:
6369                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
6370         case 8:
6371                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
6372         case 7:
6373                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
6374         case 6:
6375                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
6376         case 5:
6377                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
6378         case 4:
6379                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
6380         case 3:
6381                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
6382         case 2:
6383         case 1:
6384
6385         default:
6386                 break;
6387         }
6388
6389         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
6390
6391         return 0;
6392 }
6393
6394 /* Called at device open time to get the chip ready for
6395  * packet processing.  Invoked with tp->lock held.
6396  */
6397 static int tg3_init_hw(struct tg3 *tp)
6398 {
6399         int err;
6400
6401         /* Force the chip into D0. */
6402         err = tg3_set_power_state(tp, PCI_D0);
6403         if (err)
6404                 goto out;
6405
6406         tg3_switch_clocks(tp);
6407
6408         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
6409
6410         err = tg3_reset_hw(tp);
6411
6412 out:
6413         return err;
6414 }
6415
6416 #define TG3_STAT_ADD32(PSTAT, REG) \
6417 do {    u32 __val = tr32(REG); \
6418         (PSTAT)->low += __val; \
6419         if ((PSTAT)->low < __val) \
6420                 (PSTAT)->high += 1; \
6421 } while (0)
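
/* TG3_STAT_ADD32() folds a 32-bit hardware counter into a 64-bit software
 * counter.  The wrap test relies on unsigned arithmetic: after the add,
 * (PSTAT)->low can only be smaller than the value just added if the low
 * word overflowed.  A minimal sketch with made-up numbers and a
 * hypothetical helper name, kept out of the build:
 */
#if 0
static void tg3_stat_add32_example(void)	/* illustration only */
{
	tg3_stat64_t stat = { .high = 0, .low = 0xfffffff0 };
	u32 hw_val = 0x20;		/* pretend tr32(REG) returned this */

	stat.low += hw_val;		/* unsigned wrap: low becomes 0x10 */
	if (stat.low < hw_val)		/* 0x10 < 0x20 => overflow detected */
		stat.high += 1;		/* 64-bit total is 0x100000010 */
}
#endif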
6422
6423 static void tg3_periodic_fetch_stats(struct tg3 *tp)
6424 {
6425         struct tg3_hw_stats *sp = tp->hw_stats;
6426
6427         if (!netif_carrier_ok(tp->dev))
6428                 return;
6429
6430         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
6431         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
6432         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
6433         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
6434         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
6435         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
6436         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
6437         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
6438         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
6439         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
6440         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
6441         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
6442         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
6443
6444         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
6445         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
6446         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
6447         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
6448         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
6449         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
6450         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
6451         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
6452         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
6453         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
6454         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
6455         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
6456         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
6457         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
6458 }
6459
6460 static void tg3_timer(unsigned long __opaque)
6461 {
6462         struct tg3 *tp = (struct tg3 *) __opaque;
6463
6464         spin_lock(&tp->lock);
6465
6466         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6467                 /* All of this garbage is needed because, when using
6468                  * non-tagged IRQ status, the mailbox/status_block protocol
6469                  * the chip uses with the CPU is prone to races.
6470                  */
6471                 if (tp->hw_status->status & SD_STATUS_UPDATED) {
6472                         tw32(GRC_LOCAL_CTRL,
6473                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
6474                 } else {
6475                         tw32(HOSTCC_MODE, tp->coalesce_mode |
6476                              (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
6477                 }
6478
6479                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
6480                         tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
6481                         spin_unlock(&tp->lock);
6482                         schedule_work(&tp->reset_task);
6483                         return;
6484                 }
6485         }
6486
6487         /* This part only runs once per second. */
6488         if (!--tp->timer_counter) {
6489                 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6490                         tg3_periodic_fetch_stats(tp);
6491
6492                 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
6493                         u32 mac_stat;
6494                         int phy_event;
6495
6496                         mac_stat = tr32(MAC_STATUS);
6497
6498                         phy_event = 0;
6499                         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
6500                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
6501                                         phy_event = 1;
6502                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
6503                                 phy_event = 1;
6504
6505                         if (phy_event)
6506                                 tg3_setup_phy(tp, 0);
6507                 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
6508                         u32 mac_stat = tr32(MAC_STATUS);
6509                         int need_setup = 0;
6510
6511                         if (netif_carrier_ok(tp->dev) &&
6512                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
6513                                 need_setup = 1;
6514                         }
6515                         if (!netif_carrier_ok(tp->dev) &&
6516                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
6517                                          MAC_STATUS_SIGNAL_DET))) {
6518                                 need_setup = 1;
6519                         }
6520                         if (need_setup) {
6521                                 tw32_f(MAC_MODE,
6522                                      (tp->mac_mode &
6523                                       ~MAC_MODE_PORT_MODE_MASK));
6524                                 udelay(40);
6525                                 tw32_f(MAC_MODE, tp->mac_mode);
6526                                 udelay(40);
6527                                 tg3_setup_phy(tp, 0);
6528                         }
6529                 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
6530                         tg3_serdes_parallel_detect(tp);
6531
6532                 tp->timer_counter = tp->timer_multiplier;
6533         }
6534
6535         /* Heartbeat is only sent once every 2 seconds.  */
6536         if (!--tp->asf_counter) {
6537                 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6538                         u32 val;
6539
6540                         tg3_write_mem_fast(tp, NIC_SRAM_FW_CMD_MBOX,
6541                                            FWCMD_NICDRV_ALIVE2);
6542                         tg3_write_mem_fast(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
6543                         /* 5 second timeout */
6544                         tg3_write_mem_fast(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
6545                         val = tr32(GRC_RX_CPU_EVENT);
6546                         val |= (1 << 14);
6547                         tw32(GRC_RX_CPU_EVENT, val);
6548                 }
6549                 tp->asf_counter = tp->asf_multiplier;
6550         }
6551
6552         spin_unlock(&tp->lock);
6553
6554         tp->timer.expires = jiffies + tp->timer_offset;
6555         add_timer(&tp->timer);
6556 }
6557
6558 static int tg3_request_irq(struct tg3 *tp)
6559 {
6560         irqreturn_t (*fn)(int, void *, struct pt_regs *);
6561         unsigned long flags;
6562         struct net_device *dev = tp->dev;
6563
6564         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6565                 fn = tg3_msi;
6566                 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
6567                         fn = tg3_msi_1shot;
6568                 flags = SA_SAMPLE_RANDOM;
6569         } else {
6570                 fn = tg3_interrupt;
6571                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6572                         fn = tg3_interrupt_tagged;
6573                 flags = SA_SHIRQ | SA_SAMPLE_RANDOM;
6574         }
6575         return request_irq(tp->pdev->irq, fn, flags, dev->name, dev);
6576 }
6577
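/* Interrupt self-test: temporarily install tg3_test_isr(), force a host
 * coalescing "now" event, and poll the interrupt mailbox for a nonzero
 * value for up to roughly 50ms (5 x 10ms) before restoring the normal
 * handler via tg3_request_irq().
 */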
6578 static int tg3_test_interrupt(struct tg3 *tp)
6579 {
6580         struct net_device *dev = tp->dev;
6581         int err, i;
6582         u32 int_mbox = 0;
6583
6584         if (!netif_running(dev))
6585                 return -ENODEV;
6586
6587         tg3_disable_ints(tp);
6588
6589         free_irq(tp->pdev->irq, dev);
6590
6591         err = request_irq(tp->pdev->irq, tg3_test_isr,
6592                           SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6593         if (err)
6594                 return err;
6595
6596         tp->hw_status->status &= ~SD_STATUS_UPDATED;
6597         tg3_enable_ints(tp);
6598
6599         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
6600                HOSTCC_MODE_NOW);
6601
6602         for (i = 0; i < 5; i++) {
6603                 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
6604                                         TG3_64BIT_REG_LOW);
6605                 if (int_mbox != 0)
6606                         break;
6607                 msleep(10);
6608         }
6609
6610         tg3_disable_ints(tp);
6611
6612         free_irq(tp->pdev->irq, dev);
6613         
6614         err = tg3_request_irq(tp);
6615
6616         if (err)
6617                 return err;
6618
6619         if (int_mbox != 0)
6620                 return 0;
6621
6622         return -EIO;
6623 }
6624
6625 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but
6626  * INTx mode is successfully restored.
6627  */
6628 static int tg3_test_msi(struct tg3 *tp)
6629 {
6630         struct net_device *dev = tp->dev;
6631         int err;
6632         u16 pci_cmd;
6633
6634         if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
6635                 return 0;
6636
6637         /* Turn off SERR reporting in case MSI terminates with Master
6638          * Abort.
6639          */
6640         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
6641         pci_write_config_word(tp->pdev, PCI_COMMAND,
6642                               pci_cmd & ~PCI_COMMAND_SERR);
6643
6644         err = tg3_test_interrupt(tp);
6645
6646         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
6647
6648         if (!err)
6649                 return 0;
6650
6651         /* other failures */
6652         if (err != -EIO)
6653                 return err;
6654
6655         /* MSI test failed, go back to INTx mode */
6656         printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
6657                "switching to INTx mode. Please report this failure to "
6658                "the PCI maintainer and include system chipset information.\n",
6659                        tp->dev->name);
6660
6661         free_irq(tp->pdev->irq, dev);
6662         pci_disable_msi(tp->pdev);
6663
6664         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6665
6666         err = tg3_request_irq(tp);
6667         if (err)
6668                 return err;
6669
6670         /* Need to reset the chip because the MSI cycle may have terminated
6671          * with Master Abort.
6672          */
6673         tg3_full_lock(tp, 1);
6674
6675         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6676         err = tg3_init_hw(tp);
6677
6678         tg3_full_unlock(tp);
6679
6680         if (err)
6681                 free_irq(tp->pdev->irq, dev);
6682
6683         return err;
6684 }
6685
6686 static int tg3_open(struct net_device *dev)
6687 {
6688         struct tg3 *tp = netdev_priv(dev);
6689         int err;
6690
6691         tg3_full_lock(tp, 0);
6692
6693         err = tg3_set_power_state(tp, PCI_D0);
6694         if (err)
6695                 return err;
6696
6697         tg3_disable_ints(tp);
6698         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
6699
6700         tg3_full_unlock(tp);
6701
6702         /* The placement of this call is tied
6703          * to the setup and use of Host TX descriptors.
6704          */
6705         err = tg3_alloc_consistent(tp);
6706         if (err)
6707                 return err;
6708
6709         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
6710             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
6711             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX) &&
6712             !((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) &&
6713               (tp->pdev_peer == tp->pdev))) {
6714                 /* All MSI-supporting chips should support tagged
6715                  * status.  Warn and skip MSI if that is not the case.
6716                  */
6717                 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6718                         printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
6719                                "Not using MSI.\n", tp->dev->name);
6720                 } else if (pci_enable_msi(tp->pdev) == 0) {
6721                         u32 msi_mode;
6722
6723                         msi_mode = tr32(MSGINT_MODE);
6724                         tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
6725                         tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
6726                 }
6727         }
6728         err = tg3_request_irq(tp);
6729
6730         if (err) {
6731                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6732                         pci_disable_msi(tp->pdev);
6733                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6734                 }
6735                 tg3_free_consistent(tp);
6736                 return err;
6737         }
6738
6739         tg3_full_lock(tp, 0);
6740
6741         err = tg3_init_hw(tp);
6742         if (err) {
6743                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6744                 tg3_free_rings(tp);
6745         } else {
6746                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6747                         tp->timer_offset = HZ;
6748                 else
6749                         tp->timer_offset = HZ / 10;
6750
6751                 BUG_ON(tp->timer_offset > HZ);
6752                 tp->timer_counter = tp->timer_multiplier =
6753                         (HZ / tp->timer_offset);
6754                 tp->asf_counter = tp->asf_multiplier =
6755                         ((HZ / tp->timer_offset) * 2);
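                /* With tagged status the timer fires once per second with a
                 * multiplier of 1; otherwise it fires every HZ/10 jiffies
                 * with a multiplier of 10, so the once-per-second work in
                 * tg3_timer() runs at the same real rate either way.  The
                 * ASF heartbeat counter is twice that, i.e. every 2 seconds.
                 */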
6756
6757                 init_timer(&tp->timer);
6758                 tp->timer.expires = jiffies + tp->timer_offset;
6759                 tp->timer.data = (unsigned long) tp;
6760                 tp->timer.function = tg3_timer;
6761         }
6762
6763         tg3_full_unlock(tp);
6764
6765         if (err) {
6766                 free_irq(tp->pdev->irq, dev);
6767                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6768                         pci_disable_msi(tp->pdev);
6769                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6770                 }
6771                 tg3_free_consistent(tp);
6772                 return err;
6773         }
6774
6775         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6776                 err = tg3_test_msi(tp);
6777
6778                 if (err) {
6779                         tg3_full_lock(tp, 0);
6780
6781                         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6782                                 pci_disable_msi(tp->pdev);
6783                                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6784                         }
6785                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6786                         tg3_free_rings(tp);
6787                         tg3_free_consistent(tp);
6788
6789                         tg3_full_unlock(tp);
6790
6791                         return err;
6792                 }
6793
6794                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6795                         if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
6796                                 u32 val = tr32(0x7c04);
6797
6798                                 tw32(0x7c04, val | (1 << 29));
6799                         }
6800                 }
6801         }
6802
6803         tg3_full_lock(tp, 0);
6804
6805         add_timer(&tp->timer);
6806         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
6807         tg3_enable_ints(tp);
6808
6809         tg3_full_unlock(tp);
6810
6811         netif_start_queue(dev);
6812
6813         return 0;
6814 }
6815
6816 #if 0
6817 /*static*/ void tg3_dump_state(struct tg3 *tp)
6818 {
6819         u32 val32, val32_2, val32_3, val32_4, val32_5;
6820         u16 val16;
6821         int i;
6822
6823         pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
6824         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
6825         printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
6826                val16, val32);
6827
6828         /* MAC block */
6829         printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
6830                tr32(MAC_MODE), tr32(MAC_STATUS));
6831         printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
6832                tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
6833         printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
6834                tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
6835         printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
6836                tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
6837
6838         /* Send data initiator control block */
6839         printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
6840                tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
6841         printk("       SNDDATAI_STATSCTRL[%08x]\n",
6842                tr32(SNDDATAI_STATSCTRL));
6843
6844         /* Send data completion control block */
6845         printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
6846
6847         /* Send BD ring selector block */
6848         printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
6849                tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
6850
6851         /* Send BD initiator control block */
6852         printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
6853                tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
6854
6855         /* Send BD completion control block */
6856         printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
6857
6858         /* Receive list placement control block */
6859         printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
6860                tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
6861         printk("       RCVLPC_STATSCTRL[%08x]\n",
6862                tr32(RCVLPC_STATSCTRL));
6863
6864         /* Receive data and receive BD initiator control block */
6865         printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
6866                tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
6867
6868         /* Receive data completion control block */
6869         printk("DEBUG: RCVDCC_MODE[%08x]\n",
6870                tr32(RCVDCC_MODE));
6871
6872         /* Receive BD initiator control block */
6873         printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
6874                tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
6875
6876         /* Receive BD completion control block */
6877         printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
6878                tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
6879
6880         /* Receive list selector control block */
6881         printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
6882                tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
6883
6884         /* Mbuf cluster free block */
6885         printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
6886                tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
6887
6888         /* Host coalescing control block */
6889         printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
6890                tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
6891         printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
6892                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6893                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6894         printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
6895                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6896                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6897         printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
6898                tr32(HOSTCC_STATS_BLK_NIC_ADDR));
6899         printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
6900                tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
6901
6902         /* Memory arbiter control block */
6903         printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
6904                tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
6905
6906         /* Buffer manager control block */
6907         printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
6908                tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
6909         printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
6910                tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
6911         printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
6912                "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
6913                tr32(BUFMGR_DMA_DESC_POOL_ADDR),
6914                tr32(BUFMGR_DMA_DESC_POOL_SIZE));
6915
6916         /* Read DMA control block */
6917         printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
6918                tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
6919
6920         /* Write DMA control block */
6921         printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
6922                tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
6923
6924         /* DMA completion block */
6925         printk("DEBUG: DMAC_MODE[%08x]\n",
6926                tr32(DMAC_MODE));
6927
6928         /* GRC block */
6929         printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
6930                tr32(GRC_MODE), tr32(GRC_MISC_CFG));
6931         printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
6932                tr32(GRC_LOCAL_CTRL));
6933
6934         /* TG3_BDINFOs */
6935         printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
6936                tr32(RCVDBDI_JUMBO_BD + 0x0),
6937                tr32(RCVDBDI_JUMBO_BD + 0x4),
6938                tr32(RCVDBDI_JUMBO_BD + 0x8),
6939                tr32(RCVDBDI_JUMBO_BD + 0xc));
6940         printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
6941                tr32(RCVDBDI_STD_BD + 0x0),
6942                tr32(RCVDBDI_STD_BD + 0x4),
6943                tr32(RCVDBDI_STD_BD + 0x8),
6944                tr32(RCVDBDI_STD_BD + 0xc));
6945         printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
6946                tr32(RCVDBDI_MINI_BD + 0x0),
6947                tr32(RCVDBDI_MINI_BD + 0x4),
6948                tr32(RCVDBDI_MINI_BD + 0x8),
6949                tr32(RCVDBDI_MINI_BD + 0xc));
6950
6951         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
6952         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
6953         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
6954         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
6955         printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
6956                val32, val32_2, val32_3, val32_4);
6957
6958         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
6959         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
6960         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
6961         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
6962         printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
6963                val32, val32_2, val32_3, val32_4);
6964
6965         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
6966         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
6967         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
6968         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
6969         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
6970         printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
6971                val32, val32_2, val32_3, val32_4, val32_5);
6972
6973         /* SW status block */
6974         printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6975                tp->hw_status->status,
6976                tp->hw_status->status_tag,
6977                tp->hw_status->rx_jumbo_consumer,
6978                tp->hw_status->rx_consumer,
6979                tp->hw_status->rx_mini_consumer,
6980                tp->hw_status->idx[0].rx_producer,
6981                tp->hw_status->idx[0].tx_consumer);
6982
6983         /* SW statistics block */
6984         printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
6985                ((u32 *)tp->hw_stats)[0],
6986                ((u32 *)tp->hw_stats)[1],
6987                ((u32 *)tp->hw_stats)[2],
6988                ((u32 *)tp->hw_stats)[3]);
6989
6990         /* Mailboxes */
6991         printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
6992                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
6993                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
6994                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
6995                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
6996
6997         /* NIC side send descriptors. */
6998         for (i = 0; i < 6; i++) {
6999                 unsigned long txd;
7000
7001                 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
7002                         + (i * sizeof(struct tg3_tx_buffer_desc));
7003                 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
7004                        i,
7005                        readl(txd + 0x0), readl(txd + 0x4),
7006                        readl(txd + 0x8), readl(txd + 0xc));
7007         }
7008
7009         /* NIC side RX descriptors. */
7010         for (i = 0; i < 6; i++) {
7011                 unsigned long rxd;
7012
7013                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
7014                         + (i * sizeof(struct tg3_rx_buffer_desc));
7015                 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
7016                        i,
7017                        readl(rxd + 0x0), readl(rxd + 0x4),
7018                        readl(rxd + 0x8), readl(rxd + 0xc));
7019                 rxd += (4 * sizeof(u32));
7020                 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
7021                        i,
7022                        readl(rxd + 0x0), readl(rxd + 0x4),
7023                        readl(rxd + 0x8), readl(rxd + 0xc));
7024         }
7025
7026         for (i = 0; i < 6; i++) {
7027                 unsigned long rxd;
7028
7029                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
7030                         + (i * sizeof(struct tg3_rx_buffer_desc));
7031                 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
7032                        i,
7033                        readl(rxd + 0x0), readl(rxd + 0x4),
7034                        readl(rxd + 0x8), readl(rxd + 0xc));
7035                 rxd += (4 * sizeof(u32));
7036                 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
7037                        i,
7038                        readl(rxd + 0x0), readl(rxd + 0x4),
7039                        readl(rxd + 0x8), readl(rxd + 0xc));
7040         }
7041 }
7042 #endif
7043
7044 static struct net_device_stats *tg3_get_stats(struct net_device *);
7045 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
7046
7047 static int tg3_close(struct net_device *dev)
7048 {
7049         struct tg3 *tp = netdev_priv(dev);
7050
7051         /* Calling flush_scheduled_work() may deadlock because
7052          * linkwatch_event() may be on the workqueue and it will try to get
7053          * the rtnl_lock, which we are holding.
7054          */
7055         while (tp->tg3_flags & TG3_FLAG_IN_RESET_TASK)
7056                 msleep(1);
7057
7058         netif_stop_queue(dev);
7059
7060         del_timer_sync(&tp->timer);
7061
7062         tg3_full_lock(tp, 1);
7063 #if 0
7064         tg3_dump_state(tp);
7065 #endif
7066
7067         tg3_disable_ints(tp);
7068
7069         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7070         tg3_free_rings(tp);
7071         tp->tg3_flags &=
7072                 ~(TG3_FLAG_INIT_COMPLETE |
7073                   TG3_FLAG_GOT_SERDES_FLOWCTL);
7074
7075         tg3_full_unlock(tp);
7076
7077         free_irq(tp->pdev->irq, dev);
7078         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7079                 pci_disable_msi(tp->pdev);
7080                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7081         }
7082
7083         memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
7084                sizeof(tp->net_stats_prev));
7085         memcpy(&tp->estats_prev, tg3_get_estats(tp),
7086                sizeof(tp->estats_prev));
7087
7088         tg3_free_consistent(tp);
7089
7090         tg3_set_power_state(tp, PCI_D3hot);
7091
7092         netif_carrier_off(tp->dev);
7093
7094         return 0;
7095 }
7096
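/* Fold a 64-bit hardware counter into an unsigned long.  On 32-bit hosts
 * only the low word is reported, since unsigned long cannot hold 64 bits;
 * 64-bit hosts get the full high:low value.
 */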
7097 static inline unsigned long get_stat64(tg3_stat64_t *val)
7098 {
7099         unsigned long ret;
7100
7101 #if (BITS_PER_LONG == 32)
7102         ret = val->low;
7103 #else
7104         ret = ((u64)val->high << 32) | ((u64)val->low);
7105 #endif
7106         return ret;
7107 }
7108
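/* On 5700/5701 copper boards the CRC error count is kept by the PHY rather
 * than the MAC statistics block; it is read through PHY registers 0x1e/0x14
 * and accumulated in software (the PHY counter apparently resets when read).
 */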
7109 static unsigned long calc_crc_errors(struct tg3 *tp)
7110 {
7111         struct tg3_hw_stats *hw_stats = tp->hw_stats;
7112
7113         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7114             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
7115              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
7116                 u32 val;
7117
7118                 spin_lock_bh(&tp->lock);
7119                 if (!tg3_readphy(tp, 0x1e, &val)) {
7120                         tg3_writephy(tp, 0x1e, val | 0x8000);
7121                         tg3_readphy(tp, 0x14, &val);
7122                 } else
7123                         val = 0;
7124                 spin_unlock_bh(&tp->lock);
7125
7126                 tp->phy_crc_errors += val;
7127
7128                 return tp->phy_crc_errors;
7129         }
7130
7131         return get_stat64(&hw_stats->rx_fcs_errors);
7132 }
7133
7134 #define ESTAT_ADD(member) \
7135         estats->member =        old_estats->member + \
7136                                 get_stat64(&hw_stats->member)
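
/* ESTAT_ADD() adds the live hardware counter to the snapshot taken in
 * tg3_close(), so the ethtool statistics keep accumulating across an
 * interface down/up cycle instead of restarting from zero.
 */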
7137
7138 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
7139 {
7140         struct tg3_ethtool_stats *estats = &tp->estats;
7141         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
7142         struct tg3_hw_stats *hw_stats = tp->hw_stats;
7143
7144         if (!hw_stats)
7145                 return old_estats;
7146
7147         ESTAT_ADD(rx_octets);
7148         ESTAT_ADD(rx_fragments);
7149         ESTAT_ADD(rx_ucast_packets);
7150         ESTAT_ADD(rx_mcast_packets);
7151         ESTAT_ADD(rx_bcast_packets);
7152         ESTAT_ADD(rx_fcs_errors);
7153         ESTAT_ADD(rx_align_errors);
7154         ESTAT_ADD(rx_xon_pause_rcvd);
7155         ESTAT_ADD(rx_xoff_pause_rcvd);
7156         ESTAT_ADD(rx_mac_ctrl_rcvd);
7157         ESTAT_ADD(rx_xoff_entered);
7158         ESTAT_ADD(rx_frame_too_long_errors);
7159         ESTAT_ADD(rx_jabbers);
7160         ESTAT_ADD(rx_undersize_packets);
7161         ESTAT_ADD(rx_in_length_errors);
7162         ESTAT_ADD(rx_out_length_errors);
7163         ESTAT_ADD(rx_64_or_less_octet_packets);
7164         ESTAT_ADD(rx_65_to_127_octet_packets);
7165         ESTAT_ADD(rx_128_to_255_octet_packets);
7166         ESTAT_ADD(rx_256_to_511_octet_packets);
7167         ESTAT_ADD(rx_512_to_1023_octet_packets);
7168         ESTAT_ADD(rx_1024_to_1522_octet_packets);
7169         ESTAT_ADD(rx_1523_to_2047_octet_packets);
7170         ESTAT_ADD(rx_2048_to_4095_octet_packets);
7171         ESTAT_ADD(rx_4096_to_8191_octet_packets);
7172         ESTAT_ADD(rx_8192_to_9022_octet_packets);
7173
7174         ESTAT_ADD(tx_octets);
7175         ESTAT_ADD(tx_collisions);
7176         ESTAT_ADD(tx_xon_sent);
7177         ESTAT_ADD(tx_xoff_sent);
7178         ESTAT_ADD(tx_flow_control);
7179         ESTAT_ADD(tx_mac_errors);
7180         ESTAT_ADD(tx_single_collisions);
7181         ESTAT_ADD(tx_mult_collisions);
7182         ESTAT_ADD(tx_deferred);
7183         ESTAT_ADD(tx_excessive_collisions);
7184         ESTAT_ADD(tx_late_collisions);
7185         ESTAT_ADD(tx_collide_2times);
7186         ESTAT_ADD(tx_collide_3times);
7187         ESTAT_ADD(tx_collide_4times);
7188         ESTAT_ADD(tx_collide_5times);
7189         ESTAT_ADD(tx_collide_6times);
7190         ESTAT_ADD(tx_collide_7times);
7191         ESTAT_ADD(tx_collide_8times);
7192         ESTAT_ADD(tx_collide_9times);
7193         ESTAT_ADD(tx_collide_10times);
7194         ESTAT_ADD(tx_collide_11times);
7195         ESTAT_ADD(tx_collide_12times);
7196         ESTAT_ADD(tx_collide_13times);
7197         ESTAT_ADD(tx_collide_14times);
7198         ESTAT_ADD(tx_collide_15times);
7199         ESTAT_ADD(tx_ucast_packets);
7200         ESTAT_ADD(tx_mcast_packets);
7201         ESTAT_ADD(tx_bcast_packets);
7202         ESTAT_ADD(tx_carrier_sense_errors);
7203         ESTAT_ADD(tx_discards);
7204         ESTAT_ADD(tx_errors);
7205
7206         ESTAT_ADD(dma_writeq_full);
7207         ESTAT_ADD(dma_write_prioq_full);
7208         ESTAT_ADD(rxbds_empty);
7209         ESTAT_ADD(rx_discards);
7210         ESTAT_ADD(rx_errors);
7211         ESTAT_ADD(rx_threshold_hit);
7212
7213         ESTAT_ADD(dma_readq_full);
7214         ESTAT_ADD(dma_read_prioq_full);
7215         ESTAT_ADD(tx_comp_queue_full);
7216
7217         ESTAT_ADD(ring_set_send_prod_index);
7218         ESTAT_ADD(ring_status_update);
7219         ESTAT_ADD(nic_irqs);
7220         ESTAT_ADD(nic_avoided_irqs);
7221         ESTAT_ADD(nic_tx_threshold_hit);
7222
7223         return estats;
7224 }
7225
7226 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
7227 {
7228         struct tg3 *tp = netdev_priv(dev);
7229         struct net_device_stats *stats = &tp->net_stats;
7230         struct net_device_stats *old_stats = &tp->net_stats_prev;
7231         struct tg3_hw_stats *hw_stats = tp->hw_stats;
7232
7233         if (!hw_stats)
7234                 return old_stats;
7235
7236         stats->rx_packets = old_stats->rx_packets +
7237                 get_stat64(&hw_stats->rx_ucast_packets) +
7238                 get_stat64(&hw_stats->rx_mcast_packets) +
7239                 get_stat64(&hw_stats->rx_bcast_packets);
7240                 
7241         stats->tx_packets = old_stats->tx_packets +
7242                 get_stat64(&hw_stats->tx_ucast_packets) +
7243                 get_stat64(&hw_stats->tx_mcast_packets) +
7244                 get_stat64(&hw_stats->tx_bcast_packets);
7245
7246         stats->rx_bytes = old_stats->rx_bytes +
7247                 get_stat64(&hw_stats->rx_octets);
7248         stats->tx_bytes = old_stats->tx_bytes +
7249                 get_stat64(&hw_stats->tx_octets);
7250
7251         stats->rx_errors = old_stats->rx_errors +
7252                 get_stat64(&hw_stats->rx_errors);
7253         stats->tx_errors = old_stats->tx_errors +
7254                 get_stat64(&hw_stats->tx_errors) +
7255                 get_stat64(&hw_stats->tx_mac_errors) +
7256                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
7257                 get_stat64(&hw_stats->tx_discards);
7258
7259         stats->multicast = old_stats->multicast +
7260                 get_stat64(&hw_stats->rx_mcast_packets);
7261         stats->collisions = old_stats->collisions +
7262                 get_stat64(&hw_stats->tx_collisions);
7263
7264         stats->rx_length_errors = old_stats->rx_length_errors +
7265                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
7266                 get_stat64(&hw_stats->rx_undersize_packets);
7267
7268         stats->rx_over_errors = old_stats->rx_over_errors +
7269                 get_stat64(&hw_stats->rxbds_empty);
7270         stats->rx_frame_errors = old_stats->rx_frame_errors +
7271                 get_stat64(&hw_stats->rx_align_errors);
7272         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
7273                 get_stat64(&hw_stats->tx_discards);
7274         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
7275                 get_stat64(&hw_stats->tx_carrier_sense_errors);
7276
7277         stats->rx_crc_errors = old_stats->rx_crc_errors +
7278                 calc_crc_errors(tp);
7279
7280         stats->rx_missed_errors = old_stats->rx_missed_errors +
7281                 get_stat64(&hw_stats->rx_discards);
7282
7283         return stats;
7284 }
7285
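/* Bit-reflected CRC-32 (polynomial 0xedb88320, the same CRC used for the
 * Ethernet FCS), processed one byte at a time LSB first and bit-inverted
 * on return.  The multicast filter below uses the low 7 bits of the raw
 * (un-inverted) CRC to pick one of 128 hash bits.
 */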
7286 static inline u32 calc_crc(unsigned char *buf, int len)
7287 {
7288         u32 reg;
7289         u32 tmp;
7290         int j, k;
7291
7292         reg = 0xffffffff;
7293
7294         for (j = 0; j < len; j++) {
7295                 reg ^= buf[j];
7296
7297                 for (k = 0; k < 8; k++) {
7298                         tmp = reg & 0x01;
7299
7300                         reg >>= 1;
7301
7302                         if (tmp) {
7303                                 reg ^= 0xedb88320;
7304                         }
7305                 }
7306         }
7307
7308         return ~reg;
7309 }
7310
7311 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
7312 {
7313         /* accept or reject all multicast frames */
7314         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
7315         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
7316         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
7317         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
7318 }
7319
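/* Illustrative only (kept out of the build): how __tg3_set_rx_mode() below
 * maps one multicast address to a bit in the four MAC_HASH registers.  The
 * helper name and the address are made up for the example.
 */
#if 0
static void tg3_example_mc_hash_bit(void)
{
	u8 addr[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	u32 crc = calc_crc(addr, ETH_ALEN);
	u32 bit = ~crc & 0x7f;			/* low 7 bits of the raw CRC */
	u32 regidx = (bit & 0x60) >> 5;		/* MAC_HASH_REG_0 .. _3 */

	bit &= 0x1f;				/* bit within that register */
	/* Frames to 'addr' pass the filter when bit 'bit' of
	 * MAC_HASH_REG_<regidx> is set.
	 */
}
#endif
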
7320 static void __tg3_set_rx_mode(struct net_device *dev)
7321 {
7322         struct tg3 *tp = netdev_priv(dev);
7323         u32 rx_mode;
7324
7325         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
7326                                   RX_MODE_KEEP_VLAN_TAG);
7327
7328         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
7329          * flag clear.
7330          */
7331 #if TG3_VLAN_TAG_USED
7332         if (!tp->vlgrp &&
7333             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
7334                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
7335 #else
7336         /* By definition, VLAN is always disabled in this
7337          * case.
7338          */
7339         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
7340                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
7341 #endif
7342
7343         if (dev->flags & IFF_PROMISC) {
7344                 /* Promiscuous mode. */
7345                 rx_mode |= RX_MODE_PROMISC;
7346         } else if (dev->flags & IFF_ALLMULTI) {
7347                 /* Accept all multicast. */
7348                 tg3_set_multi(tp, 1);
7349         } else if (dev->mc_count < 1) {
7350                 /* Reject all multicast. */
7351                 tg3_set_multi(tp, 0);
7352         } else {
7353                 /* Accept one or more multicast addresses. */
7354                 struct dev_mc_list *mclist;
7355                 unsigned int i;
7356                 u32 mc_filter[4] = { 0, };
7357                 u32 regidx;
7358                 u32 bit;
7359                 u32 crc;
7360
7361                 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
7362                      i++, mclist = mclist->next) {
7363
7364                         crc = calc_crc(mclist->dmi_addr, ETH_ALEN);
7365                         bit = ~crc & 0x7f;
7366                         regidx = (bit & 0x60) >> 5;
7367                         bit &= 0x1f;
7368                         mc_filter[regidx] |= (1 << bit);
7369                 }
7370
7371                 tw32(MAC_HASH_REG_0, mc_filter[0]);
7372                 tw32(MAC_HASH_REG_1, mc_filter[1]);
7373                 tw32(MAC_HASH_REG_2, mc_filter[2]);
7374                 tw32(MAC_HASH_REG_3, mc_filter[3]);
7375         }
7376
7377         if (rx_mode != tp->rx_mode) {
7378                 tp->rx_mode = rx_mode;
7379                 tw32_f(MAC_RX_MODE, rx_mode);
7380                 udelay(10);
7381         }
7382 }
7383
7384 static void tg3_set_rx_mode(struct net_device *dev)
7385 {
7386         struct tg3 *tp = netdev_priv(dev);
7387
7388         if (!netif_running(dev))
7389                 return;
7390
7391         tg3_full_lock(tp, 0);
7392         __tg3_set_rx_mode(dev);
7393         tg3_full_unlock(tp);
7394 }
7395
7396 #define TG3_REGDUMP_LEN         (32 * 1024)
7397
7398 static int tg3_get_regs_len(struct net_device *dev)
7399 {
7400         return TG3_REGDUMP_LEN;
7401 }
7402
7403 static void tg3_get_regs(struct net_device *dev,
7404                 struct ethtool_regs *regs, void *_p)
7405 {
7406         u32 *p = _p;
7407         struct tg3 *tp = netdev_priv(dev);
7408         u8 *orig_p = _p;
7409         int i;
7410
7411         regs->version = 0;
7412
7413         memset(p, 0, TG3_REGDUMP_LEN);
7414
7415         if (tp->link_config.phy_is_low_power)
7416                 return;
7417
7418         tg3_full_lock(tp, 0);
7419
7420 #define __GET_REG32(reg)        (*(p)++ = tr32(reg))
7421 #define GET_REG32_LOOP(base,len)                \
7422 do {    p = (u32 *)(orig_p + (base));           \
7423         for (i = 0; i < len; i += 4)            \
7424                 __GET_REG32((base) + i);        \
7425 } while (0)
7426 #define GET_REG32_1(reg)                        \
7427 do {    p = (u32 *)(orig_p + (reg));            \
7428         __GET_REG32((reg));                     \
7429 } while (0)
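
        /* Each block is copied to its own offset inside the 32kB dump
         * (p = orig_p + base), so a register's position in the dump matches
         * its hardware offset; gaps stay zero from the memset above, as does
         * the whole dump when the PHY is in low-power mode.
         */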
7430
7431         GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
7432         GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
7433         GET_REG32_LOOP(MAC_MODE, 0x4f0);
7434         GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
7435         GET_REG32_1(SNDDATAC_MODE);
7436         GET_REG32_LOOP(SNDBDS_MODE, 0x80);
7437         GET_REG32_LOOP(SNDBDI_MODE, 0x48);
7438         GET_REG32_1(SNDBDC_MODE);
7439         GET_REG32_LOOP(RCVLPC_MODE, 0x20);
7440         GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
7441         GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
7442         GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
7443         GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
7444         GET_REG32_1(RCVDCC_MODE);
7445         GET_REG32_LOOP(RCVBDI_MODE, 0x20);
7446         GET_REG32_LOOP(RCVCC_MODE, 0x14);
7447         GET_REG32_LOOP(RCVLSC_MODE, 0x08);
7448         GET_REG32_1(MBFREE_MODE);
7449         GET_REG32_LOOP(HOSTCC_MODE, 0x100);
7450         GET_REG32_LOOP(MEMARB_MODE, 0x10);
7451         GET_REG32_LOOP(BUFMGR_MODE, 0x58);
7452         GET_REG32_LOOP(RDMAC_MODE, 0x08);
7453         GET_REG32_LOOP(WDMAC_MODE, 0x08);
7454         GET_REG32_1(RX_CPU_MODE);
7455         GET_REG32_1(RX_CPU_STATE);
7456         GET_REG32_1(RX_CPU_PGMCTR);
7457         GET_REG32_1(RX_CPU_HWBKPT);
7458         GET_REG32_1(TX_CPU_MODE);
7459         GET_REG32_1(TX_CPU_STATE);
7460         GET_REG32_1(TX_CPU_PGMCTR);
7461         GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
7462         GET_REG32_LOOP(FTQ_RESET, 0x120);
7463         GET_REG32_LOOP(MSGINT_MODE, 0x0c);
7464         GET_REG32_1(DMAC_MODE);
7465         GET_REG32_LOOP(GRC_MODE, 0x4c);
7466         if (tp->tg3_flags & TG3_FLAG_NVRAM)
7467                 GET_REG32_LOOP(NVRAM_CMD, 0x24);
7468
7469 #undef __GET_REG32
7470 #undef GET_REG32_LOOP
7471 #undef GET_REG32_1
7472
7473         tg3_full_unlock(tp);
7474 }
7475
7476 static int tg3_get_eeprom_len(struct net_device *dev)
7477 {
7478         struct tg3 *tp = netdev_priv(dev);
7479
7480         return tp->nvram_size;
7481 }
7482
7483 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
7484 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
7485
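/* NVRAM is accessed in 32-bit words, so an ethtool EEPROM read is split
 * into an unaligned head, a run of whole words, and an unaligned tail;
 * each word is converted to little-endian (cpu_to_le32) before the
 * requested bytes are copied out.
 */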
7486 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7487 {
7488         struct tg3 *tp = netdev_priv(dev);
7489         int ret;
7490         u8  *pd;
7491         u32 i, offset, len, val, b_offset, b_count;
7492
7493         if (tp->link_config.phy_is_low_power)
7494                 return -EAGAIN;
7495
7496         offset = eeprom->offset;
7497         len = eeprom->len;
7498         eeprom->len = 0;
7499
7500         eeprom->magic = TG3_EEPROM_MAGIC;
7501
7502         if (offset & 3) {
7503                 /* adjustments to start on required 4 byte boundary */
7504                 b_offset = offset & 3;
7505                 b_count = 4 - b_offset;
7506                 if (b_count > len) {
7507                         /* i.e. offset=1 len=2 */
7508                         b_count = len;
7509                 }
7510                 ret = tg3_nvram_read(tp, offset-b_offset, &val);
7511                 if (ret)
7512                         return ret;
7513                 val = cpu_to_le32(val);
7514                 memcpy(data, ((char*)&val) + b_offset, b_count);
7515                 len -= b_count;
7516                 offset += b_count;
7517                 eeprom->len += b_count;
7518         }
7519
7520         /* read bytes up to the last 4-byte boundary */
7521         pd = &data[eeprom->len];
7522         for (i = 0; i < (len - (len & 3)); i += 4) {
7523                 ret = tg3_nvram_read(tp, offset + i, &val);
7524                 if (ret) {
7525                         eeprom->len += i;
7526                         return ret;
7527                 }
7528                 val = cpu_to_le32(val);
7529                 memcpy(pd + i, &val, 4);
7530         }
7531         eeprom->len += i;
7532
7533         if (len & 3) {
7534                 /* read last bytes not ending on 4 byte boundary */
7535                 pd = &data[eeprom->len];
7536                 b_count = len & 3;
7537                 b_offset = offset + len - b_count;
7538                 ret = tg3_nvram_read(tp, b_offset, &val);
7539                 if (ret)
7540                         return ret;
7541                 val = cpu_to_le32(val);
7542                 memcpy(pd, ((char*)&val), b_count);
7543                 eeprom->len += b_count;
7544         }
7545         return 0;
7546 }
7547
7548 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf); 
7549
7550 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7551 {
7552         struct tg3 *tp = netdev_priv(dev);
7553         int ret;
7554         u32 offset, len, b_offset, odd_len, start, end;
7555         u8 *buf;
7556
7557         if (tp->link_config.phy_is_low_power)
7558                 return -EAGAIN;
7559
7560         if (eeprom->magic != TG3_EEPROM_MAGIC)
7561                 return -EINVAL;
7562
7563         offset = eeprom->offset;
7564         len = eeprom->len;
7565
7566         if ((b_offset = (offset & 3))) {
7567                 /* adjustments to start on required 4 byte boundary */
7568                 ret = tg3_nvram_read(tp, offset-b_offset, &start);
7569                 if (ret)
7570                         return ret;
7571                 start = cpu_to_le32(start);
7572                 len += b_offset;
7573                 offset &= ~3;
7574                 if (len < 4)
7575                         len = 4;
7576         }
7577
7578         odd_len = 0;
7579         if (len & 3) {
7580                 /* adjustments to end on required 4 byte boundary */
7581                 odd_len = 1;
7582                 len = (len + 3) & ~3;
7583                 ret = tg3_nvram_read(tp, offset+len-4, &end);
7584                 if (ret)
7585                         return ret;
7586                 end = cpu_to_le32(end);
7587         }
7588
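        /* If either end of the request is unaligned, assemble a word-aligned
         * bounce buffer: the NVRAM words straddling the boundaries were read
         * back above, so the bytes outside the requested range are preserved,
         * the caller's data is laid over them, and the whole aligned block is
         * written out.
         */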
7589         buf = data;
7590         if (b_offset || odd_len) {
7591                 buf = kmalloc(len, GFP_KERNEL);
7592                 if (!buf)
7593                         return -ENOMEM;
7594                 if (b_offset)
7595                         memcpy(buf, &start, 4);
7596                 if (odd_len)
7597                         memcpy(buf+len-4, &end, 4);
7598                 memcpy(buf + b_offset, data, eeprom->len);
7599         }
7600
7601         ret = tg3_nvram_write_block(tp, offset, len, buf);
7602
7603         if (buf != data)
7604                 kfree(buf);
7605
7606         return ret;
7607 }
7608
7609 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7610 {
7611         struct tg3 *tp = netdev_priv(dev);
7612   
7613         cmd->supported = (SUPPORTED_Autoneg);
7614
7615         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7616                 cmd->supported |= (SUPPORTED_1000baseT_Half |
7617                                    SUPPORTED_1000baseT_Full);
7618
7619         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
7620                 cmd->supported |= (SUPPORTED_100baseT_Half |
7621                                   SUPPORTED_100baseT_Full |
7622                                   SUPPORTED_10baseT_Half |
7623                                   SUPPORTED_10baseT_Full |
7624                                   SUPPORTED_MII);
7625         else
7626                 cmd->supported |= SUPPORTED_FIBRE;
7627   
7628         cmd->advertising = tp->link_config.advertising;
7629         if (netif_running(dev)) {
7630                 cmd->speed = tp->link_config.active_speed;
7631                 cmd->duplex = tp->link_config.active_duplex;
7632         }
7633         cmd->port = 0;
7634         cmd->phy_address = PHY_ADDR;
7635         cmd->transceiver = 0;
7636         cmd->autoneg = tp->link_config.autoneg;
7637         cmd->maxtxpkt = 0;
7638         cmd->maxrxpkt = 0;
7639         return 0;
7640 }
7641   
7642 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7643 {
7644         struct tg3 *tp = netdev_priv(dev);
7645   
7646         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) { 
7647                 /* These are the only valid advertisement bits allowed.  */
7648                 if (cmd->autoneg == AUTONEG_ENABLE &&
7649                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
7650                                           ADVERTISED_1000baseT_Full |
7651                                           ADVERTISED_Autoneg |
7652                                           ADVERTISED_FIBRE)))
7653                         return -EINVAL;
7654                 /* Fiber can only do SPEED_1000.  */
7655                 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7656                          (cmd->speed != SPEED_1000))
7657                         return -EINVAL;
7658         /* Copper cannot force SPEED_1000.  */
7659         } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7660                    (cmd->speed == SPEED_1000))
7661                 return -EINVAL;
7662         else if ((cmd->speed == SPEED_1000) &&
7663                  (tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7664                 return -EINVAL;
7665
7666         tg3_full_lock(tp, 0);
7667
7668         tp->link_config.autoneg = cmd->autoneg;
7669         if (cmd->autoneg == AUTONEG_ENABLE) {
7670                 tp->link_config.advertising = cmd->advertising;
7671                 tp->link_config.speed = SPEED_INVALID;
7672                 tp->link_config.duplex = DUPLEX_INVALID;
7673         } else {
7674                 tp->link_config.advertising = 0;
7675                 tp->link_config.speed = cmd->speed;
7676                 tp->link_config.duplex = cmd->duplex;
7677         }
7678   
7679         if (netif_running(dev))
7680                 tg3_setup_phy(tp, 1);
7681
7682         tg3_full_unlock(tp);
7683   
7684         return 0;
7685 }
7686   
7687 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
7688 {
7689         struct tg3 *tp = netdev_priv(dev);
7690   
7691         strcpy(info->driver, DRV_MODULE_NAME);
7692         strcpy(info->version, DRV_MODULE_VERSION);
7693         strcpy(info->fw_version, tp->fw_ver);
7694         strcpy(info->bus_info, pci_name(tp->pdev));
7695 }
7696   
7697 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7698 {
7699         struct tg3 *tp = netdev_priv(dev);
7700   
7701         wol->supported = WAKE_MAGIC;
7702         wol->wolopts = 0;
7703         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
7704                 wol->wolopts = WAKE_MAGIC;
7705         memset(&wol->sopass, 0, sizeof(wol->sopass));
7706 }
7707   
7708 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7709 {
7710         struct tg3 *tp = netdev_priv(dev);
7711   
7712         if (wol->wolopts & ~WAKE_MAGIC)
7713                 return -EINVAL;
7714         if ((wol->wolopts & WAKE_MAGIC) &&
7715             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7716             !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
7717                 return -EINVAL;
7718   
7719         spin_lock_bh(&tp->lock);
7720         if (wol->wolopts & WAKE_MAGIC)
7721                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
7722         else
7723                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
7724         spin_unlock_bh(&tp->lock);
7725   
7726         return 0;
7727 }
7728   
7729 static u32 tg3_get_msglevel(struct net_device *dev)
7730 {
7731         struct tg3 *tp = netdev_priv(dev);
7732         return tp->msg_enable;
7733 }
7734   
7735 static void tg3_set_msglevel(struct net_device *dev, u32 value)
7736 {
7737         struct tg3 *tp = netdev_priv(dev);
7738         tp->msg_enable = value;
7739 }
7740   
7741 #if TG3_TSO_SUPPORT != 0
7742 static int tg3_set_tso(struct net_device *dev, u32 value)
7743 {
7744         struct tg3 *tp = netdev_priv(dev);
7745
7746         if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
7747                 if (value)
7748                         return -EINVAL;
7749                 return 0;
7750         }
7751         return ethtool_op_set_tso(dev, value);
7752 }
7753 #endif
7754   
7755 static int tg3_nway_reset(struct net_device *dev)
7756 {
7757         struct tg3 *tp = netdev_priv(dev);
7758         u32 bmcr;
7759         int r;
7760   
7761         if (!netif_running(dev))
7762                 return -EAGAIN;
7763
7764         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7765                 return -EINVAL;
7766
7767         spin_lock_bh(&tp->lock);
7768         r = -EINVAL;
7769         tg3_readphy(tp, MII_BMCR, &bmcr);
7770         if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
7771             ((bmcr & BMCR_ANENABLE) ||
7772              (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
7773                 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
7774                                            BMCR_ANENABLE);
7775                 r = 0;
7776         }
7777         spin_unlock_bh(&tp->lock);
7778   
7779         return r;
7780 }
7781   
7782 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7783 {
7784         struct tg3 *tp = netdev_priv(dev);
7785   
7786         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
7787         ering->rx_mini_max_pending = 0;
7788         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
7789                 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
7790         else
7791                 ering->rx_jumbo_max_pending = 0;
7792
7793         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
7794
7795         ering->rx_pending = tp->rx_pending;
7796         ering->rx_mini_pending = 0;
7797         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
7798                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
7799         else
7800                 ering->rx_jumbo_pending = 0;
7801
7802         ering->tx_pending = tp->tx_pending;
7803 }
7804   
7805 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7806 {
7807         struct tg3 *tp = netdev_priv(dev);
7808         int irq_sync = 0;
7809   
7810         if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
7811             (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
7812             (ering->tx_pending > TG3_TX_RING_SIZE - 1))
7813                 return -EINVAL;
7814   
7815         if (netif_running(dev)) {
7816                 tg3_netif_stop(tp);
7817                 irq_sync = 1;
7818         }
7819
7820         tg3_full_lock(tp, irq_sync);
7821   
7822         tp->rx_pending = ering->rx_pending;
7823
7824         if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
7825             tp->rx_pending > 63)
7826                 tp->rx_pending = 63;
7827         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
7828         tp->tx_pending = ering->tx_pending;
7829
7830         if (netif_running(dev)) {
7831                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7832                 tg3_init_hw(tp);
7833                 tg3_netif_start(tp);
7834         }
7835
7836         tg3_full_unlock(tp);
7837   
7838         return 0;
7839 }
7840   
7841 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7842 {
7843         struct tg3 *tp = netdev_priv(dev);
7844   
7845         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
7846         epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
7847         epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
7848 }
7849   
7850 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7851 {
7852         struct tg3 *tp = netdev_priv(dev);
7853         int irq_sync = 0;
7854   
7855         if (netif_running(dev)) {
7856                 tg3_netif_stop(tp);
7857                 irq_sync = 1;
7858         }
7859
7860         tg3_full_lock(tp, irq_sync);
7861
7862         if (epause->autoneg)
7863                 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
7864         else
7865                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
7866         if (epause->rx_pause)
7867                 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
7868         else
7869                 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
7870         if (epause->tx_pause)
7871                 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
7872         else
7873                 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
7874
7875         if (netif_running(dev)) {
7876                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7877                 tg3_init_hw(tp);
7878                 tg3_netif_start(tp);
7879         }
7880
7881         tg3_full_unlock(tp);
7882   
7883         return 0;
7884 }
7885   
7886 static u32 tg3_get_rx_csum(struct net_device *dev)
7887 {
7888         struct tg3 *tp = netdev_priv(dev);
7889         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
7890 }
7891   
7892 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
7893 {
7894         struct tg3 *tp = netdev_priv(dev);
7895   
7896         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7897                 if (data != 0)
7898                         return -EINVAL;
7899                 return 0;
7900         }
7901   
7902         spin_lock_bh(&tp->lock);
7903         if (data)
7904                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
7905         else
7906                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
7907         spin_unlock_bh(&tp->lock);
7908   
7909         return 0;
7910 }
7911   
7912 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
7913 {
7914         struct tg3 *tp = netdev_priv(dev);
7915   
7916         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7917                 if (data != 0)
7918                         return -EINVAL;
7919                 return 0;
7920         }
7921   
7922         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7923             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
7924                 ethtool_op_set_tx_hw_csum(dev, data);
7925         else
7926                 ethtool_op_set_tx_csum(dev, data);
7927
7928         return 0;
7929 }
7930
7931 static int tg3_get_stats_count(struct net_device *dev)
7932 {
7933         return TG3_NUM_STATS;
7934 }
7935
7936 static int tg3_get_test_count(struct net_device *dev)
7937 {
7938         return TG3_NUM_TEST;
7939 }
7940
7941 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7942 {
7943         switch (stringset) {
7944         case ETH_SS_STATS:
7945                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
7946                 break;
7947         case ETH_SS_TEST:
7948                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
7949                 break;
7950         default:
7951                 WARN_ON(1);     /* we need a WARN() */
7952                 break;
7953         }
7954 }
7955
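     /* Blink the port LEDs for "ethtool -p".  "data" is the number of
      * seconds to blink (0 selects the default of 2); the LEDs are driven
      * through MAC_LED_CTRL overrides, toggled every 500 ms, and the
      * original tp->led_ctrl value is restored when the test finishes.
      */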
7956 static int tg3_phys_id(struct net_device *dev, u32 data)
7957 {
7958         struct tg3 *tp = netdev_priv(dev);
7959         int i;
7960
7961         if (!netif_running(tp->dev))
7962                 return -EAGAIN;
7963
7964         if (data == 0)
7965                 data = 2;
7966
7967         for (i = 0; i < (data * 2); i++) {
7968                 if ((i % 2) == 0)
7969                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
7970                                            LED_CTRL_1000MBPS_ON |
7971                                            LED_CTRL_100MBPS_ON |
7972                                            LED_CTRL_10MBPS_ON |
7973                                            LED_CTRL_TRAFFIC_OVERRIDE |
7974                                            LED_CTRL_TRAFFIC_BLINK |
7975                                            LED_CTRL_TRAFFIC_LED);
7977                 else
7978                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
7979                                            LED_CTRL_TRAFFIC_OVERRIDE);
7980
7981                 if (msleep_interruptible(500))
7982                         break;
7983         }
7984         tw32(MAC_LED_CTRL, tp->led_ctrl);
7985         return 0;
7986 }
7987
7988 static void tg3_get_ethtool_stats(struct net_device *dev,
7989                                    struct ethtool_stats *estats, u64 *tmp_stats)
7990 {
7991         struct tg3 *tp = netdev_priv(dev);
7992         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
7993 }
7994
7995 #define NVRAM_TEST_SIZE 0x100
7996 #define NVRAM_SELFBOOT_FORMAT1_SIZE 0x14
7997
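     /* NVRAM self-test: copy the image into a scratch buffer and validate
      * it.  Legacy images are checked against the bootstrap CRC at offset
      * 0x10 and the manufacturing-block CRC at 0xfc; self-boot images use
      * a simple 8-bit checksum over the whole image, which must sum to 0.
      */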
7998 static int tg3_test_nvram(struct tg3 *tp)
7999 {
8000         u32 *buf, csum, magic;
8001         int i, j, err = 0, size;
8002
8003         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
8004                 return -EIO;
8005
8006         if (magic == TG3_EEPROM_MAGIC)
8007                 size = NVRAM_TEST_SIZE;
8008         else if ((magic & 0xff000000) == 0xa5000000) {
8009                 if ((magic & 0xe00000) == 0x200000)
8010                         size = NVRAM_SELFBOOT_FORMAT1_SIZE;
8011                 else
8012                         return 0;
8013         } else
8014                 return -EIO;
8015
8016         buf = kmalloc(size, GFP_KERNEL);
8017         if (buf == NULL)
8018                 return -ENOMEM;
8019
8020         err = -EIO;
8021         for (i = 0, j = 0; i < size; i += 4, j++) {
8022                 u32 val;
8023
8024                 if ((err = tg3_nvram_read(tp, i, &val)) != 0)
8025                         break;
8026                 buf[j] = cpu_to_le32(val);
8027         }
8028         if (i < size)
8029                 goto out;
8030
8031         /* Selfboot format */
8032         if (cpu_to_be32(buf[0]) != TG3_EEPROM_MAGIC) {
8033                 u8 *buf8 = (u8 *) buf, csum8 = 0;
8034
8035                 for (i = 0; i < size; i++)
8036                         csum8 += buf8[i];
8037
8038                 err = csum8 ? -EIO : 0;
8039                 goto out;
8041         }
8042
8043         /* Bootstrap checksum at offset 0x10 */
8044         csum = calc_crc((unsigned char *) buf, 0x10);
8045         if (csum != cpu_to_le32(buf[0x10/4]))
8046                 goto out;
8047
8048         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
8049         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
8050         if (csum != cpu_to_le32(buf[0xfc/4]))
8051                 goto out;
8052
8053         err = 0;
8054
8055 out:
8056         kfree(buf);
8057         return err;
8058 }
8059
8060 #define TG3_SERDES_TIMEOUT_SEC  2
8061 #define TG3_COPPER_TIMEOUT_SEC  6
8062
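     /* Link self-test: poll netif_carrier_ok() once a second for up to
      * TG3_SERDES_TIMEOUT_SEC or TG3_COPPER_TIMEOUT_SEC seconds depending
      * on the PHY type, and fail with -EIO if no link is seen.
      */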
8063 static int tg3_test_link(struct tg3 *tp)
8064 {
8065         int i, max;
8066
8067         if (!netif_running(tp->dev))
8068                 return -ENODEV;
8069
8070         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
8071                 max = TG3_SERDES_TIMEOUT_SEC;
8072         else
8073                 max = TG3_COPPER_TIMEOUT_SEC;
8074
8075         for (i = 0; i < max; i++) {
8076                 if (netif_carrier_ok(tp->dev))
8077                         return 0;
8078
8079                 if (msleep_interruptible(1000))
8080                         break;
8081         }
8082
8083         return -EIO;
8084 }
8085
8086 /* Only test the commonly used registers */
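     /* For each table entry, write all-zeros and then all-ones through the
      * write mask and verify that read-only bits keep their saved value
      * while read/write bits take exactly what was written.
      */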
8087 static int tg3_test_registers(struct tg3 *tp)
8088 {
8089         int i, is_5705;
8090         u32 offset, read_mask, write_mask, val, save_val, read_val;
8091         static struct {
8092                 u16 offset;
8093                 u16 flags;
8094 #define TG3_FL_5705     0x1
8095 #define TG3_FL_NOT_5705 0x2
8096 #define TG3_FL_NOT_5788 0x4
8097                 u32 read_mask;
8098                 u32 write_mask;
8099         } reg_tbl[] = {
8100                 /* MAC Control Registers */
8101                 { MAC_MODE, TG3_FL_NOT_5705,
8102                         0x00000000, 0x00ef6f8c },
8103                 { MAC_MODE, TG3_FL_5705,
8104                         0x00000000, 0x01ef6b8c },
8105                 { MAC_STATUS, TG3_FL_NOT_5705,
8106                         0x03800107, 0x00000000 },
8107                 { MAC_STATUS, TG3_FL_5705,
8108                         0x03800100, 0x00000000 },
8109                 { MAC_ADDR_0_HIGH, 0x0000,
8110                         0x00000000, 0x0000ffff },
8111                 { MAC_ADDR_0_LOW, 0x0000,
8112                         0x00000000, 0xffffffff },
8113                 { MAC_RX_MTU_SIZE, 0x0000,
8114                         0x00000000, 0x0000ffff },
8115                 { MAC_TX_MODE, 0x0000,
8116                         0x00000000, 0x00000070 },
8117                 { MAC_TX_LENGTHS, 0x0000,
8118                         0x00000000, 0x00003fff },
8119                 { MAC_RX_MODE, TG3_FL_NOT_5705,
8120                         0x00000000, 0x000007fc },
8121                 { MAC_RX_MODE, TG3_FL_5705,
8122                         0x00000000, 0x000007dc },
8123                 { MAC_HASH_REG_0, 0x0000,
8124                         0x00000000, 0xffffffff },
8125                 { MAC_HASH_REG_1, 0x0000,
8126                         0x00000000, 0xffffffff },
8127                 { MAC_HASH_REG_2, 0x0000,
8128                         0x00000000, 0xffffffff },
8129                 { MAC_HASH_REG_3, 0x0000,
8130                         0x00000000, 0xffffffff },
8131
8132                 /* Receive Data and Receive BD Initiator Control Registers. */
8133                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
8134                         0x00000000, 0xffffffff },
8135                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
8136                         0x00000000, 0xffffffff },
8137                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
8138                         0x00000000, 0x00000003 },
8139                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
8140                         0x00000000, 0xffffffff },
8141                 { RCVDBDI_STD_BD+0, 0x0000,
8142                         0x00000000, 0xffffffff },
8143                 { RCVDBDI_STD_BD+4, 0x0000,
8144                         0x00000000, 0xffffffff },
8145                 { RCVDBDI_STD_BD+8, 0x0000,
8146                         0x00000000, 0xffff0002 },
8147                 { RCVDBDI_STD_BD+0xc, 0x0000,
8148                         0x00000000, 0xffffffff },
8149         
8150                 /* Receive BD Initiator Control Registers. */
8151                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
8152                         0x00000000, 0xffffffff },
8153                 { RCVBDI_STD_THRESH, TG3_FL_5705,
8154                         0x00000000, 0x000003ff },
8155                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
8156                         0x00000000, 0xffffffff },
8157         
8158                 /* Host Coalescing Control Registers. */
8159                 { HOSTCC_MODE, TG3_FL_NOT_5705,
8160                         0x00000000, 0x00000004 },
8161                 { HOSTCC_MODE, TG3_FL_5705,
8162                         0x00000000, 0x000000f6 },
8163                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
8164                         0x00000000, 0xffffffff },
8165                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
8166                         0x00000000, 0x000003ff },
8167                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
8168                         0x00000000, 0xffffffff },
8169                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
8170                         0x00000000, 0x000003ff },
8171                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
8172                         0x00000000, 0xffffffff },
8173                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
8174                         0x00000000, 0x000000ff },
8175                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
8176                         0x00000000, 0xffffffff },
8177                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
8178                         0x00000000, 0x000000ff },
8179                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
8180                         0x00000000, 0xffffffff },
8181                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
8182                         0x00000000, 0xffffffff },
8183                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
8184                         0x00000000, 0xffffffff },
8185                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
8186                         0x00000000, 0x000000ff },
8187                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
8188                         0x00000000, 0xffffffff },
8189                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
8190                         0x00000000, 0x000000ff },
8191                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
8192                         0x00000000, 0xffffffff },
8193                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
8194                         0x00000000, 0xffffffff },
8195                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
8196                         0x00000000, 0xffffffff },
8197                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
8198                         0x00000000, 0xffffffff },
8199                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
8200                         0x00000000, 0xffffffff },
8201                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
8202                         0xffffffff, 0x00000000 },
8203                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
8204                         0xffffffff, 0x00000000 },
8205
8206                 /* Buffer Manager Control Registers. */
8207                 { BUFMGR_MB_POOL_ADDR, 0x0000,
8208                         0x00000000, 0x007fff80 },
8209                 { BUFMGR_MB_POOL_SIZE, 0x0000,
8210                         0x00000000, 0x007fffff },
8211                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
8212                         0x00000000, 0x0000003f },
8213                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
8214                         0x00000000, 0x000001ff },
8215                 { BUFMGR_MB_HIGH_WATER, 0x0000,
8216                         0x00000000, 0x000001ff },
8217                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
8218                         0xffffffff, 0x00000000 },
8219                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
8220                         0xffffffff, 0x00000000 },
8221         
8222                 /* Mailbox Registers */
8223                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
8224                         0x00000000, 0x000001ff },
8225                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
8226                         0x00000000, 0x000001ff },
8227                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
8228                         0x00000000, 0x000007ff },
8229                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
8230                         0x00000000, 0x000001ff },
8231
8232                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
8233         };
8234
8235         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
8236                 is_5705 = 1;
8237         else
8238                 is_5705 = 0;
8239
8240         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
8241                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
8242                         continue;
8243
8244                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
8245                         continue;
8246
8247                 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
8248                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
8249                         continue;
8250
8251                 offset = (u32) reg_tbl[i].offset;
8252                 read_mask = reg_tbl[i].read_mask;
8253                 write_mask = reg_tbl[i].write_mask;
8254
8255                 /* Save the original register content */
8256                 save_val = tr32(offset);
8257
8258                 /* Determine the read-only value. */
8259                 read_val = save_val & read_mask;
8260
8261                 /* Write zero to the register, then make sure the read-only bits
8262                  * are not changed and the read/write bits are all zeros.
8263                  */
8264                 tw32(offset, 0);
8265
8266                 val = tr32(offset);
8267
8268                 /* Test the read-only and read/write bits. */
8269                 if (((val & read_mask) != read_val) || (val & write_mask))
8270                         goto out;
8271
8272                 /* Write ones to all the bits defined by RdMask and WrMask, then
8273                  * make sure the read-only bits are not changed and the
8274                  * read/write bits are all ones.
8275                  */
8276                 tw32(offset, read_mask | write_mask);
8277
8278                 val = tr32(offset);
8279
8280                 /* Test the read-only bits. */
8281                 if ((val & read_mask) != read_val)
8282                         goto out;
8283
8284                 /* Test the read/write bits. */
8285                 if ((val & write_mask) != write_mask)
8286                         goto out;
8287
8288                 tw32(offset, save_val);
8289         }
8290
8291         return 0;
8292
8293 out:
8294         printk(KERN_ERR PFX "Register test failed at offset %x\n", offset);
8295         tw32(offset, save_val);
8296         return -EIO;
8297 }
8298
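     /* Write each test pattern to every 32-bit word in [offset, offset + len)
      * of on-chip memory and verify that it reads back unchanged.
      */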
8299 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
8300 {
8301         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
8302         int i;
8303         u32 j;
8304
8305         for (i = 0; i < sizeof(test_pattern)/sizeof(u32); i++) {
8306                 for (j = 0; j < len; j += 4) {
8307                         u32 val;
8308
8309                         tg3_write_mem(tp, offset + j, test_pattern[i]);
8310                         tg3_read_mem(tp, offset + j, &val);
8311                         if (val != test_pattern[i])
8312                                 return -EIO;
8313                 }
8314         }
8315         return 0;
8316 }
8317
8318 static int tg3_test_memory(struct tg3 *tp)
8319 {
8320         static struct mem_entry {
8321                 u32 offset;
8322                 u32 len;
8323         } mem_tbl_570x[] = {
8324                 { 0x00000000, 0x00b50},
8325                 { 0x00002000, 0x1c000},
8326                 { 0xffffffff, 0x00000}
8327         }, mem_tbl_5705[] = {
8328                 { 0x00000100, 0x0000c},
8329                 { 0x00000200, 0x00008},
8330                 { 0x00004000, 0x00800},
8331                 { 0x00006000, 0x01000},
8332                 { 0x00008000, 0x02000},
8333                 { 0x00010000, 0x0e000},
8334                 { 0xffffffff, 0x00000}
8335         }, mem_tbl_5755[] = {
8336                 { 0x00000200, 0x00008},
8337                 { 0x00004000, 0x00800},
8338                 { 0x00006000, 0x00800},
8339                 { 0x00008000, 0x02000},
8340                 { 0x00010000, 0x0c000},
8341                 { 0xffffffff, 0x00000}
8342         };
8343         struct mem_entry *mem_tbl;
8344         int err = 0;
8345         int i;
8346
8347         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
8348                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8349                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8350                         mem_tbl = mem_tbl_5755;
8351                 else
8352                         mem_tbl = mem_tbl_5705;
8353         } else
8354                 mem_tbl = mem_tbl_570x;
8355
8356         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
8357                 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
8358                     mem_tbl[i].len)) != 0)
8359                         break;
8360         }
8361         
8362         return err;
8363 }
8364
8365 #define TG3_MAC_LOOPBACK        0
8366 #define TG3_PHY_LOOPBACK        1
8367
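     /* Send one self-addressed test frame with either internal MAC loopback
      * or PHY loopback enabled, poll the status block until the frame shows
      * up on the standard RX ring, and verify the payload byte-for-byte.
      */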
8368 static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
8369 {
8370         u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
8371         u32 desc_idx;
8372         struct sk_buff *skb, *rx_skb;
8373         u8 *tx_data;
8374         dma_addr_t map;
8375         int num_pkts, tx_len, rx_len, i, err;
8376         struct tg3_rx_buffer_desc *desc;
8377
8378         if (loopback_mode == TG3_MAC_LOOPBACK) {
8379                 /* HW errata - mac loopback fails in some cases on 5780.
8380                  * Normal traffic and PHY loopback are not affected by
8381                  * errata.
8382                  */
8383                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
8384                         return 0;
8385
8386                 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
8387                            MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY |
8388                            MAC_MODE_PORT_MODE_GMII;
8389                 tw32(MAC_MODE, mac_mode);
8390         } else if (loopback_mode == TG3_PHY_LOOPBACK) {
8391                 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
8392                                            BMCR_SPEED1000);
8393                 udelay(40);
8394                 /* reset to prevent losing 1st rx packet intermittently */
8395                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
8396                         tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8397                         udelay(10);
8398                         tw32_f(MAC_RX_MODE, tp->rx_mode);
8399                 }
8400                 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
8401                            MAC_MODE_LINK_POLARITY | MAC_MODE_PORT_MODE_GMII;
8402                 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
8403                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
8404                 tw32(MAC_MODE, mac_mode);
8405         }
8406         else
8407                 return -EINVAL;
8408
8409         err = -EIO;
8410
8411         tx_len = 1514;
8412         skb = dev_alloc_skb(tx_len);
             if (!skb)
                     return -ENOMEM;
8413         tx_data = skb_put(skb, tx_len);
8414         memcpy(tx_data, tp->dev->dev_addr, 6);
8415         memset(tx_data + 6, 0x0, 8);
8416
8417         tw32(MAC_RX_MTU_SIZE, tx_len + 4);
8418
8419         for (i = 14; i < tx_len; i++)
8420                 tx_data[i] = (u8) (i & 0xff);
8421
8422         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
8423
8424         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8425              HOSTCC_MODE_NOW);
8426
8427         udelay(10);
8428
8429         rx_start_idx = tp->hw_status->idx[0].rx_producer;
8430
8431         num_pkts = 0;
8432
8433         tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
8434
8435         tp->tx_prod++;
8436         num_pkts++;
8437
8438         tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
8439                      tp->tx_prod);
8440         tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
8441
8442         udelay(10);
8443
8444         for (i = 0; i < 10; i++) {
8445                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8446                        HOSTCC_MODE_NOW);
8447
8448                 udelay(10);
8449
8450                 tx_idx = tp->hw_status->idx[0].tx_consumer;
8451                 rx_idx = tp->hw_status->idx[0].rx_producer;
8452                 if ((tx_idx == tp->tx_prod) &&
8453                     (rx_idx == (rx_start_idx + num_pkts)))
8454                         break;
8455         }
8456
8457         pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
8458         dev_kfree_skb(skb);
8459
8460         if (tx_idx != tp->tx_prod)
8461                 goto out;
8462
8463         if (rx_idx != rx_start_idx + num_pkts)
8464                 goto out;
8465
8466         desc = &tp->rx_rcb[rx_start_idx];
8467         desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
8468         opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
8469         if (opaque_key != RXD_OPAQUE_RING_STD)
8470                 goto out;
8471
8472         if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
8473             (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
8474                 goto out;
8475
8476         rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
8477         if (rx_len != tx_len)
8478                 goto out;
8479
8480         rx_skb = tp->rx_std_buffers[desc_idx].skb;
8481
8482         map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
8483         pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
8484
8485         for (i = 14; i < tx_len; i++) {
8486                 if (*(rx_skb->data + i) != (u8) (i & 0xff))
8487                         goto out;
8488         }
8489         err = 0;
8490         
8491         /* tg3_free_rings will unmap and free the rx_skb */
8492 out:
8493         return err;
8494 }
8495
8496 #define TG3_MAC_LOOPBACK_FAILED         1
8497 #define TG3_PHY_LOOPBACK_FAILED         2
8498 #define TG3_LOOPBACK_FAILED             (TG3_MAC_LOOPBACK_FAILED |      \
8499                                          TG3_PHY_LOOPBACK_FAILED)
8500
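     /* Run the MAC loopback test and, for non-SerDes PHYs, the PHY loopback
      * test.  Returns a mask of TG3_*_LOOPBACK_FAILED bits; 0 means no
      * failures.
      */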
8501 static int tg3_test_loopback(struct tg3 *tp)
8502 {
8503         int err = 0;
8504
8505         if (!netif_running(tp->dev))
8506                 return TG3_LOOPBACK_FAILED;
8507
8508         tg3_reset_hw(tp);
8509
8510         if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
8511                 err |= TG3_MAC_LOOPBACK_FAILED;
8512         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
8513                 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
8514                         err |= TG3_PHY_LOOPBACK_FAILED;
8515         }
8516
8517         return err;
8518 }
8519
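     /* ethtool self-test entry point.  The NVRAM and link tests always run;
      * the offline tests (registers, memory, loopback, interrupt) halt the
      * chip first and re-initialize it afterwards if the interface was up.
      */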
8520 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
8521                           u64 *data)
8522 {
8523         struct tg3 *tp = netdev_priv(dev);
8524
8525         if (tp->link_config.phy_is_low_power)
8526                 tg3_set_power_state(tp, PCI_D0);
8527
8528         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
8529
8530         if (tg3_test_nvram(tp) != 0) {
8531                 etest->flags |= ETH_TEST_FL_FAILED;
8532                 data[0] = 1;
8533         }
8534         if (tg3_test_link(tp) != 0) {
8535                 etest->flags |= ETH_TEST_FL_FAILED;
8536                 data[1] = 1;
8537         }
8538         if (etest->flags & ETH_TEST_FL_OFFLINE) {
8539                 int err, irq_sync = 0;
8540
8541                 if (netif_running(dev)) {
8542                         tg3_netif_stop(tp);
8543                         irq_sync = 1;
8544                 }
8545
8546                 tg3_full_lock(tp, irq_sync);
8547
8548                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
8549                 err = tg3_nvram_lock(tp);
8550                 tg3_halt_cpu(tp, RX_CPU_BASE);
8551                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
8552                         tg3_halt_cpu(tp, TX_CPU_BASE);
8553                 if (!err)
8554                         tg3_nvram_unlock(tp);
8555
8556                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
8557                         tg3_phy_reset(tp);
8558
8559                 if (tg3_test_registers(tp) != 0) {
8560                         etest->flags |= ETH_TEST_FL_FAILED;
8561                         data[2] = 1;
8562                 }
8563                 if (tg3_test_memory(tp) != 0) {
8564                         etest->flags |= ETH_TEST_FL_FAILED;
8565                         data[3] = 1;
8566                 }
8567                 if ((data[4] = tg3_test_loopback(tp)) != 0)
8568                         etest->flags |= ETH_TEST_FL_FAILED;
8569
8570                 tg3_full_unlock(tp);
8571
8572                 if (tg3_test_interrupt(tp) != 0) {
8573                         etest->flags |= ETH_TEST_FL_FAILED;
8574                         data[5] = 1;
8575                 }
8576
8577                 tg3_full_lock(tp, 0);
8578
8579                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8580                 if (netif_running(dev)) {
8581                         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
8582                         tg3_init_hw(tp);
8583                         tg3_netif_start(tp);
8584                 }
8585
8586                 tg3_full_unlock(tp);
8587         }
8588         if (tp->link_config.phy_is_low_power)
8589                 tg3_set_power_state(tp, PCI_D3hot);
8590
8591 }
8592
8593 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8594 {
8595         struct mii_ioctl_data *data = if_mii(ifr);
8596         struct tg3 *tp = netdev_priv(dev);
8597         int err;
8598
8599         switch(cmd) {
8600         case SIOCGMIIPHY:
8601                 data->phy_id = PHY_ADDR;
8602
8603                 /* fallthru */
8604         case SIOCGMIIREG: {
8605                 u32 mii_regval;
8606
8607                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8608                         break;                  /* We have no PHY */
8609
8610                 if (tp->link_config.phy_is_low_power)
8611                         return -EAGAIN;
8612
8613                 spin_lock_bh(&tp->lock);
8614                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
8615                 spin_unlock_bh(&tp->lock);
8616
8617                 data->val_out = mii_regval;
8618
8619                 return err;
8620         }
8621
8622         case SIOCSMIIREG:
8623                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8624                         break;                  /* We have no PHY */
8625
8626                 if (!capable(CAP_NET_ADMIN))
8627                         return -EPERM;
8628
8629                 if (tp->link_config.phy_is_low_power)
8630                         return -EAGAIN;
8631
8632                 spin_lock_bh(&tp->lock);
8633                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
8634                 spin_unlock_bh(&tp->lock);
8635
8636                 return err;
8637
8638         default:
8639                 /* do nothing */
8640                 break;
8641         }
8642         return -EOPNOTSUPP;
8643 }
8644
8645 #if TG3_VLAN_TAG_USED
8646 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
8647 {
8648         struct tg3 *tp = netdev_priv(dev);
8649
8650         tg3_full_lock(tp, 0);
8651
8652         tp->vlgrp = grp;
8653
8654         /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
8655         __tg3_set_rx_mode(dev);
8656
8657         tg3_full_unlock(tp);
8658 }
8659
8660 static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
8661 {
8662         struct tg3 *tp = netdev_priv(dev);
8663
8664         tg3_full_lock(tp, 0);
8665         if (tp->vlgrp)
8666                 tp->vlgrp->vlan_devices[vid] = NULL;
8667         tg3_full_unlock(tp);
8668 }
8669 #endif
8670
8671 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8672 {
8673         struct tg3 *tp = netdev_priv(dev);
8674
8675         memcpy(ec, &tp->coal, sizeof(*ec));
8676         return 0;
8677 }
8678
8679 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8680 {
8681         struct tg3 *tp = netdev_priv(dev);
8682         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
8683         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
8684
8685         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
8686                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
8687                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
8688                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
8689                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
8690         }
8691
8692         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
8693             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
8694             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
8695             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
8696             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
8697             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
8698             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
8699             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
8700             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
8701             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
8702                 return -EINVAL;
8703
8704         /* No rx interrupts will be generated if both are zero */
8705         if ((ec->rx_coalesce_usecs == 0) &&
8706             (ec->rx_max_coalesced_frames == 0))
8707                 return -EINVAL;
8708
8709         /* No tx interrupts will be generated if both are zero */
8710         if ((ec->tx_coalesce_usecs == 0) &&
8711             (ec->tx_max_coalesced_frames == 0))
8712                 return -EINVAL;
8713
8714         /* Only copy relevant parameters, ignore all others. */
8715         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
8716         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
8717         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
8718         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
8719         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
8720         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
8721         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
8722         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
8723         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
8724
8725         if (netif_running(dev)) {
8726                 tg3_full_lock(tp, 0);
8727                 __tg3_set_coalesce(tp, &tp->coal);
8728                 tg3_full_unlock(tp);
8729         }
8730         return 0;
8731 }
8732
8733 static struct ethtool_ops tg3_ethtool_ops = {
8734         .get_settings           = tg3_get_settings,
8735         .set_settings           = tg3_set_settings,
8736         .get_drvinfo            = tg3_get_drvinfo,
8737         .get_regs_len           = tg3_get_regs_len,
8738         .get_regs               = tg3_get_regs,
8739         .get_wol                = tg3_get_wol,
8740         .set_wol                = tg3_set_wol,
8741         .get_msglevel           = tg3_get_msglevel,
8742         .set_msglevel           = tg3_set_msglevel,
8743         .nway_reset             = tg3_nway_reset,
8744         .get_link               = ethtool_op_get_link,
8745         .get_eeprom_len         = tg3_get_eeprom_len,
8746         .get_eeprom             = tg3_get_eeprom,
8747         .set_eeprom             = tg3_set_eeprom,
8748         .get_ringparam          = tg3_get_ringparam,
8749         .set_ringparam          = tg3_set_ringparam,
8750         .get_pauseparam         = tg3_get_pauseparam,
8751         .set_pauseparam         = tg3_set_pauseparam,
8752         .get_rx_csum            = tg3_get_rx_csum,
8753         .set_rx_csum            = tg3_set_rx_csum,
8754         .get_tx_csum            = ethtool_op_get_tx_csum,
8755         .set_tx_csum            = tg3_set_tx_csum,
8756         .get_sg                 = ethtool_op_get_sg,
8757         .set_sg                 = ethtool_op_set_sg,
8758 #if TG3_TSO_SUPPORT != 0
8759         .get_tso                = ethtool_op_get_tso,
8760         .set_tso                = tg3_set_tso,
8761 #endif
8762         .self_test_count        = tg3_get_test_count,
8763         .self_test              = tg3_self_test,
8764         .get_strings            = tg3_get_strings,
8765         .phys_id                = tg3_phys_id,
8766         .get_stats_count        = tg3_get_stats_count,
8767         .get_ethtool_stats      = tg3_get_ethtool_stats,
8768         .get_coalesce           = tg3_get_coalesce,
8769         .set_coalesce           = tg3_set_coalesce,
8770         .get_perm_addr          = ethtool_op_get_perm_addr,
8771 };
8772
8773 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
8774 {
8775         u32 cursize, val, magic;
8776
8777         tp->nvram_size = EEPROM_CHIP_SIZE;
8778
8779         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
8780                 return;
8781
8782         if ((magic != TG3_EEPROM_MAGIC) && ((magic & 0xff000000) != 0xa5000000))
8783                 return;
8784
8785         /*
8786          * Size the chip by reading offsets at increasing powers of two.
8787          * When we encounter our validation signature, we know the addressing
8788          * has wrapped around, and thus have our chip size.
8789          */
8790         cursize = 0x10;
8791
8792         while (cursize < tp->nvram_size) {
8793                 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
8794                         return;
8795
8796                 if (val == magic)
8797                         break;
8798
8799                 cursize <<= 1;
8800         }
8801
8802         tp->nvram_size = cursize;
8803 }
8804                 
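     /* Images that start with TG3_EEPROM_MAGIC store their size, in KB, in
      * the upper half of the word at offset 0xf0; other images are sized by
      * probing with tg3_get_eeprom_size().  Fall back to 128 KB if the size
      * word is zero or unreadable.
      */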
8805 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
8806 {
8807         u32 val;
8808
8809         if (tg3_nvram_read_swab(tp, 0, &val) != 0)
8810                 return;
8811
8812         /* Selfboot format */
8813         if (val != TG3_EEPROM_MAGIC) {
8814                 tg3_get_eeprom_size(tp);
8815                 return;
8816         }
8817
8818         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
8819                 if (val != 0) {
8820                         tp->nvram_size = (val >> 16) * 1024;
8821                         return;
8822                 }
8823         }
8824         tp->nvram_size = 0x20000;
8825 }
8826
8827 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
8828 {
8829         u32 nvcfg1;
8830
8831         nvcfg1 = tr32(NVRAM_CFG1);
8832         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
8833                 tp->tg3_flags2 |= TG3_FLG2_FLASH;
8834         }
8835         else {
8836                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8837                 tw32(NVRAM_CFG1, nvcfg1);
8838         }
8839
8840         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
8841             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
8842                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
8843                         case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
8844                                 tp->nvram_jedecnum = JEDEC_ATMEL;
8845                                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
8846                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8847                                 break;
8848                         case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
8849                                 tp->nvram_jedecnum = JEDEC_ATMEL;
8850                                 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
8851                                 break;
8852                         case FLASH_VENDOR_ATMEL_EEPROM:
8853                                 tp->nvram_jedecnum = JEDEC_ATMEL;
8854                                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8855                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8856                                 break;
8857                         case FLASH_VENDOR_ST:
8858                                 tp->nvram_jedecnum = JEDEC_ST;
8859                                 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
8860                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8861                                 break;
8862                         case FLASH_VENDOR_SAIFUN:
8863                                 tp->nvram_jedecnum = JEDEC_SAIFUN;
8864                                 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
8865                                 break;
8866                         case FLASH_VENDOR_SST_SMALL:
8867                         case FLASH_VENDOR_SST_LARGE:
8868                                 tp->nvram_jedecnum = JEDEC_SST;
8869                                 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
8870                                 break;
8871                 }
8872         }
8873         else {
8874                 tp->nvram_jedecnum = JEDEC_ATMEL;
8875                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
8876                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8877         }
8878 }
8879
8880 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
8881 {
8882         u32 nvcfg1;
8883
8884         nvcfg1 = tr32(NVRAM_CFG1);
8885
8886         /* NVRAM protection for TPM */
8887         if (nvcfg1 & (1 << 27))
8888                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
8889
8890         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
8891                 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
8892                 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
8893                         tp->nvram_jedecnum = JEDEC_ATMEL;
8894                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8895                         break;
8896                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
8897                         tp->nvram_jedecnum = JEDEC_ATMEL;
8898                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8899                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
8900                         break;
8901                 case FLASH_5752VENDOR_ST_M45PE10:
8902                 case FLASH_5752VENDOR_ST_M45PE20:
8903                 case FLASH_5752VENDOR_ST_M45PE40:
8904                         tp->nvram_jedecnum = JEDEC_ST;
8905                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8906                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
8907                         break;
8908         }
8909
8910         if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
8911                 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
8912                         case FLASH_5752PAGE_SIZE_256:
8913                                 tp->nvram_pagesize = 256;
8914                                 break;
8915                         case FLASH_5752PAGE_SIZE_512:
8916                                 tp->nvram_pagesize = 512;
8917                                 break;
8918                         case FLASH_5752PAGE_SIZE_1K:
8919                                 tp->nvram_pagesize = 1024;
8920                                 break;
8921                         case FLASH_5752PAGE_SIZE_2K:
8922                                 tp->nvram_pagesize = 2048;
8923                                 break;
8924                         case FLASH_5752PAGE_SIZE_4K:
8925                                 tp->nvram_pagesize = 4096;
8926                                 break;
8927                         case FLASH_5752PAGE_SIZE_264:
8928                                 tp->nvram_pagesize = 264;
8929                                 break;
8930                 }
8931         }
8932         else {
8933                 /* For eeprom, set pagesize to maximum eeprom size */
8934                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8935
8936                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8937                 tw32(NVRAM_CFG1, nvcfg1);
8938         }
8939 }
8940
8941 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
8942 {
8943         u32 nvcfg1;
8944
8945         nvcfg1 = tr32(NVRAM_CFG1);
8946
8947         /* NVRAM protection for TPM */
8948         if (nvcfg1 & (1 << 27))
8949                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
8950
8951         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
8952                 case FLASH_5755VENDOR_ATMEL_EEPROM_64KHZ:
8953                 case FLASH_5755VENDOR_ATMEL_EEPROM_376KHZ:
8954                         tp->nvram_jedecnum = JEDEC_ATMEL;
8955                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8956                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8957
8958                         nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8959                         tw32(NVRAM_CFG1, nvcfg1);
8960                         break;
8961                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
8962                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
8963                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
8964                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
8965                 case FLASH_5755VENDOR_ATMEL_FLASH_4:
8966                         tp->nvram_jedecnum = JEDEC_ATMEL;
8967                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8968                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
8969                         tp->nvram_pagesize = 264;
8970                         break;
8971                 case FLASH_5752VENDOR_ST_M45PE10:
8972                 case FLASH_5752VENDOR_ST_M45PE20:
8973                 case FLASH_5752VENDOR_ST_M45PE40:
8974                         tp->nvram_jedecnum = JEDEC_ST;
8975                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8976                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
8977                         tp->nvram_pagesize = 256;
8978                         break;
8979         }
8980 }
8981
8982 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
8983 {
8984         u32 nvcfg1;
8985
8986         nvcfg1 = tr32(NVRAM_CFG1);
8987
8988         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
8989                 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
8990                 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
8991                 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
8992                 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
8993                         tp->nvram_jedecnum = JEDEC_ATMEL;
8994                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8995                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8996
8997                         nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8998                         tw32(NVRAM_CFG1, nvcfg1);
8999                         break;
9000                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
9001                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
9002                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
9003                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
9004                         tp->nvram_jedecnum = JEDEC_ATMEL;
9005                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9006                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9007                         tp->nvram_pagesize = 264;
9008                         break;
9009                 case FLASH_5752VENDOR_ST_M45PE10:
9010                 case FLASH_5752VENDOR_ST_M45PE20:
9011                 case FLASH_5752VENDOR_ST_M45PE40:
9012                         tp->nvram_jedecnum = JEDEC_ST;
9013                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9014                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9015                         tp->nvram_pagesize = 256;
9016                         break;
9017         }
9018 }
9019
9020 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
9021 static void __devinit tg3_nvram_init(struct tg3 *tp)
9022 {
9023         int j;
9024
9025         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X)
9026                 return;
9027
9028         tw32_f(GRC_EEPROM_ADDR,
9029              (EEPROM_ADDR_FSM_RESET |
9030               (EEPROM_DEFAULT_CLOCK_PERIOD <<
9031                EEPROM_ADDR_CLKPERD_SHIFT)));
9032
9033         /* XXX schedule_timeout() ... */
9034         for (j = 0; j < 100; j++)
9035                 udelay(10);
9036
9037         /* Enable seeprom accesses. */
9038         tw32_f(GRC_LOCAL_CTRL,
9039              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
9040         udelay(100);
9041
9042         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
9043             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
9044                 tp->tg3_flags |= TG3_FLAG_NVRAM;
9045
9046                 if (tg3_nvram_lock(tp)) {
9047                         printk(KERN_WARNING PFX "%s: Cannot get nvram lock, "
9048                                "tg3_nvram_init failed.\n", tp->dev->name);
9049                         return;
9050                 }
9051                 tg3_enable_nvram_access(tp);
9052
9053                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9054                         tg3_get_5752_nvram_info(tp);
9055                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9056                         tg3_get_5755_nvram_info(tp);
9057                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
9058                         tg3_get_5787_nvram_info(tp);
9059                 else
9060                         tg3_get_nvram_info(tp);
9061
9062                 tg3_get_nvram_size(tp);
9063
9064                 tg3_disable_nvram_access(tp);
9065                 tg3_nvram_unlock(tp);
9066
9067         } else {
9068                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
9069
9070                 tg3_get_eeprom_size(tp);
9071         }
9072 }
9073
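     /* Read one 32-bit word from a legacy SEEPROM part by programming
      * GRC_EEPROM_ADDR and polling EEPROM_ADDR_COMPLETE for up to one
      * second before giving up with -EBUSY.
      */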
9074 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
9075                                         u32 offset, u32 *val)
9076 {
9077         u32 tmp;
9078         int i;
9079
9080         if (offset > EEPROM_ADDR_ADDR_MASK ||
9081             (offset % 4) != 0)
9082                 return -EINVAL;
9083
9084         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
9085                                         EEPROM_ADDR_DEVID_MASK |
9086                                         EEPROM_ADDR_READ);
9087         tw32(GRC_EEPROM_ADDR,
9088              tmp |
9089              (0 << EEPROM_ADDR_DEVID_SHIFT) |
9090              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
9091               EEPROM_ADDR_ADDR_MASK) |
9092              EEPROM_ADDR_READ | EEPROM_ADDR_START);
9093
9094         for (i = 0; i < 10000; i++) {
9095                 tmp = tr32(GRC_EEPROM_ADDR);
9096
9097                 if (tmp & EEPROM_ADDR_COMPLETE)
9098                         break;
9099                 udelay(100);
9100         }
9101         if (!(tmp & EEPROM_ADDR_COMPLETE))
9102                 return -EBUSY;
9103
9104         *val = tr32(GRC_EEPROM_DATA);
9105         return 0;
9106 }
9107
9108 #define NVRAM_CMD_TIMEOUT 10000
9109
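     /* Issue a command to the NVRAM state machine and poll NVRAM_CMD_DONE
      * for up to NVRAM_CMD_TIMEOUT * 10 usec (about 100 ms) before failing
      * with -EBUSY.
      */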
9110 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
9111 {
9112         int i;
9113
9114         tw32(NVRAM_CMD, nvram_cmd);
9115         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
9116                 udelay(10);
9117                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
9118                         udelay(10);
9119                         break;
9120                 }
9121         }
9122         if (i == NVRAM_CMD_TIMEOUT) {
9123                 return -EBUSY;
9124         }
9125         return 0;
9126 }
9127
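     /* Translate a linear NVRAM offset into the page-based addressing
      * used by buffered Atmel AT45DB0X1B flash; other parts use the
      * address unchanged.
      */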
9128 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
9129 {
9130         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9131             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9132             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9133             (tp->nvram_jedecnum == JEDEC_ATMEL))
9134
9135                 addr = ((addr / tp->nvram_pagesize) <<
9136                         ATMEL_AT45DB0X1B_PAGE_POS) +
9137                        (addr % tp->nvram_pagesize);
9138
9139         return addr;
9140 }
9141
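     /* Inverse of tg3_nvram_phys_addr(): map a page-based address back
      * to a linear NVRAM offset.
      */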
9142 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
9143 {
9144         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9145             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9146             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9147             (tp->nvram_jedecnum == JEDEC_ATMEL))
9148
9149                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
9150                         tp->nvram_pagesize) +
9151                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
9152
9153         return addr;
9154 }
9155
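     /* Read one 32-bit word of NVRAM.  Falls back to the serial EEPROM
      * path when there is no NVRAM interface, otherwise takes the NVRAM
      * lock and issues a read command at the translated address.
      */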
9156 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
9157 {
9158         int ret;
9159
9160         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
9161                 printk(KERN_ERR PFX "Attempt to do nvram_read on Sun 570X\n");
9162                 return -EINVAL;
9163         }
9164
9165         if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
9166                 return tg3_nvram_read_using_eeprom(tp, offset, val);
9167
9168         offset = tg3_nvram_phys_addr(tp, offset);
9169
9170         if (offset > NVRAM_ADDR_MSK)
9171                 return -EINVAL;
9172
9173         ret = tg3_nvram_lock(tp);
9174         if (ret)
9175                 return ret;
9176
9177         tg3_enable_nvram_access(tp);
9178
9179         tw32(NVRAM_ADDR, offset);
9180         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
9181                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
9182
9183         if (ret == 0)
9184                 *val = swab32(tr32(NVRAM_RDDATA));
9185
9186         tg3_disable_nvram_access(tp);
9187
9188         tg3_nvram_unlock(tp);
9189
9190         return ret;
9191 }
9192
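     /* Like tg3_nvram_read(), but byte swap the value that is returned. */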
9193 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
9194 {
9195         int err;
9196         u32 tmp;
9197
9198         err = tg3_nvram_read(tp, offset, &tmp);
9199         *val = swab32(tmp);
9200         return err;
9201 }
9202
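     /* Write a block to the legacy serial EEPROM one 32-bit word at a
      * time, polling EEPROM_ADDR_COMPLETE after each word.
      */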
9203 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
9204                                     u32 offset, u32 len, u8 *buf)
9205 {
9206         int i, j, rc = 0;
9207         u32 val;
9208
9209         for (i = 0; i < len; i += 4) {
9210                 u32 addr, data;
9211
9212                 addr = offset + i;
9213
9214                 memcpy(&data, buf + i, 4);
9215
9216                 tw32(GRC_EEPROM_DATA, cpu_to_le32(data));
9217
9218                 val = tr32(GRC_EEPROM_ADDR);
9219                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
9220
9221                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
9222                         EEPROM_ADDR_READ);
9223                 tw32(GRC_EEPROM_ADDR, val |
9224                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
9225                         (addr & EEPROM_ADDR_ADDR_MASK) |
9226                         EEPROM_ADDR_START |
9227                         EEPROM_ADDR_WRITE);
9228                 
9229                 for (j = 0; j < 10000; j++) {
9230                         val = tr32(GRC_EEPROM_ADDR);
9231
9232                         if (val & EEPROM_ADDR_COMPLETE)
9233                                 break;
9234                         udelay(100);
9235                 }
9236                 if (!(val & EEPROM_ADDR_COMPLETE)) {
9237                         rc = -EBUSY;
9238                         break;
9239                 }
9240         }
9241
9242         return rc;
9243 }
9244
9245 /* offset and length are dword aligned */
9246 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
9247                 u8 *buf)
9248 {
9249         int ret = 0;
9250         u32 pagesize = tp->nvram_pagesize;
9251         u32 pagemask = pagesize - 1;
9252         u32 nvram_cmd;
9253         u8 *tmp;
9254
9255         tmp = kmalloc(pagesize, GFP_KERNEL);
9256         if (tmp == NULL)
9257                 return -ENOMEM;
9258
9259         while (len) {
9260                 int j;
9261                 u32 phy_addr, page_off, size;
9262
9263                 phy_addr = offset & ~pagemask;
9264         
9265                 for (j = 0; j < pagesize; j += 4) {
9266                         if ((ret = tg3_nvram_read(tp, phy_addr + j,
9267                                                 (u32 *) (tmp + j))))
9268                                 break;
9269                 }
9270                 if (ret)
9271                         break;
9272
9273                 page_off = offset & pagemask;
9274                 size = pagesize;
9275                 if (len < size)
9276                         size = len;
9277
9278                 len -= size;
9279
9280                 memcpy(tmp + page_off, buf, size);
9281
9282                 offset = offset + (pagesize - page_off);
9283
9284                 tg3_enable_nvram_access(tp);
9285
9286                 /*
9287                  * Before we can erase the flash page, we need
9288                  * to issue a special "write enable" command.
9289                  */
9290                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9291
9292                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9293                         break;
9294
9295                 /* Erase the target page */
9296                 tw32(NVRAM_ADDR, phy_addr);
9297
9298                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
9299                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
9300
9301                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9302                         break;
9303
9304                 /* Issue another write enable to start the write. */
9305                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9306
9307                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9308                         break;
9309
9310                 for (j = 0; j < pagesize; j += 4) {
9311                         u32 data;
9312
9313                         data = *((u32 *) (tmp + j));
9314                         tw32(NVRAM_WRDATA, cpu_to_be32(data));
9315
9316                         tw32(NVRAM_ADDR, phy_addr + j);
9317
9318                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
9319                                 NVRAM_CMD_WR;
9320
9321                         if (j == 0)
9322                                 nvram_cmd |= NVRAM_CMD_FIRST;
9323                         else if (j == (pagesize - 4))
9324                                 nvram_cmd |= NVRAM_CMD_LAST;
9325
9326                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
9327                                 break;
9328                 }
9329                 if (ret)
9330                         break;
9331         }
9332
9333         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9334         tg3_nvram_exec_cmd(tp, nvram_cmd);
9335
9336         kfree(tmp);
9337
9338         return ret;
9339 }
9340
9341 /* offset and length are dword aligned */
9342 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
9343                 u8 *buf)
9344 {
9345         int i, ret = 0;
9346
9347         for (i = 0; i < len; i += 4, offset += 4) {
9348                 u32 data, page_off, phy_addr, nvram_cmd;
9349
9350                 memcpy(&data, buf + i, 4);
9351                 tw32(NVRAM_WRDATA, cpu_to_be32(data));
9352
9353                 page_off = offset % tp->nvram_pagesize;
9354
9355                 phy_addr = tg3_nvram_phys_addr(tp, offset);
9356
9357                 tw32(NVRAM_ADDR, phy_addr);
9358
9359                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
9360
9361                 if ((page_off == 0) || (i == 0))
9362                         nvram_cmd |= NVRAM_CMD_FIRST;
9363                 else if (page_off == (tp->nvram_pagesize - 4))
9364                         nvram_cmd |= NVRAM_CMD_LAST;
9365
9366                 if (i == (len - 4))
9367                         nvram_cmd |= NVRAM_CMD_LAST;
9368
9369                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
9370                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
9371                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
9372                     (tp->nvram_jedecnum == JEDEC_ST) &&
9373                     (nvram_cmd & NVRAM_CMD_FIRST)) {
9374
9375                         if ((ret = tg3_nvram_exec_cmd(tp,
9376                                 NVRAM_CMD_WREN | NVRAM_CMD_GO |
9377                                 NVRAM_CMD_DONE)))
9378
9379                                 break;
9380                 }
9381                 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
9382                         /* We always do complete word writes to eeprom. */
9383                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
9384                 }
9385
9386                 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
9387                         break;
9388         }
9389         return ret;
9390 }
9391
9392 /* offset and length are dword aligned */
9393 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
9394 {
9395         int ret;
9396
9397         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
9398                 printk(KERN_ERR PFX "Attempt to do nvram_write on Sun 570X\n");
9399                 return -EINVAL;
9400         }
9401
9402         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
9403                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
9404                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
9405                 udelay(40);
9406         }
9407
9408         if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
9409                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
9410         }
9411         else {
9412                 u32 grc_mode;
9413
9414                 ret = tg3_nvram_lock(tp);
9415                 if (ret)
9416                         return ret;
9417
9418                 tg3_enable_nvram_access(tp);
9419                 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
9420                     !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
9421                         tw32(NVRAM_WRITE1, 0x406);
9422
9423                 grc_mode = tr32(GRC_MODE);
9424                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
9425
9426                 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
9427                         !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
9428
9429                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
9430                                 buf);
9431                 }
9432                 else {
9433                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
9434                                 buf);
9435                 }
9436
9437                 grc_mode = tr32(GRC_MODE);
9438                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
9439
9440                 tg3_disable_nvram_access(tp);
9441                 tg3_nvram_unlock(tp);
9442         }
9443
9444         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
9445                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9446                 udelay(40);
9447         }
9448
9449         return ret;
9450 }
9451
9452 struct subsys_tbl_ent {
9453         u16 subsys_vendor, subsys_devid;
9454         u32 phy_id;
9455 };
9456
9457 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
9458         /* Broadcom boards. */
9459         { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
9460         { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
9461         { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
9462         { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
9463         { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
9464         { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
9465         { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
9466         { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
9467         { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
9468         { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
9469         { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
9470
9471         /* 3com boards. */
9472         { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
9473         { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
9474         { PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
9475         { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
9476         { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
9477
9478         /* DELL boards. */
9479         { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
9480         { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
9481         { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
9482         { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
9483
9484         /* Compaq boards. */
9485         { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
9486         { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
9487         { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
9488         { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
9489         { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
9490
9491         /* IBM boards. */
9492         { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
9493 };
9494
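     /* Look up the PCI subsystem vendor/device ID in the table above to
      * find the matching PHY ID; returns NULL if there is no match.
      */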
9495 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
9496 {
9497         int i;
9498
9499         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
9500                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
9501                      tp->pdev->subsystem_vendor) &&
9502                     (subsys_id_to_phy_id[i].subsys_devid ==
9503                      tp->pdev->subsystem_device))
9504                         return &subsys_id_to_phy_id[i];
9505         }
9506         return NULL;
9507 }
9508
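     /* Fetch the hardware configuration (PHY ID, LED mode, ASF and WOL
      * flags, etc.) from the NIC SRAM data area if a valid signature is
      * present.
      */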
9509 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
9510 {
9511         u32 val;
9512         u16 pmcsr;
9513
9514         /* On some early chips the SRAM cannot be accessed in D3hot state,
9515          * so we need to make sure we're in D0.
9516          */
9517         pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
9518         pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9519         pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
9520         msleep(1);
9521
9522         /* Make sure register accesses (indirect or otherwise)
9523          * will function correctly.
9524          */
9525         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9526                                tp->misc_host_ctrl);
9527
9528         tp->phy_id = PHY_ID_INVALID;
9529         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9530
9531         /* Do not even try poking around in here on Sun parts.  */
9532         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X)
9533                 return;
9534
9535         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9536         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9537                 u32 nic_cfg, led_cfg;
9538                 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
9539                 int eeprom_phy_serdes = 0;
9540
9541                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9542                 tp->nic_sram_data_cfg = nic_cfg;
9543
9544                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
9545                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
9546                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
9547                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
9548                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
9549                     (ver > 0) && (ver < 0x100))
9550                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
9551
9552                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
9553                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
9554                         eeprom_phy_serdes = 1;
9555
9556                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
9557                 if (nic_phy_id != 0) {
9558                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
9559                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
9560
9561                         eeprom_phy_id  = (id1 >> 16) << 10;
9562                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
9563                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
9564                 } else
9565                         eeprom_phy_id = 0;
9566
9567                 tp->phy_id = eeprom_phy_id;
9568                 if (eeprom_phy_serdes) {
9569                         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
9570                                 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
9571                         else
9572                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9573                 }
9574
9575                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9576                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
9577                                     SHASTA_EXT_LED_MODE_MASK);
9578                 else
9579                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
9580
9581                 switch (led_cfg) {
9582                 default:
9583                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
9584                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9585                         break;
9586
9587                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
9588                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
9589                         break;
9590
9591                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
9592                         tp->led_ctrl = LED_CTRL_MODE_MAC;
9593
9594                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
9595                          * read from some older 5700/5701 bootcode.
9596                          */
9597                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
9598                             ASIC_REV_5700 ||
9599                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
9600                             ASIC_REV_5701)
9601                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9602
9603                         break;
9604
9605                 case SHASTA_EXT_LED_SHARED:
9606                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
9607                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
9608                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
9609                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
9610                                                  LED_CTRL_MODE_PHY_2);
9611                         break;
9612
9613                 case SHASTA_EXT_LED_MAC:
9614                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
9615                         break;
9616
9617                 case SHASTA_EXT_LED_COMBO:
9618                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
9619                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
9620                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
9621                                                  LED_CTRL_MODE_PHY_2);
9622                         break;
9623
9624                 }
9625
9626                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9627                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
9628                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
9629                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
9630
9631                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
9632                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
9633                     (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP))
9634                         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
9635
9636                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9637                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
9638                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9639                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
9640                 }
9641                 if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
9642                         tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
9643
9644                 if (cfg2 & (1 << 17))
9645                         tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
9646
9647                 /* serdes signal pre-emphasis in register 0x590 is set
9648                  * by bootcode if bit 18 is set */
9649                 if (cfg2 & (1 << 18))
9650                         tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
9651         }
9652 }
9653
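     /* Determine the PHY ID, preferably from the PHY registers, falling
      * back to the eeprom configuration or the subsystem ID table, and
      * set up the initial advertising mask.
      */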
9654 static int __devinit tg3_phy_probe(struct tg3 *tp)
9655 {
9656         u32 hw_phy_id_1, hw_phy_id_2;
9657         u32 hw_phy_id, hw_phy_id_masked;
9658         int err;
9659
9660         /* Reading the PHY ID register can conflict with ASF
9661          * firmware access to the PHY hardware.
9662          */
9663         err = 0;
9664         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
9665                 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
9666         } else {
9667                 /* Now read the physical PHY_ID from the chip and verify
9668                  * that it is sane.  If it doesn't look good, we fall back
9669                  * to either the hard-coded table based PHY_ID and failing
9670                  * to either the hard-coded table based PHY_ID or, failing
9671                  * that, the value found in the eeprom area.
9672                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
9673                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
9674
9675                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
9676                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
9677                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
9678
9679                 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
9680         }
9681
9682         if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
9683                 tp->phy_id = hw_phy_id;
9684                 if (hw_phy_id_masked == PHY_ID_BCM8002)
9685                         tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9686                 else
9687                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
9688         } else {
9689                 if (tp->phy_id != PHY_ID_INVALID) {
9690                         /* Do nothing, phy ID already set up in
9691                          * tg3_get_eeprom_hw_cfg().
9692                          */
9693                 } else {
9694                         struct subsys_tbl_ent *p;
9695
9696                         /* No eeprom signature?  Try the hardcoded
9697                          * subsys device table.
9698                          */
9699                         p = lookup_by_subsys(tp);
9700                         if (!p)
9701                                 return -ENODEV;
9702
9703                         tp->phy_id = p->phy_id;
9704                         if (!tp->phy_id ||
9705                             tp->phy_id == PHY_ID_BCM8002)
9706                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9707                 }
9708         }
9709
9710         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
9711             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
9712                 u32 bmsr, adv_reg, tg3_ctrl;
9713
9714                 tg3_readphy(tp, MII_BMSR, &bmsr);
9715                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
9716                     (bmsr & BMSR_LSTATUS))
9717                         goto skip_phy_reset;
9718                     
9719                 err = tg3_phy_reset(tp);
9720                 if (err)
9721                         return err;
9722
9723                 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
9724                            ADVERTISE_100HALF | ADVERTISE_100FULL |
9725                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
9726                 tg3_ctrl = 0;
9727                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
9728                         tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
9729                                     MII_TG3_CTRL_ADV_1000_FULL);
9730                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
9731                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
9732                                 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
9733                                              MII_TG3_CTRL_ENABLE_AS_MASTER);
9734                 }
9735
9736                 if (!tg3_copper_is_advertising_all(tp)) {
9737                         tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9738
9739                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9740                                 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9741
9742                         tg3_writephy(tp, MII_BMCR,
9743                                      BMCR_ANENABLE | BMCR_ANRESTART);
9744                 }
9745                 tg3_phy_set_wirespeed(tp);
9746
9747                 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9748                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9749                         tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9750         }
9751
9752 skip_phy_reset:
9753         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
9754                 err = tg3_init_5401phy_dsp(tp);
9755                 if (err)
9756                         return err;
9757         }
9758
9759         if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
9760                 err = tg3_init_5401phy_dsp(tp);
9761         }
9762
9763         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
9764                 tp->link_config.advertising =
9765                         (ADVERTISED_1000baseT_Half |
9766                          ADVERTISED_1000baseT_Full |
9767                          ADVERTISED_Autoneg |
9768                          ADVERTISED_FIBRE);
9769         if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
9770                 tp->link_config.advertising &=
9771                         ~(ADVERTISED_1000baseT_Half |
9772                           ADVERTISED_1000baseT_Full);
9773
9774         return err;
9775 }
9776
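     /* Extract the board part number from the VPD data (read from NVRAM
      * or through the PCI VPD capability) into tp->board_part_number.
      */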
9777 static void __devinit tg3_read_partno(struct tg3 *tp)
9778 {
9779         unsigned char vpd_data[256];
9780         int i;
9781         u32 magic;
9782
9783         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
9784                 /* Sun decided not to put the necessary bits in the
9785                  * NVRAM of their onboard tg3 parts :(
9786                  */
9787                 strcpy(tp->board_part_number, "Sun 570X");
9788                 return;
9789         }
9790
9791         if (tg3_nvram_read_swab(tp, 0x0, &magic))
9792                 return;
9793
9794         if (magic == TG3_EEPROM_MAGIC) {
9795                 for (i = 0; i < 256; i += 4) {
9796                         u32 tmp;
9797
9798                         if (tg3_nvram_read(tp, 0x100 + i, &tmp))
9799                                 goto out_not_found;
9800
9801                         vpd_data[i + 0] = ((tmp >>  0) & 0xff);
9802                         vpd_data[i + 1] = ((tmp >>  8) & 0xff);
9803                         vpd_data[i + 2] = ((tmp >> 16) & 0xff);
9804                         vpd_data[i + 3] = ((tmp >> 24) & 0xff);
9805                 }
9806         } else {
9807                 int vpd_cap;
9808
9809                 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
9810                 for (i = 0; i < 256; i += 4) {
9811                         u32 tmp, j = 0;
9812                         u16 tmp16;
9813
9814                         pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
9815                                               i);
9816                         while (j++ < 100) {
9817                                 pci_read_config_word(tp->pdev, vpd_cap +
9818                                                      PCI_VPD_ADDR, &tmp16);
9819                                 if (tmp16 & 0x8000)
9820                                         break;
9821                                 msleep(1);
9822                         }
9823                         pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
9824                                               &tmp);
9825                         tmp = cpu_to_le32(tmp);
9826                         memcpy(&vpd_data[i], &tmp, 4);
9827                 }
9828         }
9829
9830         /* Now parse and find the part number. */
9831         for (i = 0; i < 256; ) {
9832                 unsigned char val = vpd_data[i];
9833                 int block_end;
9834
9835                 if (val == 0x82 || val == 0x91) {
9836                         i = (i + 3 +
9837                              (vpd_data[i + 1] +
9838                               (vpd_data[i + 2] << 8)));
9839                         continue;
9840                 }
9841
9842                 if (val != 0x90)
9843                         goto out_not_found;
9844
9845                 block_end = (i + 3 +
9846                              (vpd_data[i + 1] +
9847                               (vpd_data[i + 2] << 8)));
9848                 i += 3;
9849                 while (i < block_end) {
9850                         if (vpd_data[i + 0] == 'P' &&
9851                             vpd_data[i + 1] == 'N') {
9852                                 int partno_len = vpd_data[i + 2];
9853
9854                                 if (partno_len > 24)
9855                                         goto out_not_found;
9856
9857                                 memcpy(tp->board_part_number,
9858                                        &vpd_data[i + 3],
9859                                        partno_len);
9860
9861                                 /* Success. */
9862                                 return;
9863                         }
                             /* Not "PN", skip ahead to the next VPD keyword
                              * so the loop cannot spin forever.
                              */
                             i += 3 + vpd_data[i + 2];
9864                 }
9865
9866                 /* Part number not found. */
9867                 goto out_not_found;
9868         }
9869
9870 out_not_found:
9871         strcpy(tp->board_part_number, "none");
9872 }
9873
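     /* Read the firmware version string from NVRAM into tp->fw_ver. */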
9874 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
9875 {
9876         u32 val, offset, start;
9877
9878         if (tg3_nvram_read_swab(tp, 0, &val))
9879                 return;
9880
9881         if (val != TG3_EEPROM_MAGIC)
9882                 return;
9883
9884         if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
9885             tg3_nvram_read_swab(tp, 0x4, &start))
9886                 return;
9887
9888         offset = tg3_nvram_logical_addr(tp, offset);
9889         if (tg3_nvram_read_swab(tp, offset, &val))
9890                 return;
9891
9892         if ((val & 0xfc000000) == 0x0c000000) {
9893                 u32 ver_offset, addr;
9894                 int i;
9895
9896                 if (tg3_nvram_read_swab(tp, offset + 4, &val) ||
9897                     tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
9898                         return;
9899
9900                 if (val != 0)
9901                         return;
9902
9903                 addr = offset + ver_offset - start;
9904                 for (i = 0; i < 16; i += 4) {
9905                         if (tg3_nvram_read(tp, addr + i, &val))
9906                                 return;
9907
9908                         val = cpu_to_le32(val);
9909                         memcpy(tp->fw_ver + i, &val, 4);
9910                 }
9911         }
9912 }
9913
9914 #ifdef CONFIG_SPARC64
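     /* Detect Sun onboard 570X parts by checking the OBP subsystem
      * vendor ID property, or the "network" prom device name used on
      * boards such as the SunBlade-2500.
      */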
9915 static int __devinit tg3_is_sun_570X(struct tg3 *tp)
9916 {
9917         struct pci_dev *pdev = tp->pdev;
9918         struct pcidev_cookie *pcp = pdev->sysdata;
9919
9920         if (pcp != NULL) {
9921                 int node = pcp->prom_node;
9922                 u32 venid;
9923                 int err;
9924
9925                 err = prom_getproperty(node, "subsystem-vendor-id",
9926                                        (char *) &venid, sizeof(venid));
9927                 if (err == 0 || err == -1)
9928                         return 0;
9929                 if (venid == PCI_VENDOR_ID_SUN)
9930                         return 1;
9931
9932                 /* TG3 chips onboard the SunBlade-2500 don't have the
9933                  * subsystem-vendor-id set to PCI_VENDOR_ID_SUN but they
9934                  * are distinguishable from non-Sun variants by being
9935                  * named "network" by the firmware.  Non-Sun cards will
9936                  * show up as being named "ethernet".
9937                  */
9938                 if (!strcmp(pcp->prom_name, "network"))
9939                         return 1;
9940         }
9941         return 0;
9942 }
9943 #endif
9944
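     /* Probe the chip revision, bus configuration and board specifics,
      * apply the corresponding workarounds, and set the tg3 flags that
      * the rest of the driver relies on.
      */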
9945 static int __devinit tg3_get_invariants(struct tg3 *tp)
9946 {
9947         static struct pci_device_id write_reorder_chipsets[] = {
9948                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
9949                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
9950                 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
9951                              PCI_DEVICE_ID_VIA_8385_0) },
9952                 { },
9953         };
9954         u32 misc_ctrl_reg;
9955         u32 cacheline_sz_reg;
9956         u32 pci_state_reg, grc_misc_cfg;
9957         u32 val;
9958         u16 pci_cmd;
9959         int err;
9960
9961 #ifdef CONFIG_SPARC64
9962         if (tg3_is_sun_570X(tp))
9963                 tp->tg3_flags2 |= TG3_FLG2_SUN_570X;
9964 #endif
9965
9966         /* Force memory write invalidate off.  If we leave it on,
9967          * then on 5700_BX chips we have to enable a workaround.
9968          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
9969          * to match the cacheline size.  The Broadcom driver has this
9970          * workaround but turns MWI off all the time and so never uses
9971          * it.  This seems to suggest that the workaround is insufficient.
9972          */
9973         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9974         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
9975         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9976
9977         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
9978          * has the register indirect write enable bit set before
9979          * we try to access any of the MMIO registers.  It is also
9980          * critical that the PCI-X hw workaround situation is decided
9981          * before that.
9982          */
9983         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9984                               &misc_ctrl_reg);
9985
9986         tp->pci_chip_rev_id = (misc_ctrl_reg >>
9987                                MISC_HOST_CTRL_CHIPREV_SHIFT);
9988
9989         /* Wrong chip ID in 5752 A0. This code can be removed later
9990          * as A0 is not in production.
9991          */
9992         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
9993                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
9994
9995         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
9996          * we need to disable memory and use config. cycles
9997          * only to access all registers. The 5702/03 chips
9998          * can mistakenly decode the special cycles from the
9999          * ICH chipsets as memory write cycles, causing corruption
10000          * of register and memory space. Only certain ICH bridges
10001          * will drive special cycles with non-zero data during the
10002          * address phase which can fall within the 5703's address
10003          * range. This is not an ICH bug as the PCI spec allows
10004          * non-zero address during special cycles. However, only
10005          * these ICH bridges are known to drive non-zero addresses
10006          * during special cycles.
10007          *
10008          * Since special cycles do not cross PCI bridges, we only
10009          * enable this workaround if the 5703 is on the secondary
10010          * bus of these ICH bridges.
10011          */
10012         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
10013             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
10014                 static struct tg3_dev_id {
10015                         u32     vendor;
10016                         u32     device;
10017                         u32     rev;
10018                 } ich_chipsets[] = {
10019                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
10020                           PCI_ANY_ID },
10021                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
10022                           PCI_ANY_ID },
10023                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
10024                           0xa },
10025                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
10026                           PCI_ANY_ID },
10027                         { },
10028                 };
10029                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
10030                 struct pci_dev *bridge = NULL;
10031
10032                 while (pci_id->vendor != 0) {
10033                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
10034                                                 bridge);
10035                         if (!bridge) {
10036                                 pci_id++;
10037                                 continue;
10038                         }
10039                         if (pci_id->rev != PCI_ANY_ID) {
10040                                 u8 rev;
10041
10042                                 pci_read_config_byte(bridge, PCI_REVISION_ID,
10043                                                      &rev);
10044                                 if (rev > pci_id->rev)
10045                                         continue;
10046                         }
10047                         if (bridge->subordinate &&
10048                             (bridge->subordinate->number ==
10049                              tp->pdev->bus->number)) {
10050
10051                                 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
10052                                 pci_dev_put(bridge);
10053                                 break;
10054                         }
10055                 }
10056         }
10057
10058         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
10059          * DMA addresses > 40-bit. This bridge may have other additional
10060          * 57xx devices behind it in some 4-port NIC designs for example.
10061          * Any tg3 device found behind the bridge will also need the 40-bit
10062          * DMA workaround.
10063          */
10064         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
10065             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
10066                 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
10067                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10068                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
10069         }
10070         else {
10071                 struct pci_dev *bridge = NULL;
10072
10073                 do {
10074                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
10075                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
10076                                                 bridge);
10077                         if (bridge && bridge->subordinate &&
10078                             (bridge->subordinate->number <=
10079                              tp->pdev->bus->number) &&
10080                             (bridge->subordinate->subordinate >=
10081                              tp->pdev->bus->number)) {
10082                                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10083                                 pci_dev_put(bridge);
10084                                 break;
10085                         }
10086                 } while (bridge);
10087         }
10088
10089         /* Initialize misc host control in PCI block. */
10090         tp->misc_host_ctrl |= (misc_ctrl_reg &
10091                                MISC_HOST_CTRL_CHIPREV);
10092         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10093                                tp->misc_host_ctrl);
10094
10095         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10096                               &cacheline_sz_reg);
10097
10098         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
10099         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
10100         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
10101         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
10102
10103         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
10104             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
10105             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10106             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10107             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
10108                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
10109
10110         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
10111             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
10112                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
10113
10114         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
10115                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10116                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) {
10117                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
10118                         tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
10119                 } else
10120                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1;
10121         }
10122
10123         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
10124             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
10125             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
10126             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 &&
10127             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787)
10128                 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
10129
10130         if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
10131                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
10132
10133         /* If we have an AMD 762 or VIA K8T800 chipset, write
10134          * reordering to the mailbox registers done by the host
10135          * controller can cause major troubles.  We read back from
10136          * every mailbox register write to force the writes to be
10137          * posted to the chip in order.
10138          */
10139         if (pci_dev_present(write_reorder_chipsets) &&
10140             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
10141                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
10142
10143         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
10144             tp->pci_lat_timer < 64) {
10145                 tp->pci_lat_timer = 64;
10146
10147                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
10148                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
10149                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
10150                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
10151
10152                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10153                                        cacheline_sz_reg);
10154         }
10155
10156         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10157                               &pci_state_reg);
10158
10159         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
10160                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
10161
10162                 /* If this is a 5700 BX chipset, and we are in PCI-X
10163                  * mode, enable register write workaround.
10164                  *
10165                  * The workaround is to use indirect register accesses
10166                  * for all chip writes not to mailbox registers.
10167                  */
10168                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
10169                         u32 pm_reg;
10170                         u16 pci_cmd;
10171
10172                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10173
10174                         /* The chip can have its power management PCI config
10175                          * space registers clobbered due to this bug.
10176                          * So explicitly force the chip into D0 here.
10177                          */
10178                         pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
10179                                               &pm_reg);
10180                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
10181                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
10182                         pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
10183                                                pm_reg);
10184
10185                         /* Also, force SERR#/PERR# in PCI command. */
10186                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10187                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
10188                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10189                 }
10190         }
10191
10192         /* 5700 BX chips need to have their TX producer index mailboxes
10193          * written twice to workaround a bug.
10194          */
10195         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
10196                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
10197
10198         /* Back to back register writes can cause problems on this chip,
10199          * the workaround is to read back all reg writes except those to
10200          * mailbox regs.  See tg3_write_indirect_reg32().
10201          *
10202          * PCI Express 5750_A0 rev chips need this workaround too.
10203          */
10204         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
10205             ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
10206              tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
10207                 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
10208
10209         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
10210                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
10211         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
10212                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
10213
10214         /* Chip-specific fixup from Broadcom driver */
10215         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
10216             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
10217                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
10218                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
10219         }
10220
10221         /* Default fast path register access methods */
10222         tp->read32 = tg3_read32;
10223         tp->write32 = tg3_write32;
10224         tp->read32_mbox = tg3_read32;
10225         tp->write32_mbox = tg3_write32;
10226         tp->write32_tx_mbox = tg3_write32;
10227         tp->write32_rx_mbox = tg3_write32;
10228
10229         /* Various workaround register access methods */
10230         if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
10231                 tp->write32 = tg3_write_indirect_reg32;
10232         else if (tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG)
10233                 tp->write32 = tg3_write_flush_reg32;
10234
10235         if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
10236             (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
10237                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
10238                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
10239                         tp->write32_rx_mbox = tg3_write_flush_reg32;
10240         }
10241
10242         if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
10243                 tp->read32 = tg3_read_indirect_reg32;
10244                 tp->write32 = tg3_write_indirect_reg32;
10245                 tp->read32_mbox = tg3_read_indirect_mbox;
10246                 tp->write32_mbox = tg3_write_indirect_mbox;
10247                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
10248                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
10249
10250                 iounmap(tp->regs);
10251                 tp->regs = NULL;
10252
10253                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10254                 pci_cmd &= ~PCI_COMMAND_MEMORY;
10255                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10256         }
10257
10258         /* Get eeprom hw config before calling tg3_set_power_state().
10259          * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
10260          * determined before calling tg3_set_power_state() so that
10261          * we know whether or not to switch out of Vaux power.
10262          * When the flag is set, it means that GPIO1 is used for eeprom
10263          * write protect and also implies that it is a LOM where GPIOs
10264          * are not used to switch power.
10265          */ 
10266         tg3_get_eeprom_hw_cfg(tp);
10267
10268         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
10269          * GPIO1 driven high will bring 5700's external PHY out of reset.
10270          * It is also used as eeprom write protect on LOMs.
10271          */
10272         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
10273         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10274             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
10275                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10276                                        GRC_LCLCTRL_GPIO_OUTPUT1);
10277         /* Unused GPIO3 must be driven as output on 5752 because there
10278          * are no pull-up resistors on unused GPIO pins.
10279          */
10280         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10281                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
10282
10283         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10284                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
10285
10286         /* Force the chip into D0. */
10287         err = tg3_set_power_state(tp, PCI_D0);
10288         if (err) {
10289                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
10290                        pci_name(tp->pdev));
10291                 return err;
10292         }
10293
10294         /* 5700 B0 chips do not support checksumming correctly due
10295          * to hardware bugs.
10296          */
10297         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
10298                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
10299
10300         /* Pseudo-header checksum is done by hardware logic and not
10301          * the offload processors, so make the chip do the pseudo-
10302          * header checksums on receive.  For transmit it is more
10303          * convenient to do the pseudo-header checksum in software
10304          * as Linux does that on transmit for us in all cases.
10305          */
10306         tp->tg3_flags |= TG3_FLAG_NO_TX_PSEUDO_CSUM;
10307         tp->tg3_flags &= ~TG3_FLAG_NO_RX_PSEUDO_CSUM;
10308
10309         /* Derive initial jumbo mode from MTU assigned in
10310          * ether_setup() via the alloc_etherdev() call
10311          */
10312         if (tp->dev->mtu > ETH_DATA_LEN &&
10313             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
10314                 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
10315
10316         /* Determine WakeOnLan speed to use. */
10317         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10318             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
10319             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
10320             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
10321                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
10322         } else {
10323                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
10324         }
10325
10326         /* A few boards don't want Ethernet@WireSpeed phy feature */
10327         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10328             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
10329              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
10330              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
10331             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
10332                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
10333
10334         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
10335             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
10336                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
10337         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
10338                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
10339
10340         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
10341             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
10342             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787))
10343                 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
10344
10345         tp->coalesce_mode = 0;
10346         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
10347             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
10348                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
10349
10350         /* Initialize MAC MI mode, polling disabled. */
10351         tw32_f(MAC_MI_MODE, tp->mi_mode);
10352         udelay(80);
10353
10354         /* Initialize data/descriptor byte/word swapping. */
10355         val = tr32(GRC_MODE);
10356         val &= GRC_MODE_HOST_STACKUP;
10357         tw32(GRC_MODE, val | tp->grc_mode);
10358
10359         tg3_switch_clocks(tp);
10360
10361         /* Clear this out for sanity. */
10362         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10363
10364         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10365                               &pci_state_reg);
10366         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
10367             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
10368                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
10369
10370                 if (chiprevid == CHIPREV_ID_5701_A0 ||
10371                     chiprevid == CHIPREV_ID_5701_B0 ||
10372                     chiprevid == CHIPREV_ID_5701_B2 ||
10373                     chiprevid == CHIPREV_ID_5701_B5) {
10374                         void __iomem *sram_base;
10375
10376                         /* Write some dummy words into the SRAM status block
10377                          * area, see if it reads back correctly.  If the return
10378                          * value is bad, force enable the PCIX workaround.
10379                          */
10380                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
10381
10382                         writel(0x00000000, sram_base);
10383                         writel(0x00000000, sram_base + 4);
10384                         writel(0xffffffff, sram_base + 4);
10385                         if (readl(sram_base) != 0x00000000)
10386                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10387                 }
10388         }
10389
10390         udelay(50);
10391         tg3_nvram_init(tp);
10392
10393         grc_misc_cfg = tr32(GRC_MISC_CFG);
10394         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
10395
10396         /* Broadcom's driver says that CIOBE multisplit has a bug */
10397 #if 0
10398         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
10399             grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
10400                 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
10401                 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
10402         }
10403 #endif
10404         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10405             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
10406              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
10407                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
10408
10409         if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
10410             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
10411                 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
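        /* With tagged status the chip stamps each status block update and the
         * driver acknowledges it by writing the tag back to the interrupt
         * mailbox, which is presumably why the clear-ticks-on-BD coalescing
         * modes are only enabled in this mode.
         */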
10412         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
10413                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
10414                                       HOSTCC_MODE_CLRTICK_TXBD);
10415
10416                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
10417                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10418                                        tp->misc_host_ctrl);
10419         }
10420
10421         /* these are limited to 10/100 only */
10422         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
10423              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
10424             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10425              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
10426              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
10427               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
10428               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
10429             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
10430              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
10431               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)))
10432                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
10433
10434         err = tg3_phy_probe(tp);
10435         if (err) {
10436                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
10437                        pci_name(tp->pdev), err);
10438                 /* ... but do not return immediately ... */
10439         }
10440
10441         tg3_read_partno(tp);
10442         tg3_read_fw_ver(tp);
10443
10444         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
10445                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10446         } else {
10447                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10448                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
10449                 else
10450                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10451         }
10452
10453         /* 5700 {AX,BX} chips have a broken status block link
10454          * change bit implementation, so we must use the
10455          * status register in those cases.
10456          */
10457         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10458                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
10459         else
10460                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
10461
10462         /* The led_ctrl is set during tg3_phy_probe; here we might
10463          * have to force the link status polling mechanism based
10464          * upon subsystem IDs.
10465          */
10466         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10467             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
10468                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
10469                                   TG3_FLAG_USE_LINKCHG_REG);
10470         }
10471
10472         /* For all SERDES we poll the MAC status register. */
10473         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10474                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
10475         else
10476                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
10477
10478         /* All chips other than the 5755 and 5787 can get confused if TX
10479          * buffers straddle the 4GB address boundary in some cases.
10480          */
10481         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10482             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
10483                 tp->dev->hard_start_xmit = tg3_start_xmit;
10484         else
10485                 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
10486
10487         tp->rx_offset = 2;
10488         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
10489             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
10490                 tp->rx_offset = 0;
10491
10492         /* By default, disable wake-on-lan.  User can change this
10493          * using ETHTOOL_SWOL.
10494          */
10495         tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
10496
10497         return err;
10498 }
10499
10500 #ifdef CONFIG_SPARC64
10501 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
10502 {
10503         struct net_device *dev = tp->dev;
10504         struct pci_dev *pdev = tp->pdev;
10505         struct pcidev_cookie *pcp = pdev->sysdata;
10506
10507         if (pcp != NULL) {
10508                 int node = pcp->prom_node;
10509
10510                 if (prom_getproplen(node, "local-mac-address") == 6) {
10511                         prom_getproperty(node, "local-mac-address",
10512                                          dev->dev_addr, 6);
10513                         memcpy(dev->perm_addr, dev->dev_addr, 6);
10514                         return 0;
10515                 }
10516         }
10517         return -ENODEV;
10518 }
10519
10520 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
10521 {
10522         struct net_device *dev = tp->dev;
10523
10524         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
10525         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
10526         return 0;
10527 }
10528 #endif
10529
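/* Obtain the permanent MAC address: try the SPARC PROM property first,
 * then the MAC address mailbox in NIC SRAM, then NVRAM, and finally
 * whatever is already programmed into the MAC address registers.
 */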
10530 static int __devinit tg3_get_device_address(struct tg3 *tp)
10531 {
10532         struct net_device *dev = tp->dev;
10533         u32 hi, lo, mac_offset;
10534
10535 #ifdef CONFIG_SPARC64
10536         if (!tg3_get_macaddr_sparc(tp))
10537                 return 0;
10538 #endif
10539
10540         mac_offset = 0x7c;
10541         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
10542              !(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) ||
10543             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
10544                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
10545                         mac_offset = 0xcc;
10546                 if (tg3_nvram_lock(tp))
10547                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
10548                 else
10549                         tg3_nvram_unlock(tp);
10550         }
10551
10552         /* First try to get it from MAC address mailbox. */
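        /* A valid mailbox entry carries the signature 0x484b (ASCII "HK")
         * in its upper 16 bits.
         */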
10553         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
10554         if ((hi >> 16) == 0x484b) {
10555                 dev->dev_addr[0] = (hi >>  8) & 0xff;
10556                 dev->dev_addr[1] = (hi >>  0) & 0xff;
10557
10558                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
10559                 dev->dev_addr[2] = (lo >> 24) & 0xff;
10560                 dev->dev_addr[3] = (lo >> 16) & 0xff;
10561                 dev->dev_addr[4] = (lo >>  8) & 0xff;
10562                 dev->dev_addr[5] = (lo >>  0) & 0xff;
10563         }
10564         /* Next, try NVRAM. */
10565         else if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X) &&
10566                  !tg3_nvram_read(tp, mac_offset + 0, &hi) &&
10567                  !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
10568                 dev->dev_addr[0] = ((hi >> 16) & 0xff);
10569                 dev->dev_addr[1] = ((hi >> 24) & 0xff);
10570                 dev->dev_addr[2] = ((lo >>  0) & 0xff);
10571                 dev->dev_addr[3] = ((lo >>  8) & 0xff);
10572                 dev->dev_addr[4] = ((lo >> 16) & 0xff);
10573                 dev->dev_addr[5] = ((lo >> 24) & 0xff);
10574         }
10575         /* Finally just fetch it out of the MAC control regs. */
10576         else {
10577                 hi = tr32(MAC_ADDR_0_HIGH);
10578                 lo = tr32(MAC_ADDR_0_LOW);
10579
10580                 dev->dev_addr[5] = lo & 0xff;
10581                 dev->dev_addr[4] = (lo >> 8) & 0xff;
10582                 dev->dev_addr[3] = (lo >> 16) & 0xff;
10583                 dev->dev_addr[2] = (lo >> 24) & 0xff;
10584                 dev->dev_addr[1] = hi & 0xff;
10585                 dev->dev_addr[0] = (hi >> 8) & 0xff;
10586         }
10587
10588         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
10589 #ifdef CONFIG_SPARC64
10590                 if (!tg3_get_default_macaddr_sparc(tp))
10591                         return 0;
10592 #endif
10593                 return -EINVAL;
10594         }
10595         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
10596         return 0;
10597 }
10598
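/* Burst boundary goals for tg3_calc_dma_bndry() below: either bound DMA
 * bursts at roughly every cache line, or let them run across several
 * cache lines before breaking.
 */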
10599 #define BOUNDARY_SINGLE_CACHELINE       1
10600 #define BOUNDARY_MULTI_CACHELINE        2
10601
10602 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
10603 {
10604         int cacheline_size;
10605         u8 byte;
10606         int goal;
10607
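        /* PCI_CACHE_LINE_SIZE is expressed in 32-bit words; a value of zero
         * means it was never programmed, in which case fall back to 1024
         * bytes below.
         */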
10608         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
10609         if (byte == 0)
10610                 cacheline_size = 1024;
10611         else
10612                 cacheline_size = (int) byte * 4;
10613
10614         /* On 5703 and later chips, the boundary bits have no
10615          * effect.
10616          */
10617         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10618             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
10619             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
10620                 goto out;
10621
10622 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
10623         goal = BOUNDARY_MULTI_CACHELINE;
10624 #else
10625 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
10626         goal = BOUNDARY_SINGLE_CACHELINE;
10627 #else
10628         goal = 0;
10629 #endif
10630 #endif
10631
10632         if (!goal)
10633                 goto out;
10634
10635         /* PCI controllers on most RISC systems tend to disconnect
10636          * when a device tries to burst across a cache-line boundary.
10637          * Therefore, letting tg3 do so just wastes PCI bandwidth.
10638          *
10639          * Unfortunately, for PCI-E there are only limited
10640          * write-side controls for this, and thus for reads
10641          * we will still get the disconnects.  We'll also waste
10642          * these PCI cycles for both read and write for chips
10643          * other than 5700 and 5701 which do not implement the
10644          * boundary bits.
10645          */
10646         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
10647             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
10648                 switch (cacheline_size) {
10649                 case 16:
10650                 case 32:
10651                 case 64:
10652                 case 128:
10653                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10654                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
10655                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
10656                         } else {
10657                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
10658                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
10659                         }
10660                         break;
10661
10662                 case 256:
10663                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
10664                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
10665                         break;
10666
10667                 default:
10668                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
10669                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
10670                         break;
10671                 }
10672         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10673                 switch (cacheline_size) {
10674                 case 16:
10675                 case 32:
10676                 case 64:
10677                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10678                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
10679                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
10680                                 break;
10681                         }
10682                         /* fallthrough */
10683                 case 128:
10684                 default:
10685                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
10686                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
10687                         break;
10688                 }
10689         } else {
10690                 switch (cacheline_size) {
10691                 case 16:
10692                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10693                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
10694                                         DMA_RWCTRL_WRITE_BNDRY_16);
10695                                 break;
10696                         }
10697                         /* fallthrough */
10698                 case 32:
10699                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10700                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
10701                                         DMA_RWCTRL_WRITE_BNDRY_32);
10702                                 break;
10703                         }
10704                         /* fallthrough */
10705                 case 64:
10706                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10707                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
10708                                         DMA_RWCTRL_WRITE_BNDRY_64);
10709                                 break;
10710                         }
10711                         /* fallthrough */
10712                 case 128:
10713                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10714                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
10715                                         DMA_RWCTRL_WRITE_BNDRY_128);
10716                                 break;
10717                         }
10718                         /* fallthrough */
10719                 case 256:
10720                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
10721                                 DMA_RWCTRL_WRITE_BNDRY_256);
10722                         break;
10723                 case 512:
10724                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
10725                                 DMA_RWCTRL_WRITE_BNDRY_512);
10726                         break;
10727                 case 1024:
10728                 default:
10729                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
10730                                 DMA_RWCTRL_WRITE_BNDRY_1024);
10731                         break;
10732                 }
10733         }
10734
10735 out:
10736         return val;
10737 }
10738
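/* Drive one host<->NIC DMA transfer by hand: build a single internal
 * buffer descriptor in NIC SRAM through the PCI memory window, kick the
 * read or write DMA engine via its FTQ, and poll the completion FIFO
 * until the descriptor comes back (or give up after ~4ms).
 */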
10739 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
10740 {
10741         struct tg3_internal_buffer_desc test_desc;
10742         u32 sram_dma_descs;
10743         int i, ret;
10744
10745         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
10746
10747         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
10748         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
10749         tw32(RDMAC_STATUS, 0);
10750         tw32(WDMAC_STATUS, 0);
10751
10752         tw32(BUFMGR_MODE, 0);
10753         tw32(FTQ_RESET, 0);
10754
10755         test_desc.addr_hi = ((u64) buf_dma) >> 32;
10756         test_desc.addr_lo = buf_dma & 0xffffffff;
10757         test_desc.nic_mbuf = 0x00002100;
10758         test_desc.len = size;
10759
10760         /*
10761          * HP ZX1 systems saw test failures for 5701 cards running at 33MHz
10762          * the *second* time the tg3 driver was loaded after an
10763          * initial scan.
10764          *
10765          * Broadcom tells me:
10766          *   ...the DMA engine is connected to the GRC block and a DMA
10767          *   reset may affect the GRC block in some unpredictable way...
10768          *   The behavior of resets to individual blocks has not been tested.
10769          *
10770          * Broadcom noted the GRC reset will also reset all sub-components.
10771          */
10772         if (to_device) {
10773                 test_desc.cqid_sqid = (13 << 8) | 2;
10774
10775                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
10776                 udelay(40);
10777         } else {
10778                 test_desc.cqid_sqid = (16 << 8) | 7;
10779
10780                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
10781                 udelay(40);
10782         }
10783         test_desc.flags = 0x00000005;
10784
10785         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
10786                 u32 val;
10787
10788                 val = *(((u32 *)&test_desc) + i);
10789                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
10790                                        sram_dma_descs + (i * sizeof(u32)));
10791                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
10792         }
10793         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
10794
10795         if (to_device) {
10796                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
10797         } else {
10798                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
10799         }
10800
10801         ret = -ENODEV;
10802         for (i = 0; i < 40; i++) {
10803                 u32 val;
10804
10805                 if (to_device)
10806                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
10807                 else
10808                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
10809                 if ((val & 0xffff) == sram_dma_descs) {
10810                         ret = 0;
10811                         break;
10812                 }
10813
10814                 udelay(100);
10815         }
10816
10817         return ret;
10818 }
10819
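/* Size (8 kB) of the scratch buffer used by the DMA engine test. */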
10820 #define TEST_BUFFER_SIZE        0x2000
10821
10822 static int __devinit tg3_test_dma(struct tg3 *tp)
10823 {
10824         dma_addr_t buf_dma;
10825         u32 *buf, saved_dma_rwctrl;
10826         int ret;
10827
10828         buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
10829         if (!buf) {
10830                 ret = -ENOMEM;
10831                 goto out_nofree;
10832         }
10833
10834         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
10835                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
10836
10837         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
10838
10839         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10840                 /* DMA read watermark not used on PCIE */
10841                 tp->dma_rwctrl |= 0x00180000;
10842         } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
10843                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
10844                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
10845                         tp->dma_rwctrl |= 0x003f0000;
10846                 else
10847                         tp->dma_rwctrl |= 0x003f000f;
10848         } else {
10849                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
10850                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
10851                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
10852
10853                         /* If the 5704 is behind the EPB bridge, we can
10854                          * do the less restrictive ONE_DMA workaround for
10855                          * better performance.
10856                          */
10857                         if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
10858                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
10859                                 tp->dma_rwctrl |= 0x8000;
10860                         else if (ccval == 0x6 || ccval == 0x7)
10861                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
10862
10863                         /* Set bit 23 to enable PCIX hw bug fix */
10864                         tp->dma_rwctrl |= 0x009f0000;
10865                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
10866                         /* 5780 always in PCIX mode */
10867                         tp->dma_rwctrl |= 0x00144000;
10868                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
10869                         /* 5714 always in PCIX mode */
10870                         tp->dma_rwctrl |= 0x00148000;
10871                 } else {
10872                         tp->dma_rwctrl |= 0x001b000f;
10873                 }
10874         }
10875
10876         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
10877             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
10878                 tp->dma_rwctrl &= 0xfffffff0;
10879
10880         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10881             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
10882                 /* Remove this if it causes problems for some boards. */
10883                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
10884
10885                 /* On 5700/5701 chips, we need to set this bit.
10886                  * Otherwise the chip will issue cacheline transactions
10887                  * to streamable DMA memory with not all the byte
10888                  * enables turned on.  This is an error on several
10889                  * RISC PCI controllers, in particular sparc64.
10890                  *
10891                  * On 5703/5704 chips, this bit has been reassigned
10892                  * a different meaning.  In particular, it is used
10893                  * on those chips to enable a PCI-X workaround.
10894                  */
10895                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
10896         }
10897
10898         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10899
10900 #if 0
10901         /* Unneeded, already done by tg3_get_invariants.  */
10902         tg3_switch_clocks(tp);
10903 #endif
10904
10905         ret = 0;
10906         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10907             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
10908                 goto out;
10909
10910         /* It is best to perform the DMA test with the maximum write burst size
10911          * to expose the 5700/5701 write DMA bug.
10912          */
10913         saved_dma_rwctrl = tp->dma_rwctrl;
10914         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
10915         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10916
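        /* Write the test pattern out, read it back and verify.  On a
         * mismatch, retry once with the write boundary forced down to
         * 16 bytes; if it still corrupts, give up.
         */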
10917         while (1) {
10918                 u32 *p = buf, i;
10919
10920                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
10921                         p[i] = i;
10922
10923                 /* Send the buffer to the chip. */
10924                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
10925                 if (ret) {
10926                         printk(KERN_ERR "tg3_test_dma() write to device failed, err %d\n", ret);
10927                         break;
10928                 }
10929
10930 #if 0
10931                 /* validate data reached card RAM correctly. */
10932                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
10933                         u32 val;
10934                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
10935                         if (le32_to_cpu(val) != p[i]) {
10936                                 printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
10937                                 /* ret = -ENODEV here? */
10938                         }
10939                         p[i] = 0;
10940                 }
10941 #endif
10942                 /* Now read it back. */
10943                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
10944                 if (ret) {
10945                         printk(KERN_ERR "tg3_test_dma() read from device failed, err %d\n", ret);
10946
10947                         break;
10948                 }
10949
10950                 /* Verify it. */
10951                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
10952                         if (p[i] == i)
10953                                 continue;
10954
10955                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
10956                             DMA_RWCTRL_WRITE_BNDRY_16) {
10957                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
10958                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
10959                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10960                                 break;
10961                         } else {
10962                                 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
10963                                 ret = -ENODEV;
10964                                 goto out;
10965                         }
10966                 }
10967
10968                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
10969                         /* Success. */
10970                         ret = 0;
10971                         break;
10972                 }
10973         }
10974         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
10975             DMA_RWCTRL_WRITE_BNDRY_16) {
10976                 static struct pci_device_id dma_wait_state_chipsets[] = {
10977                         { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
10978                                      PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
10979                         { },
10980                 };
10981
10982                 /* DMA test passed without adjusting DMA boundary,
10983                  * now look for chipsets that are known to expose the
10984                  * DMA bug without failing the test.
10985                  */
10986                 if (pci_dev_present(dma_wait_state_chipsets)) {
10987                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
10988                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
10989                 } else {
10990                         /* Safe to use the calculated DMA boundary. */
10991                         tp->dma_rwctrl = saved_dma_rwctrl;
10992                 }
10993
10994                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10995         }
10996
10997 out:
10998         pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
10999 out_nofree:
11000         return ret;
11001 }
11002
11003 static void __devinit tg3_init_link_config(struct tg3 *tp)
11004 {
11005         tp->link_config.advertising =
11006                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
11007                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
11008                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
11009                  ADVERTISED_Autoneg | ADVERTISED_MII);
11010         tp->link_config.speed = SPEED_INVALID;
11011         tp->link_config.duplex = DUPLEX_INVALID;
11012         tp->link_config.autoneg = AUTONEG_ENABLE;
11013         tp->link_config.active_speed = SPEED_INVALID;
11014         tp->link_config.active_duplex = DUPLEX_INVALID;
11015         tp->link_config.phy_is_low_power = 0;
11016         tp->link_config.orig_speed = SPEED_INVALID;
11017         tp->link_config.orig_duplex = DUPLEX_INVALID;
11018         tp->link_config.orig_autoneg = AUTONEG_INVALID;
11019 }
11020
11021 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
11022 {
11023         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11024                 tp->bufmgr_config.mbuf_read_dma_low_water =
11025                         DEFAULT_MB_RDMA_LOW_WATER_5705;
11026                 tp->bufmgr_config.mbuf_mac_rx_low_water =
11027                         DEFAULT_MB_MACRX_LOW_WATER_5705;
11028                 tp->bufmgr_config.mbuf_high_water =
11029                         DEFAULT_MB_HIGH_WATER_5705;
11030
11031                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
11032                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
11033                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
11034                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
11035                 tp->bufmgr_config.mbuf_high_water_jumbo =
11036                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
11037         } else {
11038                 tp->bufmgr_config.mbuf_read_dma_low_water =
11039                         DEFAULT_MB_RDMA_LOW_WATER;
11040                 tp->bufmgr_config.mbuf_mac_rx_low_water =
11041                         DEFAULT_MB_MACRX_LOW_WATER;
11042                 tp->bufmgr_config.mbuf_high_water =
11043                         DEFAULT_MB_HIGH_WATER;
11044
11045                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
11046                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
11047                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
11048                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
11049                 tp->bufmgr_config.mbuf_high_water_jumbo =
11050                         DEFAULT_MB_HIGH_WATER_JUMBO;
11051         }
11052
11053         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
11054         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
11055 }
11056
11057 static char * __devinit tg3_phy_string(struct tg3 *tp)
11058 {
11059         switch (tp->phy_id & PHY_ID_MASK) {
11060         case PHY_ID_BCM5400:    return "5400";
11061         case PHY_ID_BCM5401:    return "5401";
11062         case PHY_ID_BCM5411:    return "5411";
11063         case PHY_ID_BCM5701:    return "5701";
11064         case PHY_ID_BCM5703:    return "5703";
11065         case PHY_ID_BCM5704:    return "5704";
11066         case PHY_ID_BCM5705:    return "5705";
11067         case PHY_ID_BCM5750:    return "5750";
11068         case PHY_ID_BCM5752:    return "5752";
11069         case PHY_ID_BCM5714:    return "5714";
11070         case PHY_ID_BCM5780:    return "5780";
11071         case PHY_ID_BCM5755:    return "5755";
11072         case PHY_ID_BCM5787:    return "5787";
11073         case PHY_ID_BCM8002:    return "8002/serdes";
11074         case 0:                 return "serdes";
11075         default:                return "unknown";
11076         }
11077 }
11078
11079 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
11080 {
11081         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11082                 strcpy(str, "PCI Express");
11083                 return str;
11084         } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
11085                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
11086
11087                 strcpy(str, "PCIX:");
11088
11089                 if ((clock_ctrl == 7) ||
11090                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
11091                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
11092                         strcat(str, "133MHz");
11093                 else if (clock_ctrl == 0)
11094                         strcat(str, "33MHz");
11095                 else if (clock_ctrl == 2)
11096                         strcat(str, "50MHz");
11097                 else if (clock_ctrl == 4)
11098                         strcat(str, "66MHz");
11099                 else if (clock_ctrl == 6)
11100                         strcat(str, "100MHz");
11101         } else {
11102                 strcpy(str, "PCI:");
11103                 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
11104                         strcat(str, "66MHz");
11105                 else
11106                         strcat(str, "33MHz");
11107         }
11108         if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
11109                 strcat(str, ":32-bit");
11110         else
11111                 strcat(str, ":64-bit");
11112         return str;
11113 }
11114
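/* On dual-port chips (5704/5714) the two MACs appear as separate PCI
 * functions in the same slot; locate the mate of this device.
 */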
11115 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
11116 {
11117         struct pci_dev *peer;
11118         unsigned int func, devnr = tp->pdev->devfn & ~7;
11119
11120         for (func = 0; func < 8; func++) {
11121                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
11122                 if (peer && peer != tp->pdev)
11123                         break;
11124                 pci_dev_put(peer);
11125         }
11126         /* 5704 can be configured in single-port mode, set peer to
11127          * tp->pdev in that case.
11128          */
11129         if (!peer) {
11130                 peer = tp->pdev;
11131                 return peer;
11132         }
11133
11134         /*
11135          * We don't need to keep the refcount elevated; there's no way
11136          * to remove one half of this device without removing the other
11137          */
11138         pci_dev_put(peer);
11139
11140         return peer;
11141 }
11142
11143 static void __devinit tg3_init_coal(struct tg3 *tp)
11144 {
11145         struct ethtool_coalesce *ec = &tp->coal;
11146
11147         memset(ec, 0, sizeof(*ec));
11148         ec->cmd = ETHTOOL_GCOALESCE;
11149         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
11150         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
11151         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
11152         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
11153         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
11154         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
11155         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
11156         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
11157         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
11158
11159         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
11160                                  HOSTCC_MODE_CLRTICK_TXBD)) {
11161                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
11162                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
11163                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
11164                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
11165         }
11166
11167         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11168                 ec->rx_coalesce_usecs_irq = 0;
11169                 ec->tx_coalesce_usecs_irq = 0;
11170                 ec->stats_block_coalesce_usecs = 0;
11171         }
11172 }
11173
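/* PCI probe: enable and map the device, read the chip invariants, choose
 * DMA masks, fetch the MAC address, run the DMA engine test and finally
 * register the net device.
 */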
11174 static int __devinit tg3_init_one(struct pci_dev *pdev,
11175                                   const struct pci_device_id *ent)
11176 {
11177         static int tg3_version_printed = 0;
11178         unsigned long tg3reg_base, tg3reg_len;
11179         struct net_device *dev;
11180         struct tg3 *tp;
11181         int i, err, pm_cap;
11182         char str[40];
11183         u64 dma_mask, persist_dma_mask;
11184
11185         if (tg3_version_printed++ == 0)
11186                 printk(KERN_INFO "%s", version);
11187
11188         err = pci_enable_device(pdev);
11189         if (err) {
11190                 printk(KERN_ERR PFX "Cannot enable PCI device, "
11191                        "aborting.\n");
11192                 return err;
11193         }
11194
11195         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
11196                 printk(KERN_ERR PFX "Cannot find proper PCI device "
11197                        "base address, aborting.\n");
11198                 err = -ENODEV;
11199                 goto err_out_disable_pdev;
11200         }
11201
11202         err = pci_request_regions(pdev, DRV_MODULE_NAME);
11203         if (err) {
11204                 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
11205                        "aborting.\n");
11206                 goto err_out_disable_pdev;
11207         }
11208
11209         pci_set_master(pdev);
11210
11211         /* Find power-management capability. */
11212         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
11213         if (pm_cap == 0) {
11214                 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
11215                        "aborting.\n");
11216                 err = -EIO;
11217                 goto err_out_free_res;
11218         }
11219
11220         tg3reg_base = pci_resource_start(pdev, 0);
11221         tg3reg_len = pci_resource_len(pdev, 0);
11222
11223         dev = alloc_etherdev(sizeof(*tp));
11224         if (!dev) {
11225                 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
11226                 err = -ENOMEM;
11227                 goto err_out_free_res;
11228         }
11229
11230         SET_MODULE_OWNER(dev);
11231         SET_NETDEV_DEV(dev, &pdev->dev);
11232
11233         dev->features |= NETIF_F_LLTX;
11234 #if TG3_VLAN_TAG_USED
11235         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
11236         dev->vlan_rx_register = tg3_vlan_rx_register;
11237         dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
11238 #endif
11239
11240         tp = netdev_priv(dev);
11241         tp->pdev = pdev;
11242         tp->dev = dev;
11243         tp->pm_cap = pm_cap;
11244         tp->mac_mode = TG3_DEF_MAC_MODE;
11245         tp->rx_mode = TG3_DEF_RX_MODE;
11246         tp->tx_mode = TG3_DEF_TX_MODE;
11247         tp->mi_mode = MAC_MI_MODE_BASE;
11248         if (tg3_debug > 0)
11249                 tp->msg_enable = tg3_debug;
11250         else
11251                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
11252
11253         /* The word/byte swap controls here control register access byte
11254          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
11255          * setting below.
11256          */
11257         tp->misc_host_ctrl =
11258                 MISC_HOST_CTRL_MASK_PCI_INT |
11259                 MISC_HOST_CTRL_WORD_SWAP |
11260                 MISC_HOST_CTRL_INDIR_ACCESS |
11261                 MISC_HOST_CTRL_PCISTATE_RW;
11262
11263         /* The NONFRM (non-frame) byte/word swap controls take effect
11264          * on descriptor entries, anything which isn't packet data.
11265          *
11266          * The StrongARM chips on the board (one for tx, one for rx)
11267          * are running in big-endian mode.
11268          */
11269         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
11270                         GRC_MODE_WSWAP_NONFRM_DATA);
11271 #ifdef __BIG_ENDIAN
11272         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
11273 #endif
11274         spin_lock_init(&tp->lock);
11275         spin_lock_init(&tp->tx_lock);
11276         spin_lock_init(&tp->indirect_lock);
11277         INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
11278
11279         tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
11280         if (!tp->regs) {
11281                 printk(KERN_ERR PFX "Cannot map device registers, "
11282                        "aborting.\n");
11283                 err = -ENOMEM;
11284                 goto err_out_free_dev;
11285         }
11286
11287         tg3_init_link_config(tp);
11288
11289         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
11290         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
11291         tp->tx_pending = TG3_DEF_TX_RING_PENDING;
11292
11293         dev->open = tg3_open;
11294         dev->stop = tg3_close;
11295         dev->get_stats = tg3_get_stats;
11296         dev->set_multicast_list = tg3_set_rx_mode;
11297         dev->set_mac_address = tg3_set_mac_addr;
11298         dev->do_ioctl = tg3_ioctl;
11299         dev->tx_timeout = tg3_tx_timeout;
11300         dev->poll = tg3_poll;
11301         dev->ethtool_ops = &tg3_ethtool_ops;
11302         dev->weight = 64;
11303         dev->watchdog_timeo = TG3_TX_TIMEOUT;
11304         dev->change_mtu = tg3_change_mtu;
11305         dev->irq = pdev->irq;
11306 #ifdef CONFIG_NET_POLL_CONTROLLER
11307         dev->poll_controller = tg3_poll_controller;
11308 #endif
11309
11310         err = tg3_get_invariants(tp);
11311         if (err) {
11312                 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
11313                        "aborting.\n");
11314                 goto err_out_iounmap;
11315         }
11316
11317         /* The EPB bridge inside 5714, 5715, and 5780 and any
11318          * device behind the EPB cannot support DMA addresses > 40-bit.
11319          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
11320          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
11321          * do DMA address check in tg3_start_xmit().
11322          */
11323         if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
11324                 persist_dma_mask = dma_mask = DMA_32BIT_MASK;
11325         else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
11326                 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
11327 #ifdef CONFIG_HIGHMEM
11328                 dma_mask = DMA_64BIT_MASK;
11329 #endif
11330         } else
11331                 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
11332
11333         /* Configure DMA attributes. */
11334         if (dma_mask > DMA_32BIT_MASK) {
11335                 err = pci_set_dma_mask(pdev, dma_mask);
11336                 if (!err) {
11337                         dev->features |= NETIF_F_HIGHDMA;
11338                         err = pci_set_consistent_dma_mask(pdev,
11339                                                           persist_dma_mask);
11340                         if (err < 0) {
11341                                 printk(KERN_ERR PFX "Unable to obtain 64 bit "
11342                                        "DMA for consistent allocations\n");
11343                                 goto err_out_iounmap;
11344                         }
11345                 }
11346         }
11347         if (err || dma_mask == DMA_32BIT_MASK) {
11348                 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
11349                 if (err) {
11350                         printk(KERN_ERR PFX "No usable DMA configuration, "
11351                                "aborting.\n");
11352                         goto err_out_iounmap;
11353                 }
11354         }
11355
11356         tg3_init_bufmgr_config(tp);
11357
11358 #if TG3_TSO_SUPPORT != 0
11359         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
11360                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
11361         }
11362         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11363             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
11364             tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
11365             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
11366                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
11367         } else {
11368                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
11369         }
11370
11371         /* TSO is on by default on chips that support hardware TSO.
11372          * Firmware TSO on older chips gives lower performance, so it
11373          * is off by default, but can be enabled using ethtool.
11374          */
11375         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
11376                 dev->features |= NETIF_F_TSO;
11377
11378 #endif
11379
11380         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
11381             !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
11382             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
11383                 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
11384                 tp->rx_pending = 63;
11385         }
11386
11387         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
11388             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
11389                 tp->pdev_peer = tg3_find_peer(tp);
11390
11391         err = tg3_get_device_address(tp);
11392         if (err) {
11393                 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
11394                        "aborting.\n");
11395                 goto err_out_iounmap;
11396         }
11397
11398         /*
11399          * Reset the chip in case the UNDI or EFI driver did not shut it
11400          * down cleanly.  Otherwise the DMA self test will enable the WDMAC
11401          * and we'll see (spurious) pending DMA on the PCI bus at that point.
11402          */
11403         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
11404             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
11405                 pci_save_state(tp->pdev);
11406                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
11407                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11408         }
11409
11410         err = tg3_test_dma(tp);
11411         if (err) {
11412                 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
11413                 goto err_out_iounmap;
11414         }
11415
11416         /* Tigon3 can do IPv4 only... and some chips have buggy
11417          * checksumming.
11418          */
11419         if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
11420                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11421                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
11422                         dev->features |= NETIF_F_HW_CSUM;
11423                 else
11424                         dev->features |= NETIF_F_IP_CSUM;
11425                 dev->features |= NETIF_F_SG;
11426                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
11427         } else
11428                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
11429
11430         /* flow control autonegotiation is default behavior */
11431         tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
11432
11433         tg3_init_coal(tp);
11434
11435         /* Now that we have fully setup the chip, save away a snapshot
11436          * of the PCI config space.  We need to restore this after
11437          * GRC_MISC_CFG core clock resets and some resume events.
11438          */
11439         pci_save_state(tp->pdev);
11440
11441         err = register_netdev(dev);
11442         if (err) {
11443                 printk(KERN_ERR PFX "Cannot register net device, "
11444                        "aborting.\n");
11445                 goto err_out_iounmap;
11446         }
11447
11448         pci_set_drvdata(pdev, dev);
11449
11450         printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %sBaseT Ethernet ",
11451                dev->name,
11452                tp->board_part_number,
11453                tp->pci_chip_rev_id,
11454                tg3_phy_string(tp),
11455                tg3_bus_string(tp, str),
11456                (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");
11457
11458         for (i = 0; i < 6; i++)
11459                 printk("%2.2x%c", dev->dev_addr[i],
11460                        i == 5 ? '\n' : ':');
11461
11462         printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
11463                "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
11464                "TSOcap[%d] \n",
11465                dev->name,
11466                (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
11467                (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
11468                (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
11469                (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
11470                (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
11471                (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
11472                (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
11473         printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
11474                dev->name, tp->dma_rwctrl,
11475                (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
11476                 (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));
11477
11478         netif_carrier_off(tp->dev);
11479
11480         return 0;
11481
11482 err_out_iounmap:
11483         if (tp->regs) {
11484                 iounmap(tp->regs);
11485                 tp->regs = NULL;
11486         }
11487
11488 err_out_free_dev:
11489         free_netdev(dev);
11490
11491 err_out_free_res:
11492         pci_release_regions(pdev);
11493
11494 err_out_disable_pdev:
11495         pci_disable_device(pdev);
11496         pci_set_drvdata(pdev, NULL);
11497         return err;
11498 }
11499
11500 static void __devexit tg3_remove_one(struct pci_dev *pdev)
11501 {
11502         struct net_device *dev = pci_get_drvdata(pdev);
11503
11504         if (dev) {
11505                 struct tg3 *tp = netdev_priv(dev);
11506
11507                 flush_scheduled_work();
11508                 unregister_netdev(dev);
11509                 if (tp->regs) {
11510                         iounmap(tp->regs);
11511                         tp->regs = NULL;
11512                 }
11513                 free_netdev(dev);
11514                 pci_release_regions(pdev);
11515                 pci_disable_device(pdev);
11516                 pci_set_drvdata(pdev, NULL);
11517         }
11518 }
11519
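/* Quiesce the chip and enter the requested low-power state; if that
 * fails, bring the interface back up so it stays usable.
 */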
11520 static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
11521 {
11522         struct net_device *dev = pci_get_drvdata(pdev);
11523         struct tg3 *tp = netdev_priv(dev);
11524         int err;
11525
11526         if (!netif_running(dev))
11527                 return 0;
11528
11529         flush_scheduled_work();
11530         tg3_netif_stop(tp);
11531
11532         del_timer_sync(&tp->timer);
11533
11534         tg3_full_lock(tp, 1);
11535         tg3_disable_ints(tp);
11536         tg3_full_unlock(tp);
11537
11538         netif_device_detach(dev);
11539
11540         tg3_full_lock(tp, 0);
11541         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11542         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
11543         tg3_full_unlock(tp);
11544
11545         err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
11546         if (err) {
11547                 tg3_full_lock(tp, 0);
11548
11549                 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
11550                 tg3_init_hw(tp);
11551
11552                 tp->timer.expires = jiffies + tp->timer_offset;
11553                 add_timer(&tp->timer);
11554
11555                 netif_device_attach(dev);
11556                 tg3_netif_start(tp);
11557
11558                 tg3_full_unlock(tp);
11559         }
11560
11561         return err;
11562 }
11563
11564 static int tg3_resume(struct pci_dev *pdev)
11565 {
11566         struct net_device *dev = pci_get_drvdata(pdev);
11567         struct tg3 *tp = netdev_priv(dev);
11568         int err;
11569
11570         if (!netif_running(dev))
11571                 return 0;
11572
11573         pci_restore_state(tp->pdev);
11574
11575         err = tg3_set_power_state(tp, PCI_D0);
11576         if (err)
11577                 return err;
11578
11579         netif_device_attach(dev);
11580
11581         tg3_full_lock(tp, 0);
11582
11583         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
11584         tg3_init_hw(tp);
11585
11586         tp->timer.expires = jiffies + tp->timer_offset;
11587         add_timer(&tp->timer);
11588
11589         tg3_netif_start(tp);
11590
11591         tg3_full_unlock(tp);
11592
11593         return 0;
11594 }
11595
11596 static struct pci_driver tg3_driver = {
11597         .name           = DRV_MODULE_NAME,
11598         .id_table       = tg3_pci_tbl,
11599         .probe          = tg3_init_one,
11600         .remove         = __devexit_p(tg3_remove_one),
11601         .suspend        = tg3_suspend,
11602         .resume         = tg3_resume
11603 };
11604
11605 static int __init tg3_init(void)
11606 {
11607         return pci_module_init(&tg3_driver);
11608 }
11609
11610 static void __exit tg3_cleanup(void)
11611 {
11612         pci_unregister_driver(&tg3_driver);
11613 }
11614
11615 module_init(tg3_init);
11616 module_exit(tg3_cleanup);