1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/kernel.h>
22 #include <linux/types.h>
23 #include <linux/compiler.h>
24 #include <linux/slab.h>
25 #include <linux/delay.h>
26 #include <linux/in.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/if_vlan.h>
36 #include <linux/ip.h>
37 #include <linux/tcp.h>
38 #include <linux/workqueue.h>
39 #include <linux/prefetch.h>
40 #include <linux/dma-mapping.h>
41
42 #include <net/checksum.h>
43
44 #include <asm/system.h>
45 #include <asm/io.h>
46 #include <asm/byteorder.h>
47 #include <asm/uaccess.h>
48
49 #ifdef CONFIG_SPARC64
50 #include <asm/idprom.h>
51 #include <asm/oplib.h>
52 #include <asm/pbm.h>
53 #endif
54
55 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
56 #define TG3_VLAN_TAG_USED 1
57 #else
58 #define TG3_VLAN_TAG_USED 0
59 #endif
60
61 #ifdef NETIF_F_TSO
62 #define TG3_TSO_SUPPORT 1
63 #else
64 #define TG3_TSO_SUPPORT 0
65 #endif
66
67 #include "tg3.h"
68
69 #define DRV_MODULE_NAME         "tg3"
70 #define PFX DRV_MODULE_NAME     ": "
71 #define DRV_MODULE_VERSION      "3.64"
72 #define DRV_MODULE_RELDATE      "July 31, 2006"
73
74 #define TG3_DEF_MAC_MODE        0
75 #define TG3_DEF_RX_MODE         0
76 #define TG3_DEF_TX_MODE         0
77 #define TG3_DEF_MSG_ENABLE        \
78         (NETIF_MSG_DRV          | \
79          NETIF_MSG_PROBE        | \
80          NETIF_MSG_LINK         | \
81          NETIF_MSG_TIMER        | \
82          NETIF_MSG_IFDOWN       | \
83          NETIF_MSG_IFUP         | \
84          NETIF_MSG_RX_ERR       | \
85          NETIF_MSG_TX_ERR)
86
87 /* length of time before we decide the hardware is borked,
88  * and dev->tx_timeout() should be called to fix the problem
89  */
90 #define TG3_TX_TIMEOUT                  (5 * HZ)
91
92 /* hardware minimum and maximum for a single frame's data payload */
93 #define TG3_MIN_MTU                     60
94 #define TG3_MAX_MTU(tp) \
95         ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
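
/* Illustrative sketch only: how the bounds above are meant to be used.  The
 * helper name below is hypothetical; the real check lives in the driver's
 * change_mtu handler, which is outside this excerpt.
 */
static inline int tg3_example_mtu_valid(struct tg3 *tp, int new_mtu)
{
        return new_mtu >= TG3_MIN_MTU && new_mtu <= TG3_MAX_MTU(tp);
}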
96
97 /* These numbers seem to be hard coded in the NIC firmware somehow.
98  * You can't change the ring sizes, but you can change where you place
99  * them in the NIC onboard memory.
100  */
101 #define TG3_RX_RING_SIZE                512
102 #define TG3_DEF_RX_RING_PENDING         200
103 #define TG3_RX_JUMBO_RING_SIZE          256
104 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
105
106 /* Do not place this n-ring entries value into the tp struct itself;
107  * we really want to expose these constants to GCC so that modulo et
108  * al.  operations are done with shifts and masks instead of with
109  * hw multiply/modulo instructions.  Another solution would be to
110  * replace things like '% foo' with '& (foo - 1)'.
111  */
112 #define TG3_RX_RCB_RING_SIZE(tp)        \
113         ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)
114
115 #define TG3_TX_RING_SIZE                512
116 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
117
118 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
119                                  TG3_RX_RING_SIZE)
120 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
121                                  TG3_RX_JUMBO_RING_SIZE)
122 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
123                                    TG3_RX_RCB_RING_SIZE(tp))
124 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
125                                  TG3_TX_RING_SIZE)
126 #define TX_BUFFS_AVAIL(TP)                                              \
127         ((TP)->tx_pending -                                             \
128          (((TP)->tx_prod - (TP)->tx_cons) & (TG3_TX_RING_SIZE - 1)))
129 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
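
/* Illustrative sketch (hypothetical helper, not in the driver): because the
 * ring sizes above are powers of two, the wrap that NEXT_TX() performs is a
 * single AND -- for such sizes (n + 1) % size == (n + 1) & (size - 1), which
 * is exactly the '% foo' vs. '& (foo - 1)' trade-off described above.
 */
static inline u32 tg3_example_next_idx(u32 n, u32 pow2_ring_size)
{
        return (n + 1) & (pow2_ring_size - 1);
}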
130
131 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
132 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
133
134 /* minimum number of free TX descriptors required to wake up TX process */
135 #define TG3_TX_WAKEUP_THRESH            (TG3_TX_RING_SIZE / 4)
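
/* Illustrative sketch (hypothetical helper): how the TX completion path is
 * expected to use the threshold above -- once enough descriptors have been
 * reclaimed, a previously stopped queue is woken.  The real logic lives in
 * the TX reclaim routine outside this excerpt.
 */
static inline void tg3_example_tx_wake(struct tg3 *tp)
{
        if (netif_queue_stopped(tp->dev) &&
            TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
                netif_wake_queue(tp->dev);
}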
136
137 /* number of ETHTOOL_GSTATS u64's */
138 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
139
140 #define TG3_NUM_TEST            6
141
142 static char version[] __devinitdata =
143         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
144
145 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
146 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
147 MODULE_LICENSE("GPL");
148 MODULE_VERSION(DRV_MODULE_VERSION);
149
150 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
151 module_param(tg3_debug, int, 0);
152 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
153
154 static struct pci_device_id tg3_pci_tbl[] = {
155         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
156           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
157         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
158           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
159         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
160           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
161         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
162           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
163         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
164           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
165         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
166           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
167         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
168           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
169         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
170           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
171         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
172           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
173         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
174           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
175         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
176           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
177         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
178           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
179         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
180           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
181         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
182           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
183         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
184           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
185         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
186           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
187         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
188           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
189         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
190           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
191         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
192           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
193         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
194           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
195         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
196           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
197         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
198           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
199         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
200           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
201         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
202           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
203         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
204           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
205         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
206           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
207         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
208           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
209         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
210           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
211         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
212           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
213         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752,
214           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
215         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M,
216           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
217         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
218           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
219         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
220           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
221         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
222           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
223         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754,
224           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
225         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M,
226           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
227         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755,
228           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
229         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M,
230           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
231         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786,
232           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
233         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787,
234           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
235         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M,
236           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
237         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714,
238           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
239         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S,
240           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
241         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715,
242           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
243         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S,
244           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
245         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780,
246           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
247         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S,
248           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
249         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781,
250           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
251         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
252           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
253         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
254           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
255         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
256           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
257         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
258           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
259         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
260           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
261         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
262           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
263         { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
264           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
265         { 0, }
266 };
267
268 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
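
/* Illustrative note: each entry above spells out the full
 * { vendor, device, subvendor, subdevice, class, class_mask, driver_data }
 * initializer.  Assuming the PCI_DEVICE() helper from <linux/pci.h>, an
 * equivalent entry could be written as in the (non-compiled) sketch below.
 */
#if 0   /* example only */
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700) },
#endif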
269
270 static struct {
271         const char string[ETH_GSTRING_LEN];
272 } ethtool_stats_keys[TG3_NUM_STATS] = {
273         { "rx_octets" },
274         { "rx_fragments" },
275         { "rx_ucast_packets" },
276         { "rx_mcast_packets" },
277         { "rx_bcast_packets" },
278         { "rx_fcs_errors" },
279         { "rx_align_errors" },
280         { "rx_xon_pause_rcvd" },
281         { "rx_xoff_pause_rcvd" },
282         { "rx_mac_ctrl_rcvd" },
283         { "rx_xoff_entered" },
284         { "rx_frame_too_long_errors" },
285         { "rx_jabbers" },
286         { "rx_undersize_packets" },
287         { "rx_in_length_errors" },
288         { "rx_out_length_errors" },
289         { "rx_64_or_less_octet_packets" },
290         { "rx_65_to_127_octet_packets" },
291         { "rx_128_to_255_octet_packets" },
292         { "rx_256_to_511_octet_packets" },
293         { "rx_512_to_1023_octet_packets" },
294         { "rx_1024_to_1522_octet_packets" },
295         { "rx_1523_to_2047_octet_packets" },
296         { "rx_2048_to_4095_octet_packets" },
297         { "rx_4096_to_8191_octet_packets" },
298         { "rx_8192_to_9022_octet_packets" },
299
300         { "tx_octets" },
301         { "tx_collisions" },
302
303         { "tx_xon_sent" },
304         { "tx_xoff_sent" },
305         { "tx_flow_control" },
306         { "tx_mac_errors" },
307         { "tx_single_collisions" },
308         { "tx_mult_collisions" },
309         { "tx_deferred" },
310         { "tx_excessive_collisions" },
311         { "tx_late_collisions" },
312         { "tx_collide_2times" },
313         { "tx_collide_3times" },
314         { "tx_collide_4times" },
315         { "tx_collide_5times" },
316         { "tx_collide_6times" },
317         { "tx_collide_7times" },
318         { "tx_collide_8times" },
319         { "tx_collide_9times" },
320         { "tx_collide_10times" },
321         { "tx_collide_11times" },
322         { "tx_collide_12times" },
323         { "tx_collide_13times" },
324         { "tx_collide_14times" },
325         { "tx_collide_15times" },
326         { "tx_ucast_packets" },
327         { "tx_mcast_packets" },
328         { "tx_bcast_packets" },
329         { "tx_carrier_sense_errors" },
330         { "tx_discards" },
331         { "tx_errors" },
332
333         { "dma_writeq_full" },
334         { "dma_write_prioq_full" },
335         { "rxbds_empty" },
336         { "rx_discards" },
337         { "rx_errors" },
338         { "rx_threshold_hit" },
339
340         { "dma_readq_full" },
341         { "dma_read_prioq_full" },
342         { "tx_comp_queue_full" },
343
344         { "ring_set_send_prod_index" },
345         { "ring_status_update" },
346         { "nic_irqs" },
347         { "nic_avoided_irqs" },
348         { "nic_tx_threshold_hit" }
349 };
350
351 static struct {
352         const char string[ETH_GSTRING_LEN];
353 } ethtool_test_keys[TG3_NUM_TEST] = {
354         { "nvram test     (online) " },
355         { "link test      (online) " },
356         { "register test  (offline)" },
357         { "memory test    (offline)" },
358         { "loopback test  (offline)" },
359         { "interrupt test (offline)" },
360 };
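
/* Illustrative sketch (simplified; the driver's real ethtool get_strings
 * handler is outside this excerpt): the key tables above are copied verbatim
 * into the buffer that the ethtool core supplies.
 */
static void tg3_example_get_strings(u32 stringset, u8 *buf)
{
        switch (stringset) {
        case ETH_SS_STATS:
                memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
                break;
        case ETH_SS_TEST:
                memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
                break;
        }
}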
361
362 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
363 {
364         writel(val, tp->regs + off);
365 }
366
367 static u32 tg3_read32(struct tg3 *tp, u32 off)
368 {
369         return readl(tp->regs + off);
370 }
371
372 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
373 {
374         unsigned long flags;
375
376         spin_lock_irqsave(&tp->indirect_lock, flags);
377         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
378         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
379         spin_unlock_irqrestore(&tp->indirect_lock, flags);
380 }
381
382 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
383 {
384         writel(val, tp->regs + off);
385         readl(tp->regs + off);
386 }
387
388 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
389 {
390         unsigned long flags;
391         u32 val;
392
393         spin_lock_irqsave(&tp->indirect_lock, flags);
394         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
395         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
396         spin_unlock_irqrestore(&tp->indirect_lock, flags);
397         return val;
398 }
399
400 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
401 {
402         unsigned long flags;
403
404         if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
405                 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
406                                        TG3_64BIT_REG_LOW, val);
407                 return;
408         }
409         if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
410                 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
411                                        TG3_64BIT_REG_LOW, val);
412                 return;
413         }
414
415         spin_lock_irqsave(&tp->indirect_lock, flags);
416         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
417         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
418         spin_unlock_irqrestore(&tp->indirect_lock, flags);
419
420         /* In indirect mode when disabling interrupts, we also need
421          * to clear the interrupt bit in the GRC local ctrl register.
422          */
423         if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
424             (val == 0x1)) {
425                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
426                                        tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
427         }
428 }
429
430 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
431 {
432         unsigned long flags;
433         u32 val;
434
435         spin_lock_irqsave(&tp->indirect_lock, flags);
436         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
437         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
438         spin_unlock_irqrestore(&tp->indirect_lock, flags);
439         return val;
440 }
441
442 /* usec_wait specifies the wait time in usec when writing to certain registers
443  * where it is unsafe to read back the register without some delay.
444  * GRC_LOCAL_CTRL is one example, when the GPIOs are toggled to switch power.
445  * TG3PCI_CLOCK_CTRL is another example, when the clock frequencies are changed.
446  */
447 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
448 {
449         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
450             (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
451                 /* Non-posted methods */
452                 tp->write32(tp, off, val);
453         else {
454                 /* Posted method */
455                 tg3_write32(tp, off, val);
456                 if (usec_wait)
457                         udelay(usec_wait);
458                 tp->read32(tp, off);
459         }
460         /* Wait again after the read for the posted method to guarantee that
461          * the wait time is met.
462          */
463         if (usec_wait)
464                 udelay(usec_wait);
465 }
466
467 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
468 {
469         tp->write32_mbox(tp, off, val);
470         if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
471             !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
472                 tp->read32_mbox(tp, off);
473 }
474
475 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
476 {
477         void __iomem *mbox = tp->regs + off;
478         writel(val, mbox);
479         if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
480                 writel(val, mbox);
481         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
482                 readl(mbox);
483 }
484
485 #define tw32_mailbox(reg, val)  tp->write32_mbox(tp, reg, val)
486 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
487 #define tw32_rx_mbox(reg, val)  tp->write32_rx_mbox(tp, reg, val)
488 #define tw32_tx_mbox(reg, val)  tp->write32_tx_mbox(tp, reg, val)
489 #define tr32_mailbox(reg)       tp->read32_mbox(tp, reg)
490
491 #define tw32(reg,val)           tp->write32(tp, reg, val)
492 #define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val), 0)
493 #define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
494 #define tr32(reg)               tp->read32(tp, reg)
495
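/* Usage sketch (hypothetical helper, not in the driver): register writes that
 * toggle GPIOs go through tw32_wait_f() so the write is flushed and the
 * settle time is honoured, as in the GRC_LOCAL_CTRL sequences further below.
 */
static inline void tg3_example_gpio_pulse(struct tg3 *tp, u32 gpio_bits)
{
        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | gpio_bits, 100);
}
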
496 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
497 {
498         unsigned long flags;
499
500         spin_lock_irqsave(&tp->indirect_lock, flags);
501         if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
502                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
503                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
504
505                 /* Always leave this as zero. */
506                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
507         } else {
508                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
509                 tw32_f(TG3PCI_MEM_WIN_DATA, val);
510
511                 /* Always leave this as zero. */
512                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
513         }
514         spin_unlock_irqrestore(&tp->indirect_lock, flags);
515 }
516
517 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
518 {
519         unsigned long flags;
520
521         spin_lock_irqsave(&tp->indirect_lock, flags);
522         if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
523                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
524                 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
525
526                 /* Always leave this as zero. */
527                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
528         } else {
529                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
530                 *val = tr32(TG3PCI_MEM_WIN_DATA);
531
532                 /* Always leave this as zero. */
533                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
534         }
535         spin_unlock_irqrestore(&tp->indirect_lock, flags);
536 }
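
/* Usage sketch: the memory window above is how the driver reads NIC SRAM,
 * e.g. the firmware and WOL mailboxes used later in this file.  The wrapper
 * below is hypothetical and purely for illustration.
 */
static inline u32 tg3_example_read_sram(struct tg3 *tp, u32 sram_off)
{
        u32 val;

        tg3_read_mem(tp, sram_off, &val);
        return val;
}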
537
538 static void tg3_disable_ints(struct tg3 *tp)
539 {
540         tw32(TG3PCI_MISC_HOST_CTRL,
541              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
542         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
543 }
544
545 static inline void tg3_cond_int(struct tg3 *tp)
546 {
547         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
548             (tp->hw_status->status & SD_STATUS_UPDATED))
549                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
550 }
551
552 static void tg3_enable_ints(struct tg3 *tp)
553 {
554         tp->irq_sync = 0;
555         wmb();
556
557         tw32(TG3PCI_MISC_HOST_CTRL,
558              (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
559         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
560                        (tp->last_tag << 24));
561         if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
562                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
563                                (tp->last_tag << 24));
564         tg3_cond_int(tp);
565 }
566
567 static inline unsigned int tg3_has_work(struct tg3 *tp)
568 {
569         struct tg3_hw_status *sblk = tp->hw_status;
570         unsigned int work_exists = 0;
571
572         /* check for phy events */
573         if (!(tp->tg3_flags &
574               (TG3_FLAG_USE_LINKCHG_REG |
575                TG3_FLAG_POLL_SERDES))) {
576                 if (sblk->status & SD_STATUS_LINK_CHG)
577                         work_exists = 1;
578         }
579         /* check for RX/TX work to do */
580         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
581             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
582                 work_exists = 1;
583
584         return work_exists;
585 }
586
587 /* tg3_restart_ints
588  *  similar to tg3_enable_ints, but it accurately determines whether there
589  *  is new work pending and can return without flushing the PIO write
590  *  which re-enables interrupts.
591  */
592 static void tg3_restart_ints(struct tg3 *tp)
593 {
594         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
595                      tp->last_tag << 24);
596         mmiowb();
597
598         /* When doing tagged status, this work check is unnecessary.
599          * The last_tag we write above tells the chip which piece of
600          * work we've completed.
601          */
602         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
603             tg3_has_work(tp))
604                 tw32(HOSTCC_MODE, tp->coalesce_mode |
605                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
606 }
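
/* Illustrative sketch (condensed; the real NAPI poll routine is outside this
 * excerpt): once the poll loop has processed its work, it leaves polling mode
 * and re-arms interrupts through tg3_restart_ints().
 */
static inline void tg3_example_poll_done(struct tg3 *tp)
{
        netif_rx_complete(tp->dev);     /* back to interrupt mode */
        tg3_restart_ints(tp);           /* unmask; flush only if work pending */
}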
607
608 static inline void tg3_netif_stop(struct tg3 *tp)
609 {
610         tp->dev->trans_start = jiffies; /* prevent tx timeout */
611         netif_poll_disable(tp->dev);
612         netif_tx_disable(tp->dev);
613 }
614
615 static inline void tg3_netif_start(struct tg3 *tp)
616 {
617         netif_wake_queue(tp->dev);
618         /* NOTE: unconditional netif_wake_queue is only appropriate
619          * so long as all callers are assured to have free tx slots
620          * (such as after tg3_init_hw)
621          */
622         netif_poll_enable(tp->dev);
623         tp->hw_status->status |= SD_STATUS_UPDATED;
624         tg3_enable_ints(tp);
625 }
626
627 static void tg3_switch_clocks(struct tg3 *tp)
628 {
629         u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
630         u32 orig_clock_ctrl;
631
632         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
633                 return;
634
635         orig_clock_ctrl = clock_ctrl;
636         clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
637                        CLOCK_CTRL_CLKRUN_OENABLE |
638                        0x1f);
639         tp->pci_clock_ctrl = clock_ctrl;
640
641         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
642                 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
643                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
644                                     clock_ctrl | CLOCK_CTRL_625_CORE, 40);
645                 }
646         } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
647                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
648                             clock_ctrl |
649                             (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
650                             40);
651                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
652                             clock_ctrl | (CLOCK_CTRL_ALTCLK),
653                             40);
654         }
655         tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
656 }
657
658 #define PHY_BUSY_LOOPS  5000
659
660 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
661 {
662         u32 frame_val;
663         unsigned int loops;
664         int ret;
665
666         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
667                 tw32_f(MAC_MI_MODE,
668                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
669                 udelay(80);
670         }
671
672         *val = 0x0;
673
674         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
675                       MI_COM_PHY_ADDR_MASK);
676         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
677                       MI_COM_REG_ADDR_MASK);
678         frame_val |= (MI_COM_CMD_READ | MI_COM_START);
679         
680         tw32_f(MAC_MI_COM, frame_val);
681
682         loops = PHY_BUSY_LOOPS;
683         while (loops != 0) {
684                 udelay(10);
685                 frame_val = tr32(MAC_MI_COM);
686
687                 if ((frame_val & MI_COM_BUSY) == 0) {
688                         udelay(5);
689                         frame_val = tr32(MAC_MI_COM);
690                         break;
691                 }
692                 loops -= 1;
693         }
694
695         ret = -EBUSY;
696         if (loops != 0) {
697                 *val = frame_val & MI_COM_DATA_MASK;
698                 ret = 0;
699         }
700
701         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
702                 tw32_f(MAC_MI_MODE, tp->mi_mode);
703                 udelay(80);
704         }
705
706         return ret;
707 }
708
709 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
710 {
711         u32 frame_val;
712         unsigned int loops;
713         int ret;
714
715         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
716                 tw32_f(MAC_MI_MODE,
717                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
718                 udelay(80);
719         }
720
721         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
722                       MI_COM_PHY_ADDR_MASK);
723         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
724                       MI_COM_REG_ADDR_MASK);
725         frame_val |= (val & MI_COM_DATA_MASK);
726         frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
727         
728         tw32_f(MAC_MI_COM, frame_val);
729
730         loops = PHY_BUSY_LOOPS;
731         while (loops != 0) {
732                 udelay(10);
733                 frame_val = tr32(MAC_MI_COM);
734                 if ((frame_val & MI_COM_BUSY) == 0) {
735                         udelay(5);
736                         frame_val = tr32(MAC_MI_COM);
737                         break;
738                 }
739                 loops -= 1;
740         }
741
742         ret = -EBUSY;
743         if (loops != 0)
744                 ret = 0;
745
746         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
747                 tw32_f(MAC_MI_MODE, tp->mi_mode);
748                 udelay(80);
749         }
750
751         return ret;
752 }
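
/* Illustrative sketch (hypothetical helper, not part of the driver): the
 * read-modify-write pattern built on tg3_readphy()/tg3_writephy() that the
 * AUX_CTRL and EXT_CTRL tweaks below perform inline.
 */
static int tg3_example_phy_set_bits(struct tg3 *tp, int reg, u32 bits)
{
        u32 val;

        if (tg3_readphy(tp, reg, &val))
                return -EBUSY;
        return tg3_writephy(tp, reg, val | bits);
}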
753
754 static void tg3_phy_set_wirespeed(struct tg3 *tp)
755 {
756         u32 val;
757
758         if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
759                 return;
760
761         if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
762             !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
763                 tg3_writephy(tp, MII_TG3_AUX_CTRL,
764                              (val | (1 << 15) | (1 << 4)));
765 }
766
767 static int tg3_bmcr_reset(struct tg3 *tp)
768 {
769         u32 phy_control;
770         int limit, err;
771
772         /* OK, reset it, and poll the BMCR_RESET bit until it
773          * clears or we time out.
774          */
775         phy_control = BMCR_RESET;
776         err = tg3_writephy(tp, MII_BMCR, phy_control);
777         if (err != 0)
778                 return -EBUSY;
779
780         limit = 5000;
781         while (limit--) {
782                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
783                 if (err != 0)
784                         return -EBUSY;
785
786                 if ((phy_control & BMCR_RESET) == 0) {
787                         udelay(40);
788                         break;
789                 }
790                 udelay(10);
791         }
792         if (limit < 0)
793                 return -EBUSY;
794
795         return 0;
796 }
797
798 static int tg3_wait_macro_done(struct tg3 *tp)
799 {
800         int limit = 100;
801
802         while (limit--) {
803                 u32 tmp32;
804
805                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
806                         if ((tmp32 & 0x1000) == 0)
807                                 break;
808                 }
809         }
810         if (limit < 0)
811                 return -EBUSY;
812
813         return 0;
814 }
815
816 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
817 {
818         static const u32 test_pat[4][6] = {
819         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
820         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
821         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
822         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
823         };
824         int chan;
825
826         for (chan = 0; chan < 4; chan++) {
827                 int i;
828
829                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
830                              (chan * 0x2000) | 0x0200);
831                 tg3_writephy(tp, 0x16, 0x0002);
832
833                 for (i = 0; i < 6; i++)
834                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
835                                      test_pat[chan][i]);
836
837                 tg3_writephy(tp, 0x16, 0x0202);
838                 if (tg3_wait_macro_done(tp)) {
839                         *resetp = 1;
840                         return -EBUSY;
841                 }
842
843                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
844                              (chan * 0x2000) | 0x0200);
845                 tg3_writephy(tp, 0x16, 0x0082);
846                 if (tg3_wait_macro_done(tp)) {
847                         *resetp = 1;
848                         return -EBUSY;
849                 }
850
851                 tg3_writephy(tp, 0x16, 0x0802);
852                 if (tg3_wait_macro_done(tp)) {
853                         *resetp = 1;
854                         return -EBUSY;
855                 }
856
857                 for (i = 0; i < 6; i += 2) {
858                         u32 low, high;
859
860                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
861                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
862                             tg3_wait_macro_done(tp)) {
863                                 *resetp = 1;
864                                 return -EBUSY;
865                         }
866                         low &= 0x7fff;
867                         high &= 0x000f;
868                         if (low != test_pat[chan][i] ||
869                             high != test_pat[chan][i+1]) {
870                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
871                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
872                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
873
874                                 return -EBUSY;
875                         }
876                 }
877         }
878
879         return 0;
880 }
881
882 static int tg3_phy_reset_chanpat(struct tg3 *tp)
883 {
884         int chan;
885
886         for (chan = 0; chan < 4; chan++) {
887                 int i;
888
889                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
890                              (chan * 0x2000) | 0x0200);
891                 tg3_writephy(tp, 0x16, 0x0002);
892                 for (i = 0; i < 6; i++)
893                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
894                 tg3_writephy(tp, 0x16, 0x0202);
895                 if (tg3_wait_macro_done(tp))
896                         return -EBUSY;
897         }
898
899         return 0;
900 }
901
902 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
903 {
904         u32 reg32, phy9_orig;
905         int retries, do_phy_reset, err;
906
907         retries = 10;
908         do_phy_reset = 1;
909         do {
910                 if (do_phy_reset) {
911                         err = tg3_bmcr_reset(tp);
912                         if (err)
913                                 return err;
914                         do_phy_reset = 0;
915                 }
916
917                 /* Disable transmitter and interrupt.  */
918                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
919                         continue;
920
921                 reg32 |= 0x3000;
922                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
923
924                 /* Set full-duplex, 1000 Mbps.  */
925                 tg3_writephy(tp, MII_BMCR,
926                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
927
928                 /* Set to master mode.  */
929                 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
930                         continue;
931
932                 tg3_writephy(tp, MII_TG3_CTRL,
933                              (MII_TG3_CTRL_AS_MASTER |
934                               MII_TG3_CTRL_ENABLE_AS_MASTER));
935
936                 /* Enable SM_DSP_CLOCK and 6dB.  */
937                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
938
939                 /* Block the PHY control access.  */
940                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
941                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
942
943                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
944                 if (!err)
945                         break;
946         } while (--retries);
947
948         err = tg3_phy_reset_chanpat(tp);
949         if (err)
950                 return err;
951
952         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
953         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
954
955         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
956         tg3_writephy(tp, 0x16, 0x0000);
957
958         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
959             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
960                 /* Set Extended packet length bit for jumbo frames */
961                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
962         }
963         else {
964                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
965         }
966
967         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
968
969         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
970                 reg32 &= ~0x3000;
971                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
972         } else if (!err)
973                 err = -EBUSY;
974
975         return err;
976 }
977
978 static void tg3_link_report(struct tg3 *);
979
980 /* This will reset the tigon3 PHY and then reapply the
981  * PHY workarounds that the reset clears.
982  */
983 static int tg3_phy_reset(struct tg3 *tp)
984 {
985         u32 phy_status;
986         int err;
987
988         err  = tg3_readphy(tp, MII_BMSR, &phy_status);
989         err |= tg3_readphy(tp, MII_BMSR, &phy_status);
990         if (err != 0)
991                 return -EBUSY;
992
993         if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
994                 netif_carrier_off(tp->dev);
995                 tg3_link_report(tp);
996         }
997
998         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
999             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1000             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
1001                 err = tg3_phy_reset_5703_4_5(tp);
1002                 if (err)
1003                         return err;
1004                 goto out;
1005         }
1006
1007         err = tg3_bmcr_reset(tp);
1008         if (err)
1009                 return err;
1010
1011 out:
1012         if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
1013                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1014                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1015                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
1016                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1017                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
1018                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1019         }
1020         if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
1021                 tg3_writephy(tp, 0x1c, 0x8d68);
1022                 tg3_writephy(tp, 0x1c, 0x8d68);
1023         }
1024         if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
1025                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1026                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1027                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
1028                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1029                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
1030                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
1031                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
1032                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1033         }
1034         else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
1035                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1036                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1037                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
1038                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1039         }
1040         /* Set Extended packet length bit (bit 14) on all chips that
1041          * support jumbo frames.  */
1042         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1043                 /* Cannot do read-modify-write on 5401 */
1044                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1045         } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1046                 u32 phy_reg;
1047
1048                 /* Set bit 14 with read-modify-write to preserve other bits */
1049                 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
1050                     !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
1051                         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
1052         }
1053
1054         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
1055          * jumbo frame transmission.
1056          */
1057         if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1058                 u32 phy_reg;
1059
1060                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
1061                     tg3_writephy(tp, MII_TG3_EXT_CTRL,
1062                                  phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
1063         }
1064
1065         tg3_phy_set_wirespeed(tp);
1066         return 0;
1067 }
1068
1069 static void tg3_frob_aux_power(struct tg3 *tp)
1070 {
1071         struct tg3 *tp_peer = tp;
1072
1073         if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
1074                 return;
1075
1076         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
1077             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
1078                 struct net_device *dev_peer;
1079
1080                 dev_peer = pci_get_drvdata(tp->pdev_peer);
1081                 /* remove_one() may have been run on the peer. */
1082                 if (!dev_peer)
1083                         tp_peer = tp;
1084                 else
1085                         tp_peer = netdev_priv(dev_peer);
1086         }
1087
1088         if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1089             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
1090             (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1091             (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
1092                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1093                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1094                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1095                                     (GRC_LCLCTRL_GPIO_OE0 |
1096                                      GRC_LCLCTRL_GPIO_OE1 |
1097                                      GRC_LCLCTRL_GPIO_OE2 |
1098                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
1099                                      GRC_LCLCTRL_GPIO_OUTPUT1),
1100                                     100);
1101                 } else {
1102                         u32 no_gpio2;
1103                         u32 grc_local_ctrl = 0;
1104
1105                         if (tp_peer != tp &&
1106                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1107                                 return;
1108
1109                         /* Workaround to prevent overdrawing Amps. */
1110                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1111                             ASIC_REV_5714) {
1112                                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
1113                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1114                                             grc_local_ctrl, 100);
1115                         }
1116
1117                         /* On 5753 and variants, GPIO2 cannot be used. */
1118                         no_gpio2 = tp->nic_sram_data_cfg &
1119                                     NIC_SRAM_DATA_CFG_NO_GPIO2;
1120
1121                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
1122                                          GRC_LCLCTRL_GPIO_OE1 |
1123                                          GRC_LCLCTRL_GPIO_OE2 |
1124                                          GRC_LCLCTRL_GPIO_OUTPUT1 |
1125                                          GRC_LCLCTRL_GPIO_OUTPUT2;
1126                         if (no_gpio2) {
1127                                 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
1128                                                     GRC_LCLCTRL_GPIO_OUTPUT2);
1129                         }
1130                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1131                                                     grc_local_ctrl, 100);
1132
1133                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
1134
1135                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1136                                                     grc_local_ctrl, 100);
1137
1138                         if (!no_gpio2) {
1139                                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
1140                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1141                                             grc_local_ctrl, 100);
1142                         }
1143                 }
1144         } else {
1145                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
1146                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
1147                         if (tp_peer != tp &&
1148                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1149                                 return;
1150
1151                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1152                                     (GRC_LCLCTRL_GPIO_OE1 |
1153                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1154
1155                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1156                                     GRC_LCLCTRL_GPIO_OE1, 100);
1157
1158                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1159                                     (GRC_LCLCTRL_GPIO_OE1 |
1160                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1161                 }
1162         }
1163 }
1164
1165 static int tg3_setup_phy(struct tg3 *, int);
1166
1167 #define RESET_KIND_SHUTDOWN     0
1168 #define RESET_KIND_INIT         1
1169 #define RESET_KIND_SUSPEND      2
1170
1171 static void tg3_write_sig_post_reset(struct tg3 *, int);
1172 static int tg3_halt_cpu(struct tg3 *, u32);
1173 static int tg3_nvram_lock(struct tg3 *);
1174 static void tg3_nvram_unlock(struct tg3 *);
1175
1176 static void tg3_power_down_phy(struct tg3 *tp)
1177 {
1178         /* The PHY should not be powered down on some chips because
1179          * of bugs.
1180          */
1181         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1182             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1183             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
1184              (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
1185                 return;
1186         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
1187 }
1188
1189 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
1190 {
1191         u32 misc_host_ctrl;
1192         u16 power_control, power_caps;
1193         int pm = tp->pm_cap;
1194
1195         /* Make sure register accesses (indirect or otherwise)
1196          * will function correctly.
1197          */
1198         pci_write_config_dword(tp->pdev,
1199                                TG3PCI_MISC_HOST_CTRL,
1200                                tp->misc_host_ctrl);
1201
1202         pci_read_config_word(tp->pdev,
1203                              pm + PCI_PM_CTRL,
1204                              &power_control);
1205         power_control |= PCI_PM_CTRL_PME_STATUS;
1206         power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1207         switch (state) {
1208         case PCI_D0:
1209                 power_control |= 0;
1210                 pci_write_config_word(tp->pdev,
1211                                       pm + PCI_PM_CTRL,
1212                                       power_control);
1213                 udelay(100);    /* Delay after power state change */
1214
1215                 /* Switch out of Vaux if it is not a LOM */
1216                 if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
1217                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
1218
1219                 return 0;
1220
1221         case PCI_D1:
1222                 power_control |= 1;
1223                 break;
1224
1225         case PCI_D2:
1226                 power_control |= 2;
1227                 break;
1228
1229         case PCI_D3hot:
1230                 power_control |= 3;
1231                 break;
1232
1233         default:
1234                 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1235                        "requested.\n",
1236                        tp->dev->name, state);
1237                 return -EINVAL;
1238         }
1239
1240         power_control |= PCI_PM_CTRL_PME_ENABLE;
1241
1242         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1243         tw32(TG3PCI_MISC_HOST_CTRL,
1244              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1245
1246         if (tp->link_config.phy_is_low_power == 0) {
1247                 tp->link_config.phy_is_low_power = 1;
1248                 tp->link_config.orig_speed = tp->link_config.speed;
1249                 tp->link_config.orig_duplex = tp->link_config.duplex;
1250                 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1251         }
1252
1253         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
1254                 tp->link_config.speed = SPEED_10;
1255                 tp->link_config.duplex = DUPLEX_HALF;
1256                 tp->link_config.autoneg = AUTONEG_ENABLE;
1257                 tg3_setup_phy(tp, 0);
1258         }
1259
1260         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1261                 int i;
1262                 u32 val;
1263
1264                 for (i = 0; i < 200; i++) {
1265                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
1266                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1267                                 break;
1268                         msleep(1);
1269                 }
1270         }
1271         tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
1272                                              WOL_DRV_STATE_SHUTDOWN |
1273                                              WOL_DRV_WOL | WOL_SET_MAGIC_PKT);
1274
1275         pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1276
1277         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1278                 u32 mac_mode;
1279
1280                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1281                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1282                         udelay(40);
1283
1284                         mac_mode = MAC_MODE_PORT_MODE_MII;
1285
1286                         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
1287                             !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
1288                                 mac_mode |= MAC_MODE_LINK_POLARITY;
1289                 } else {
1290                         mac_mode = MAC_MODE_PORT_MODE_TBI;
1291                 }
1292
1293                 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1294                         tw32(MAC_LED_CTRL, tp->led_ctrl);
1295
1296                 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1297                      (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1298                         mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1299
1300                 tw32_f(MAC_MODE, mac_mode);
1301                 udelay(100);
1302
1303                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1304                 udelay(10);
1305         }
1306
1307         if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1308             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1309              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1310                 u32 base_val;
1311
1312                 base_val = tp->pci_clock_ctrl;
1313                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1314                              CLOCK_CTRL_TXCLK_DISABLE);
1315
1316                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
1317                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
1318         } else if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
1319                 /* do nothing */
1320         } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1321                      (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1322                 u32 newbits1, newbits2;
1323
1324                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1325                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1326                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1327                                     CLOCK_CTRL_TXCLK_DISABLE |
1328                                     CLOCK_CTRL_ALTCLK);
1329                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1330                 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1331                         newbits1 = CLOCK_CTRL_625_CORE;
1332                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1333                 } else {
1334                         newbits1 = CLOCK_CTRL_ALTCLK;
1335                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1336                 }
1337
1338                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
1339                             40);
1340
1341                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
1342                             40);
1343
1344                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1345                         u32 newbits3;
1346
1347                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1348                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1349                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1350                                             CLOCK_CTRL_TXCLK_DISABLE |
1351                                             CLOCK_CTRL_44MHZ_CORE);
1352                         } else {
1353                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1354                         }
1355
1356                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
1357                                     tp->pci_clock_ctrl | newbits3, 40);
1358                 }
1359         }
1360
1361         if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
1362             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1363                 /* Turn off the PHY */
1364                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1365                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1366                                      MII_TG3_EXT_CTRL_FORCE_LED_OFF);
1367                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
1368                         tg3_power_down_phy(tp);
1369                 }
1370         }
1371
1372         tg3_frob_aux_power(tp);
1373
1374         /* Workaround for unstable PLL clock */
1375         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1376             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1377                 u32 val = tr32(0x7d00);
1378
1379                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1380                 tw32(0x7d00, val);
1381                 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1382                         int err;
1383
1384                         err = tg3_nvram_lock(tp);
1385                         tg3_halt_cpu(tp, RX_CPU_BASE);
1386                         if (!err)
1387                                 tg3_nvram_unlock(tp);
1388                 }
1389         }
1390
1391         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1392
1393         /* Finally, set the new power state. */
1394         pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1395         udelay(100);    /* Delay after power state change */
1396
1397         return 0;
1398 }
1399
1400 static void tg3_link_report(struct tg3 *tp)
1401 {
1402         if (!netif_carrier_ok(tp->dev)) {
1403                 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1404         } else {
1405                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1406                        tp->dev->name,
1407                        (tp->link_config.active_speed == SPEED_1000 ?
1408                         1000 :
1409                         (tp->link_config.active_speed == SPEED_100 ?
1410                          100 : 10)),
1411                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1412                         "full" : "half"));
1413
1414                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1415                        "%s for RX.\n",
1416                        tp->dev->name,
1417                        (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1418                        (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1419         }
1420 }
1421
1422 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1423 {
1424         u32 new_tg3_flags = 0;
1425         u32 old_rx_mode = tp->rx_mode;
1426         u32 old_tx_mode = tp->tx_mode;
1427
1428         if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1429
1430                 /* Convert 1000BaseX flow control bits to 1000BaseT
1431                  * bits before resolving flow control.
1432                  */
1433                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
1434                         local_adv &= ~(ADVERTISE_PAUSE_CAP |
1435                                        ADVERTISE_PAUSE_ASYM);
1436                         remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1437
1438                         if (local_adv & ADVERTISE_1000XPAUSE)
1439                                 local_adv |= ADVERTISE_PAUSE_CAP;
1440                         if (local_adv & ADVERTISE_1000XPSE_ASYM)
1441                                 local_adv |= ADVERTISE_PAUSE_ASYM;
1442                         if (remote_adv & LPA_1000XPAUSE)
1443                                 remote_adv |= LPA_PAUSE_CAP;
1444                         if (remote_adv & LPA_1000XPAUSE_ASYM)
1445                                 remote_adv |= LPA_PAUSE_ASYM;
1446                 }
1447
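                /* Resolve flow control the same way 802.3 autoneg does
                 * (local adv / link partner adv -> result):
                 *   Sym             Sym             -> RX and TX pause
                 *   Sym + Asym      Asym            -> RX pause only
                 *   Asym            Sym + Asym      -> TX pause only
                 * Any other combination leaves pause disabled.
                 */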
1448                 if (local_adv & ADVERTISE_PAUSE_CAP) {
1449                         if (local_adv & ADVERTISE_PAUSE_ASYM) {
1450                                 if (remote_adv & LPA_PAUSE_CAP)
1451                                         new_tg3_flags |=
1452                                                 (TG3_FLAG_RX_PAUSE |
1453                                                 TG3_FLAG_TX_PAUSE);
1454                                 else if (remote_adv & LPA_PAUSE_ASYM)
1455                                         new_tg3_flags |=
1456                                                 (TG3_FLAG_RX_PAUSE);
1457                         } else {
1458                                 if (remote_adv & LPA_PAUSE_CAP)
1459                                         new_tg3_flags |=
1460                                                 (TG3_FLAG_RX_PAUSE |
1461                                                 TG3_FLAG_TX_PAUSE);
1462                         }
1463                 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1464                         if ((remote_adv & LPA_PAUSE_CAP) &&
1465                         (remote_adv & LPA_PAUSE_ASYM))
1466                                 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1467                 }
1468
1469                 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1470                 tp->tg3_flags |= new_tg3_flags;
1471         } else {
1472                 new_tg3_flags = tp->tg3_flags;
1473         }
1474
1475         if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1476                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1477         else
1478                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1479
1480         if (old_rx_mode != tp->rx_mode) {
1481                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1482         }
1483
1484         if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1485                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1486         else
1487                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1488
1489         if (old_tx_mode != tp->tx_mode) {
1490                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1491         }
1492 }
1493
1494 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1495 {
1496         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1497         case MII_TG3_AUX_STAT_10HALF:
1498                 *speed = SPEED_10;
1499                 *duplex = DUPLEX_HALF;
1500                 break;
1501
1502         case MII_TG3_AUX_STAT_10FULL:
1503                 *speed = SPEED_10;
1504                 *duplex = DUPLEX_FULL;
1505                 break;
1506
1507         case MII_TG3_AUX_STAT_100HALF:
1508                 *speed = SPEED_100;
1509                 *duplex = DUPLEX_HALF;
1510                 break;
1511
1512         case MII_TG3_AUX_STAT_100FULL:
1513                 *speed = SPEED_100;
1514                 *duplex = DUPLEX_FULL;
1515                 break;
1516
1517         case MII_TG3_AUX_STAT_1000HALF:
1518                 *speed = SPEED_1000;
1519                 *duplex = DUPLEX_HALF;
1520                 break;
1521
1522         case MII_TG3_AUX_STAT_1000FULL:
1523                 *speed = SPEED_1000;
1524                 *duplex = DUPLEX_FULL;
1525                 break;
1526
1527         default:
1528                 *speed = SPEED_INVALID;
1529                 *duplex = DUPLEX_INVALID;
1530                 break;
1531         }
1532 }
1533
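/* Decode the speed/duplex field of the Broadcom AUX status register
 * into SPEED_* / DUPLEX_* values; unknown encodings become INVALID.
 */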
1534 static void tg3_phy_copper_begin(struct tg3 *tp)
1535 {
1536         u32 new_adv;
1537         int i;
1538
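        /* Three cases below: low-power mode advertises only 10Mb (plus
         * 100Mb when WOL needs it), SPEED_INVALID means advertise every
         * mode the board supports, otherwise advertise just the single
         * requested speed/duplex.
         */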
1539         if (tp->link_config.phy_is_low_power) {
1540                 /* Entering low power mode.  Disable gigabit and
1541                  * 100baseT advertisements.
1542                  */
1543                 tg3_writephy(tp, MII_TG3_CTRL, 0);
1544
1545                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1546                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1547                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1548                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1549
1550                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1551         } else if (tp->link_config.speed == SPEED_INVALID) {
1552                 tp->link_config.advertising =
1553                         (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
1554                          ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
1555                          ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
1556                          ADVERTISED_Autoneg | ADVERTISED_MII);
1557
1558                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1559                         tp->link_config.advertising &=
1560                                 ~(ADVERTISED_1000baseT_Half |
1561                                   ADVERTISED_1000baseT_Full);
1562
1563                 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1564                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1565                         new_adv |= ADVERTISE_10HALF;
1566                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1567                         new_adv |= ADVERTISE_10FULL;
1568                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1569                         new_adv |= ADVERTISE_100HALF;
1570                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1571                         new_adv |= ADVERTISE_100FULL;
1572                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1573
1574                 if (tp->link_config.advertising &
1575                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1576                         new_adv = 0;
1577                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1578                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1579                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1580                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1581                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1582                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1583                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1584                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1585                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1586                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1587                 } else {
1588                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1589                 }
1590         } else {
1591                 /* Asking for a specific link mode. */
1592                 if (tp->link_config.speed == SPEED_1000) {
1593                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1594                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1595
1596                         if (tp->link_config.duplex == DUPLEX_FULL)
1597                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1598                         else
1599                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1600                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1601                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1602                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1603                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1604                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1605                 } else {
1606                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1607
1608                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1609                         if (tp->link_config.speed == SPEED_100) {
1610                                 if (tp->link_config.duplex == DUPLEX_FULL)
1611                                         new_adv |= ADVERTISE_100FULL;
1612                                 else
1613                                         new_adv |= ADVERTISE_100HALF;
1614                         } else {
1615                                 if (tp->link_config.duplex == DUPLEX_FULL)
1616                                         new_adv |= ADVERTISE_10FULL;
1617                                 else
1618                                         new_adv |= ADVERTISE_10HALF;
1619                         }
1620                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1621                 }
1622         }
1623
1624         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1625             tp->link_config.speed != SPEED_INVALID) {
1626                 u32 bmcr, orig_bmcr;
1627
1628                 tp->link_config.active_speed = tp->link_config.speed;
1629                 tp->link_config.active_duplex = tp->link_config.duplex;
1630
1631                 bmcr = 0;
1632                 switch (tp->link_config.speed) {
1633                 default:
1634                 case SPEED_10:
1635                         break;
1636
1637                 case SPEED_100:
1638                         bmcr |= BMCR_SPEED100;
1639                         break;
1640
1641                 case SPEED_1000:
1642                         bmcr |= TG3_BMCR_SPEED1000;
1643                         break;
1644                 }
1645
1646                 if (tp->link_config.duplex == DUPLEX_FULL)
1647                         bmcr |= BMCR_FULLDPLX;
1648
1649                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1650                     (bmcr != orig_bmcr)) {
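                        /* Kick the PHY into loopback so the link drops,
                         * wait up to ~15ms for BMSR to report link down,
                         * then program the new forced-mode BMCR.
                         */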
1651                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1652                         for (i = 0; i < 1500; i++) {
1653                                 u32 tmp;
1654
1655                                 udelay(10);
1656                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1657                                     tg3_readphy(tp, MII_BMSR, &tmp))
1658                                         continue;
1659                                 if (!(tmp & BMSR_LSTATUS)) {
1660                                         udelay(40);
1661                                         break;
1662                                 }
1663                         }
1664                         tg3_writephy(tp, MII_BMCR, bmcr);
1665                         udelay(40);
1666                 }
1667         } else {
1668                 tg3_writephy(tp, MII_BMCR,
1669                              BMCR_ANENABLE | BMCR_ANRESTART);
1670         }
1671 }
1672
1673 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1674 {
1675         int err;
1676
1677         /* Turn off tap power management. */
1678         /* Set Extended packet length bit */
1679         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1680
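        /* Each DSP coefficient below is programmed by selecting its
         * address via MII_TG3_DSP_ADDRESS and then writing the value
         * through MII_TG3_DSP_RW_PORT.
         */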
1681         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1682         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1683
1684         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1685         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1686
1687         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1688         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1689
1690         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1691         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1692
1693         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1694         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1695
1696         udelay(40);
1697
1698         return err;
1699 }
1700
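/* Return 1 if the PHY is advertising all 10/100 modes (and both gigabit
 * modes unless the board is 10/100-only), 0 otherwise.
 */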
1701 static int tg3_copper_is_advertising_all(struct tg3 *tp)
1702 {
1703         u32 adv_reg, all_mask;
1704
1705         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1706                 return 0;
1707
1708         all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1709                     ADVERTISE_100HALF | ADVERTISE_100FULL);
1710         if ((adv_reg & all_mask) != all_mask)
1711                 return 0;
1712         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1713                 u32 tg3_ctrl;
1714
1715                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1716                         return 0;
1717
1718                 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1719                             MII_TG3_CTRL_ADV_1000_FULL);
1720                 if ((tg3_ctrl & all_mask) != all_mask)
1721                         return 0;
1722         }
1723         return 1;
1724 }
1725
1726 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1727 {
1728         int current_link_up;
1729         u32 bmsr, dummy;
1730         u16 current_speed;
1731         u8 current_duplex;
1732         int i, err;
1733
1734         tw32(MAC_EVENT, 0);
1735
1736         tw32_f(MAC_STATUS,
1737              (MAC_STATUS_SYNC_CHANGED |
1738               MAC_STATUS_CFG_CHANGED |
1739               MAC_STATUS_MI_COMPLETION |
1740               MAC_STATUS_LNKSTATE_CHANGED));
1741         udelay(40);
1742
1743         tp->mi_mode = MAC_MI_MODE_BASE;
1744         tw32_f(MAC_MI_MODE, tp->mi_mode);
1745         udelay(80);
1746
1747         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1748
1749         /* Some third-party PHYs need to be reset on link going
1750          * down.
1751          */
1752         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1753              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1754              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1755             netif_carrier_ok(tp->dev)) {
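                /* The BMSR link bit latches low, so read it twice; the
                 * second read reflects the current link state.
                 */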
1756                 tg3_readphy(tp, MII_BMSR, &bmsr);
1757                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1758                     !(bmsr & BMSR_LSTATUS))
1759                         force_reset = 1;
1760         }
1761         if (force_reset)
1762                 tg3_phy_reset(tp);
1763
1764         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1765                 tg3_readphy(tp, MII_BMSR, &bmsr);
1766                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
1767                     !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1768                         bmsr = 0;
1769
1770                 if (!(bmsr & BMSR_LSTATUS)) {
1771                         err = tg3_init_5401phy_dsp(tp);
1772                         if (err)
1773                                 return err;
1774
1775                         tg3_readphy(tp, MII_BMSR, &bmsr);
1776                         for (i = 0; i < 1000; i++) {
1777                                 udelay(10);
1778                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1779                                     (bmsr & BMSR_LSTATUS)) {
1780                                         udelay(40);
1781                                         break;
1782                                 }
1783                         }
1784
1785                         if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1786                             !(bmsr & BMSR_LSTATUS) &&
1787                             tp->link_config.active_speed == SPEED_1000) {
1788                                 err = tg3_phy_reset(tp);
1789                                 if (!err)
1790                                         err = tg3_init_5401phy_dsp(tp);
1791                                 if (err)
1792                                         return err;
1793                         }
1794                 }
1795         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1796                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1797                 /* 5701 {A0,B0} CRC bug workaround */
1798                 tg3_writephy(tp, 0x15, 0x0a75);
1799                 tg3_writephy(tp, 0x1c, 0x8c68);
1800                 tg3_writephy(tp, 0x1c, 0x8d68);
1801                 tg3_writephy(tp, 0x1c, 0x8c68);
1802         }
1803
1804         /* Clear pending interrupts... */
1805         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1806         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1807
1808         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1809                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
1810         else
1811                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1812
1813         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1814             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1815                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1816                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1817                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1818                 else
1819                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1820         }
1821
1822         current_link_up = 0;
1823         current_speed = SPEED_INVALID;
1824         current_duplex = DUPLEX_INVALID;
1825
1826         if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
1827                 u32 val;
1828
1829                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
1830                 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
1831                 if (!(val & (1 << 10))) {
1832                         val |= (1 << 10);
1833                         tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
1834                         goto relink;
1835                 }
1836         }
1837
1838         bmsr = 0;
1839         for (i = 0; i < 100; i++) {
1840                 tg3_readphy(tp, MII_BMSR, &bmsr);
1841                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1842                     (bmsr & BMSR_LSTATUS))
1843                         break;
1844                 udelay(40);
1845         }
1846
1847         if (bmsr & BMSR_LSTATUS) {
1848                 u32 aux_stat, bmcr;
1849
1850                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1851                 for (i = 0; i < 2000; i++) {
1852                         udelay(10);
1853                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
1854                             aux_stat)
1855                                 break;
1856                 }
1857
1858                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
1859                                              &current_speed,
1860                                              &current_duplex);
1861
1862                 bmcr = 0;
1863                 for (i = 0; i < 200; i++) {
1864                         tg3_readphy(tp, MII_BMCR, &bmcr);
1865                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
1866                                 continue;
1867                         if (bmcr && bmcr != 0x7fff)
1868                                 break;
1869                         udelay(10);
1870                 }
1871
1872                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
1873                         if (bmcr & BMCR_ANENABLE) {
1874                                 current_link_up = 1;
1875
1876                                 /* Force autoneg restart if we are exiting
1877                                  * low power mode.
1878                                  */
1879                                 if (!tg3_copper_is_advertising_all(tp))
1880                                         current_link_up = 0;
1881                         } else {
1882                                 current_link_up = 0;
1883                         }
1884                 } else {
1885                         if (!(bmcr & BMCR_ANENABLE) &&
1886                             tp->link_config.speed == current_speed &&
1887                             tp->link_config.duplex == current_duplex) {
1888                                 current_link_up = 1;
1889                         } else {
1890                                 current_link_up = 0;
1891                         }
1892                 }
1893
1894                 tp->link_config.active_speed = current_speed;
1895                 tp->link_config.active_duplex = current_duplex;
1896         }
1897
1898         if (current_link_up == 1 &&
1899             (tp->link_config.active_duplex == DUPLEX_FULL) &&
1900             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
1901                 u32 local_adv, remote_adv;
1902
1903                 if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
1904                         local_adv = 0;
1905                 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1906
1907                 if (tg3_readphy(tp, MII_LPA, &remote_adv))
1908                         remote_adv = 0;
1909
1910                 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1911
1912                 /* If we are not advertising full pause capability,
1913                  * something is wrong.  Bring the link down and reconfigure.
1914                  */
1915                 if (local_adv != ADVERTISE_PAUSE_CAP) {
1916                         current_link_up = 0;
1917                 } else {
1918                         tg3_setup_flow_control(tp, local_adv, remote_adv);
1919                 }
1920         }
1921 relink:
1922         if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
1923                 u32 tmp;
1924
1925                 tg3_phy_copper_begin(tp);
1926
1927                 tg3_readphy(tp, MII_BMSR, &tmp);
1928                 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
1929                     (tmp & BMSR_LSTATUS))
1930                         current_link_up = 1;
1931         }
1932
1933         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
1934         if (current_link_up == 1) {
1935                 if (tp->link_config.active_speed == SPEED_100 ||
1936                     tp->link_config.active_speed == SPEED_10)
1937                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
1938                 else
1939                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1940         } else
1941                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1942
1943         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
1944         if (tp->link_config.active_duplex == DUPLEX_HALF)
1945                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
1946
1947         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1948         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
1949                 if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
1950                     (current_link_up == 1 &&
1951                      tp->link_config.active_speed == SPEED_10))
1952                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1953         } else {
1954                 if (current_link_up == 1)
1955                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1956         }
1957
1958         /* ??? Without this setting Netgear GA302T PHY does not
1959          * ??? send/receive packets...
1960          */
1961         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
1962             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
1963                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
1964                 tw32_f(MAC_MI_MODE, tp->mi_mode);
1965                 udelay(80);
1966         }
1967
1968         tw32_f(MAC_MODE, tp->mac_mode);
1969         udelay(40);
1970
1971         if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
1972                 /* Polled via timer. */
1973                 tw32_f(MAC_EVENT, 0);
1974         } else {
1975                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
1976         }
1977         udelay(40);
1978
1979         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
1980             current_link_up == 1 &&
1981             tp->link_config.active_speed == SPEED_1000 &&
1982             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
1983              (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
1984                 udelay(120);
1985                 tw32_f(MAC_STATUS,
1986                      (MAC_STATUS_SYNC_CHANGED |
1987                       MAC_STATUS_CFG_CHANGED));
1988                 udelay(40);
1989                 tg3_write_mem(tp,
1990                               NIC_SRAM_FIRMWARE_MBOX,
1991                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
1992         }
1993
1994         if (current_link_up != netif_carrier_ok(tp->dev)) {
1995                 if (current_link_up)
1996                         netif_carrier_on(tp->dev);
1997                 else
1998                         netif_carrier_off(tp->dev);
1999                 tg3_link_report(tp);
2000         }
2001
2002         return 0;
2003 }
2004
2005 struct tg3_fiber_aneginfo {
2006         int state;
2007 #define ANEG_STATE_UNKNOWN              0
2008 #define ANEG_STATE_AN_ENABLE            1
2009 #define ANEG_STATE_RESTART_INIT         2
2010 #define ANEG_STATE_RESTART              3
2011 #define ANEG_STATE_DISABLE_LINK_OK      4
2012 #define ANEG_STATE_ABILITY_DETECT_INIT  5
2013 #define ANEG_STATE_ABILITY_DETECT       6
2014 #define ANEG_STATE_ACK_DETECT_INIT      7
2015 #define ANEG_STATE_ACK_DETECT           8
2016 #define ANEG_STATE_COMPLETE_ACK_INIT    9
2017 #define ANEG_STATE_COMPLETE_ACK         10
2018 #define ANEG_STATE_IDLE_DETECT_INIT     11
2019 #define ANEG_STATE_IDLE_DETECT          12
2020 #define ANEG_STATE_LINK_OK              13
2021 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
2022 #define ANEG_STATE_NEXT_PAGE_WAIT       15
2023
2024         u32 flags;
2025 #define MR_AN_ENABLE            0x00000001
2026 #define MR_RESTART_AN           0x00000002
2027 #define MR_AN_COMPLETE          0x00000004
2028 #define MR_PAGE_RX              0x00000008
2029 #define MR_NP_LOADED            0x00000010
2030 #define MR_TOGGLE_TX            0x00000020
2031 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
2032 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
2033 #define MR_LP_ADV_SYM_PAUSE     0x00000100
2034 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
2035 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
2036 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
2037 #define MR_LP_ADV_NEXT_PAGE     0x00001000
2038 #define MR_TOGGLE_RX            0x00002000
2039 #define MR_NP_RX                0x00004000
2040
2041 #define MR_LINK_OK              0x80000000
2042
2043         unsigned long link_time, cur_time;
2044
2045         u32 ability_match_cfg;
2046         int ability_match_count;
2047
2048         char ability_match, idle_match, ack_match;
2049
2050         u32 txconfig, rxconfig;
2051 #define ANEG_CFG_NP             0x00000080
2052 #define ANEG_CFG_ACK            0x00000040
2053 #define ANEG_CFG_RF2            0x00000020
2054 #define ANEG_CFG_RF1            0x00000010
2055 #define ANEG_CFG_PS2            0x00000001
2056 #define ANEG_CFG_PS1            0x00008000
2057 #define ANEG_CFG_HD             0x00004000
2058 #define ANEG_CFG_FD             0x00002000
2059 #define ANEG_CFG_INVAL          0x00001f06
2060
2061 };
2062 #define ANEG_OK         0
2063 #define ANEG_DONE       1
2064 #define ANEG_TIMER_ENAB 2
2065 #define ANEG_FAILED     -1
2066
2067 #define ANEG_STATE_SETTLE_TIME  10000
2068
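/* Software 1000BASE-X autoneg state machine, stepped once per tick by
 * fiber_autoneg().  It samples the received config word from the MAC
 * and returns ANEG_OK or ANEG_TIMER_ENAB while still negotiating,
 * ANEG_DONE or ANEG_FAILED once negotiation terminates.
 */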
2069 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2070                                    struct tg3_fiber_aneginfo *ap)
2071 {
2072         unsigned long delta;
2073         u32 rx_cfg_reg;
2074         int ret;
2075
2076         if (ap->state == ANEG_STATE_UNKNOWN) {
2077                 ap->rxconfig = 0;
2078                 ap->link_time = 0;
2079                 ap->cur_time = 0;
2080                 ap->ability_match_cfg = 0;
2081                 ap->ability_match_count = 0;
2082                 ap->ability_match = 0;
2083                 ap->idle_match = 0;
2084                 ap->ack_match = 0;
2085         }
2086         ap->cur_time++;
2087
2088         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2089                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2090
2091                 if (rx_cfg_reg != ap->ability_match_cfg) {
2092                         ap->ability_match_cfg = rx_cfg_reg;
2093                         ap->ability_match = 0;
2094                         ap->ability_match_count = 0;
2095                 } else {
2096                         if (++ap->ability_match_count > 1) {
2097                                 ap->ability_match = 1;
2098                                 ap->ability_match_cfg = rx_cfg_reg;
2099                         }
2100                 }
2101                 if (rx_cfg_reg & ANEG_CFG_ACK)
2102                         ap->ack_match = 1;
2103                 else
2104                         ap->ack_match = 0;
2105
2106                 ap->idle_match = 0;
2107         } else {
2108                 ap->idle_match = 1;
2109                 ap->ability_match_cfg = 0;
2110                 ap->ability_match_count = 0;
2111                 ap->ability_match = 0;
2112                 ap->ack_match = 0;
2113
2114                 rx_cfg_reg = 0;
2115         }
2116
2117         ap->rxconfig = rx_cfg_reg;
2118         ret = ANEG_OK;
2119
2120         switch(ap->state) {
2121         case ANEG_STATE_UNKNOWN:
2122                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2123                         ap->state = ANEG_STATE_AN_ENABLE;
2124
2125                 /* fallthru */
2126         case ANEG_STATE_AN_ENABLE:
2127                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2128                 if (ap->flags & MR_AN_ENABLE) {
2129                         ap->link_time = 0;
2130                         ap->cur_time = 0;
2131                         ap->ability_match_cfg = 0;
2132                         ap->ability_match_count = 0;
2133                         ap->ability_match = 0;
2134                         ap->idle_match = 0;
2135                         ap->ack_match = 0;
2136
2137                         ap->state = ANEG_STATE_RESTART_INIT;
2138                 } else {
2139                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
2140                 }
2141                 break;
2142
2143         case ANEG_STATE_RESTART_INIT:
2144                 ap->link_time = ap->cur_time;
2145                 ap->flags &= ~(MR_NP_LOADED);
2146                 ap->txconfig = 0;
2147                 tw32(MAC_TX_AUTO_NEG, 0);
2148                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2149                 tw32_f(MAC_MODE, tp->mac_mode);
2150                 udelay(40);
2151
2152                 ret = ANEG_TIMER_ENAB;
2153                 ap->state = ANEG_STATE_RESTART;
2154
2155                 /* fallthru */
2156         case ANEG_STATE_RESTART:
2157                 delta = ap->cur_time - ap->link_time;
2158                 if (delta > ANEG_STATE_SETTLE_TIME) {
2159                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2160                 } else {
2161                         ret = ANEG_TIMER_ENAB;
2162                 }
2163                 break;
2164
2165         case ANEG_STATE_DISABLE_LINK_OK:
2166                 ret = ANEG_DONE;
2167                 break;
2168
2169         case ANEG_STATE_ABILITY_DETECT_INIT:
2170                 ap->flags &= ~(MR_TOGGLE_TX);
2171                 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
2172                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2173                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2174                 tw32_f(MAC_MODE, tp->mac_mode);
2175                 udelay(40);
2176
2177                 ap->state = ANEG_STATE_ABILITY_DETECT;
2178                 break;
2179
2180         case ANEG_STATE_ABILITY_DETECT:
2181                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2182                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
2183                 }
2184                 break;
2185
2186         case ANEG_STATE_ACK_DETECT_INIT:
2187                 ap->txconfig |= ANEG_CFG_ACK;
2188                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2189                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2190                 tw32_f(MAC_MODE, tp->mac_mode);
2191                 udelay(40);
2192
2193                 ap->state = ANEG_STATE_ACK_DETECT;
2194
2195                 /* fallthru */
2196         case ANEG_STATE_ACK_DETECT:
2197                 if (ap->ack_match != 0) {
2198                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2199                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2200                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2201                         } else {
2202                                 ap->state = ANEG_STATE_AN_ENABLE;
2203                         }
2204                 } else if (ap->ability_match != 0 &&
2205                            ap->rxconfig == 0) {
2206                         ap->state = ANEG_STATE_AN_ENABLE;
2207                 }
2208                 break;
2209
2210         case ANEG_STATE_COMPLETE_ACK_INIT:
2211                 if (ap->rxconfig & ANEG_CFG_INVAL) {
2212                         ret = ANEG_FAILED;
2213                         break;
2214                 }
2215                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2216                                MR_LP_ADV_HALF_DUPLEX |
2217                                MR_LP_ADV_SYM_PAUSE |
2218                                MR_LP_ADV_ASYM_PAUSE |
2219                                MR_LP_ADV_REMOTE_FAULT1 |
2220                                MR_LP_ADV_REMOTE_FAULT2 |
2221                                MR_LP_ADV_NEXT_PAGE |
2222                                MR_TOGGLE_RX |
2223                                MR_NP_RX);
2224                 if (ap->rxconfig & ANEG_CFG_FD)
2225                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2226                 if (ap->rxconfig & ANEG_CFG_HD)
2227                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2228                 if (ap->rxconfig & ANEG_CFG_PS1)
2229                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
2230                 if (ap->rxconfig & ANEG_CFG_PS2)
2231                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2232                 if (ap->rxconfig & ANEG_CFG_RF1)
2233                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2234                 if (ap->rxconfig & ANEG_CFG_RF2)
2235                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2236                 if (ap->rxconfig & ANEG_CFG_NP)
2237                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
2238
2239                 ap->link_time = ap->cur_time;
2240
2241                 ap->flags ^= (MR_TOGGLE_TX);
2242                 if (ap->rxconfig & 0x0008)
2243                         ap->flags |= MR_TOGGLE_RX;
2244                 if (ap->rxconfig & ANEG_CFG_NP)
2245                         ap->flags |= MR_NP_RX;
2246                 ap->flags |= MR_PAGE_RX;
2247
2248                 ap->state = ANEG_STATE_COMPLETE_ACK;
2249                 ret = ANEG_TIMER_ENAB;
2250                 break;
2251
2252         case ANEG_STATE_COMPLETE_ACK:
2253                 if (ap->ability_match != 0 &&
2254                     ap->rxconfig == 0) {
2255                         ap->state = ANEG_STATE_AN_ENABLE;
2256                         break;
2257                 }
2258                 delta = ap->cur_time - ap->link_time;
2259                 if (delta > ANEG_STATE_SETTLE_TIME) {
2260                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2261                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2262                         } else {
2263                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2264                                     !(ap->flags & MR_NP_RX)) {
2265                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2266                                 } else {
2267                                         ret = ANEG_FAILED;
2268                                 }
2269                         }
2270                 }
2271                 break;
2272
2273         case ANEG_STATE_IDLE_DETECT_INIT:
2274                 ap->link_time = ap->cur_time;
2275                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2276                 tw32_f(MAC_MODE, tp->mac_mode);
2277                 udelay(40);
2278
2279                 ap->state = ANEG_STATE_IDLE_DETECT;
2280                 ret = ANEG_TIMER_ENAB;
2281                 break;
2282
2283         case ANEG_STATE_IDLE_DETECT:
2284                 if (ap->ability_match != 0 &&
2285                     ap->rxconfig == 0) {
2286                         ap->state = ANEG_STATE_AN_ENABLE;
2287                         break;
2288                 }
2289                 delta = ap->cur_time - ap->link_time;
2290                 if (delta > ANEG_STATE_SETTLE_TIME) {
2291                         /* XXX another gem from the Broadcom driver :( */
2292                         ap->state = ANEG_STATE_LINK_OK;
2293                 }
2294                 break;
2295
2296         case ANEG_STATE_LINK_OK:
2297                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2298                 ret = ANEG_DONE;
2299                 break;
2300
2301         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2302                 /* ??? unimplemented */
2303                 break;
2304
2305         case ANEG_STATE_NEXT_PAGE_WAIT:
2306                 /* ??? unimplemented */
2307                 break;
2308
2309         default:
2310                 ret = ANEG_FAILED;
2311                 break;
2312         }
2313
2314         return ret;
2315 }
2316
2317 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2318 {
2319         int res = 0;
2320         struct tg3_fiber_aneginfo aninfo;
2321         int status = ANEG_FAILED;
2322         unsigned int tick;
2323         u32 tmp;
2324
2325         tw32_f(MAC_TX_AUTO_NEG, 0);
2326
2327         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2328         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2329         udelay(40);
2330
2331         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2332         udelay(40);
2333
2334         memset(&aninfo, 0, sizeof(aninfo));
2335         aninfo.flags |= MR_AN_ENABLE;
2336         aninfo.state = ANEG_STATE_UNKNOWN;
2337         aninfo.cur_time = 0;
2338         tick = 0;
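        /* Step the state machine roughly once per microsecond for up
         * to ~195ms, or until it reports DONE or FAILED.
         */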
2339         while (++tick < 195000) {
2340                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2341                 if (status == ANEG_DONE || status == ANEG_FAILED)
2342                         break;
2343
2344                 udelay(1);
2345         }
2346
2347         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2348         tw32_f(MAC_MODE, tp->mac_mode);
2349         udelay(40);
2350
2351         *flags = aninfo.flags;
2352
2353         if (status == ANEG_DONE &&
2354             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2355                              MR_LP_ADV_FULL_DUPLEX)))
2356                 res = 1;
2357
2358         return res;
2359 }
2360
2361 static void tg3_init_bcm8002(struct tg3 *tp)
2362 {
2363         u32 mac_status = tr32(MAC_STATUS);
2364         int i;
2365
2366         /* Reset when initializing for the first time, or when we have a link. */
2367         if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2368             !(mac_status & MAC_STATUS_PCS_SYNCED))
2369                 return;
2370
2371         /* Set PLL lock range. */
2372         tg3_writephy(tp, 0x16, 0x8007);
2373
2374         /* SW reset */
2375         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2376
2377         /* Wait for reset to complete. */
2378         /* XXX schedule_timeout() ... */
2379         for (i = 0; i < 500; i++)
2380                 udelay(10);
2381
2382         /* Config mode; select PMA/Ch 1 regs. */
2383         tg3_writephy(tp, 0x10, 0x8411);
2384
2385         /* Enable auto-lock and comdet, select txclk for tx. */
2386         tg3_writephy(tp, 0x11, 0x0a10);
2387
2388         tg3_writephy(tp, 0x18, 0x00a0);
2389         tg3_writephy(tp, 0x16, 0x41ff);
2390
2391         /* Assert and deassert POR. */
2392         tg3_writephy(tp, 0x13, 0x0400);
2393         udelay(40);
2394         tg3_writephy(tp, 0x13, 0x0000);
2395
2396         tg3_writephy(tp, 0x11, 0x0a50);
2397         udelay(40);
2398         tg3_writephy(tp, 0x11, 0x0a10);
2399
2400         /* Wait for signal to stabilize */
2401         /* XXX schedule_timeout() ... */
2402         for (i = 0; i < 15000; i++)
2403                 udelay(10);
2404
2405         /* Deselect the channel register so we can read the PHYID
2406          * later.
2407          */
2408         tg3_writephy(tp, 0x10, 0x8011);
2409 }
2410
2411 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2412 {
2413         u32 sg_dig_ctrl, sg_dig_status;
2414         u32 serdes_cfg, expected_sg_dig_ctrl;
2415         int workaround, port_a;
2416         int current_link_up;
2417
2418         serdes_cfg = 0;
2419         expected_sg_dig_ctrl = 0;
2420         workaround = 0;
2421         port_a = 1;
2422         current_link_up = 0;
2423
2424         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2425             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2426                 workaround = 1;
2427                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2428                         port_a = 0;
2429
2430                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2431                 /* preserve bits 20-23 for voltage regulator */
2432                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2433         }
2434
2435         sg_dig_ctrl = tr32(SG_DIG_CTRL);
2436
2437         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2438                 if (sg_dig_ctrl & (1 << 31)) {
2439                         if (workaround) {
2440                                 u32 val = serdes_cfg;
2441
2442                                 if (port_a)
2443                                         val |= 0xc010000;
2444                                 else
2445                                         val |= 0x4010000;
2446                                 tw32_f(MAC_SERDES_CFG, val);
2447                         }
2448                         tw32_f(SG_DIG_CTRL, 0x01388400);
2449                 }
2450                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2451                         tg3_setup_flow_control(tp, 0, 0);
2452                         current_link_up = 1;
2453                 }
2454                 goto out;
2455         }
2456
2457         /* Want auto-negotiation.  */
2458         expected_sg_dig_ctrl = 0x81388400;
2459
2460         /* Pause capability */
2461         expected_sg_dig_ctrl |= (1 << 11);
2462
2463         /* Asymmetric pause */
2464         expected_sg_dig_ctrl |= (1 << 12);
2465
2466         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2467                 if (workaround)
2468                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2469                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2470                 udelay(5);
2471                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2472
2473                 tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2474         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2475                                  MAC_STATUS_SIGNAL_DET)) {
2476                 int i;
2477
2478                 /* Give it time to negotiate (~200ms) */
2479                 for (i = 0; i < 40000; i++) {
2480                         sg_dig_status = tr32(SG_DIG_STATUS);
2481                         if (sg_dig_status & (0x3))
2482                                 break;
2483                         udelay(5);
2484                 }
2485                 mac_status = tr32(MAC_STATUS);
2486
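                /* The driver treats bit 1 of SG_DIG_STATUS as autoneg
                 * complete, and bits 19/20 as the partner's symmetric/
                 * asymmetric pause advertisement.
                 */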
2487                 if ((sg_dig_status & (1 << 1)) &&
2488                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
2489                         u32 local_adv, remote_adv;
2490
2491                         local_adv = ADVERTISE_PAUSE_CAP;
2492                         remote_adv = 0;
2493                         if (sg_dig_status & (1 << 19))
2494                                 remote_adv |= LPA_PAUSE_CAP;
2495                         if (sg_dig_status & (1 << 20))
2496                                 remote_adv |= LPA_PAUSE_ASYM;
2497
2498                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2499                         current_link_up = 1;
2500                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2501                 } else if (!(sg_dig_status & (1 << 1))) {
2502                         if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
2503                                 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2504                         else {
2505                                 if (workaround) {
2506                                         u32 val = serdes_cfg;
2507
2508                                         if (port_a)
2509                                                 val |= 0xc010000;
2510                                         else
2511                                                 val |= 0x4010000;
2512
2513                                         tw32_f(MAC_SERDES_CFG, val);
2514                                 }
2515
2516                                 tw32_f(SG_DIG_CTRL, 0x01388400);
2517                                 udelay(40);
2518
2519                                 /* Link parallel detection - link is up */
2520                                 /* only if we have PCS_SYNC and are not */
2521                                 /* receiving config code words. */
2522                                 mac_status = tr32(MAC_STATUS);
2523                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2524                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
2525                                         tg3_setup_flow_control(tp, 0, 0);
2526                                         current_link_up = 1;
2527                                 }
2528                         }
2529                 }
2530         }
2531
2532 out:
2533         return current_link_up;
2534 }
2535
2536 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2537 {
2538         int current_link_up = 0;
2539
2540         if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
2541                 tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
2542                 goto out;
2543         }
2544
2545         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2546                 u32 flags;
2547                 int i;
2548
2549                 if (fiber_autoneg(tp, &flags)) {
2550                         u32 local_adv, remote_adv;
2551
2552                         local_adv = ADVERTISE_PAUSE_CAP;
2553                         remote_adv = 0;
2554                         if (flags & MR_LP_ADV_SYM_PAUSE)
2555                                 remote_adv |= LPA_PAUSE_CAP;
2556                         if (flags & MR_LP_ADV_ASYM_PAUSE)
2557                                 remote_adv |= LPA_PAUSE_ASYM;
2558
2559                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2560
2561                         tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2562                         current_link_up = 1;
2563                 }
2564                 for (i = 0; i < 30; i++) {
2565                         udelay(20);
2566                         tw32_f(MAC_STATUS,
2567                                (MAC_STATUS_SYNC_CHANGED |
2568                                 MAC_STATUS_CFG_CHANGED));
2569                         udelay(40);
2570                         if ((tr32(MAC_STATUS) &
2571                              (MAC_STATUS_SYNC_CHANGED |
2572                               MAC_STATUS_CFG_CHANGED)) == 0)
2573                                 break;
2574                 }
2575
2576                 mac_status = tr32(MAC_STATUS);
2577                 if (current_link_up == 0 &&
2578                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
2579                     !(mac_status & MAC_STATUS_RCVD_CFG))
2580                         current_link_up = 1;
2581         } else {
2582                 /* Forcing 1000FD link up. */
2583                 current_link_up = 1;
2584                 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2585
2586                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2587                 udelay(40);
2588         }
2589
2590 out:
2591         return current_link_up;
2592 }
2593
2594 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2595 {
2596         u32 orig_pause_cfg;
2597         u16 orig_active_speed;
2598         u8 orig_active_duplex;
2599         u32 mac_status;
2600         int current_link_up;
2601         int i;
2602
2603         orig_pause_cfg =
2604                 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2605                                   TG3_FLAG_TX_PAUSE));
2606         orig_active_speed = tp->link_config.active_speed;
2607         orig_active_duplex = tp->link_config.active_duplex;
2608
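        /* When hardware autoneg is off and the existing link is still
         * clean (PCS synced, signal detected, no config words and no
         * pending status changes), leave the link alone.
         */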
2609         if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2610             netif_carrier_ok(tp->dev) &&
2611             (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
2612                 mac_status = tr32(MAC_STATUS);
2613                 mac_status &= (MAC_STATUS_PCS_SYNCED |
2614                                MAC_STATUS_SIGNAL_DET |
2615                                MAC_STATUS_CFG_CHANGED |
2616                                MAC_STATUS_RCVD_CFG);
2617                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
2618                                    MAC_STATUS_SIGNAL_DET)) {
2619                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2620                                             MAC_STATUS_CFG_CHANGED));
2621                         return 0;
2622                 }
2623         }
2624
2625         tw32_f(MAC_TX_AUTO_NEG, 0);
2626
2627         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2628         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2629         tw32_f(MAC_MODE, tp->mac_mode);
2630         udelay(40);
2631
2632         if (tp->phy_id == PHY_ID_BCM8002)
2633                 tg3_init_bcm8002(tp);
2634
2635         /* Enable link change event even when serdes polling.  */
2636         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2637         udelay(40);
2638
2639         current_link_up = 0;
2640         mac_status = tr32(MAC_STATUS);
2641
2642         if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
2643                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
2644         else
2645                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2646
2647         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2648         tw32_f(MAC_MODE, tp->mac_mode);
2649         udelay(40);
2650
2651         tp->hw_status->status =
2652                 (SD_STATUS_UPDATED |
2653                  (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2654
2655         for (i = 0; i < 100; i++) {
2656                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2657                                     MAC_STATUS_CFG_CHANGED));
2658                 udelay(5);
2659                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
2660                                          MAC_STATUS_CFG_CHANGED)) == 0)
2661                         break;
2662         }
2663
2664         mac_status = tr32(MAC_STATUS);
2665         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
2666                 current_link_up = 0;
2667                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2668                         tw32_f(MAC_MODE, (tp->mac_mode |
2669                                           MAC_MODE_SEND_CONFIGS));
2670                         udelay(1);
2671                         tw32_f(MAC_MODE, tp->mac_mode);
2672                 }
2673         }
2674
2675         if (current_link_up == 1) {
2676                 tp->link_config.active_speed = SPEED_1000;
2677                 tp->link_config.active_duplex = DUPLEX_FULL;
2678                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2679                                     LED_CTRL_LNKLED_OVERRIDE |
2680                                     LED_CTRL_1000MBPS_ON));
2681         } else {
2682                 tp->link_config.active_speed = SPEED_INVALID;
2683                 tp->link_config.active_duplex = DUPLEX_INVALID;
2684                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2685                                     LED_CTRL_LNKLED_OVERRIDE |
2686                                     LED_CTRL_TRAFFIC_OVERRIDE));
2687         }
2688
2689         if (current_link_up != netif_carrier_ok(tp->dev)) {
2690                 if (current_link_up)
2691                         netif_carrier_on(tp->dev);
2692                 else
2693                         netif_carrier_off(tp->dev);
2694                 tg3_link_report(tp);
2695         } else {
2696                 u32 now_pause_cfg =
2697                         tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2698                                          TG3_FLAG_TX_PAUSE);
2699                 if (orig_pause_cfg != now_pause_cfg ||
2700                     orig_active_speed != tp->link_config.active_speed ||
2701                     orig_active_duplex != tp->link_config.active_duplex)
2702                         tg3_link_report(tp);
2703         }
2704
2705         return 0;
2706 }
2707
2708 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
2709 {
2710         int current_link_up, err = 0;
2711         u32 bmsr, bmcr;
2712         u16 current_speed;
2713         u8 current_duplex;
2714
2715         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2716         tw32_f(MAC_MODE, tp->mac_mode);
2717         udelay(40);
2718
2719         tw32(MAC_EVENT, 0);
2720
2721         tw32_f(MAC_STATUS,
2722              (MAC_STATUS_SYNC_CHANGED |
2723               MAC_STATUS_CFG_CHANGED |
2724               MAC_STATUS_MI_COMPLETION |
2725               MAC_STATUS_LNKSTATE_CHANGED));
2726         udelay(40);
2727
2728         if (force_reset)
2729                 tg3_phy_reset(tp);
2730
2731         current_link_up = 0;
2732         current_speed = SPEED_INVALID;
2733         current_duplex = DUPLEX_INVALID;
2734
2735         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2736         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
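        /* On the 5714, take the link-up indication from the MAC's TX
         * status rather than from the PHY's BMSR.
         */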
2737         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2738                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2739                         bmsr |= BMSR_LSTATUS;
2740                 else
2741                         bmsr &= ~BMSR_LSTATUS;
2742         }
2743
2744         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
2745
2746         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
2747             (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2748                 /* do nothing, just check for link up at the end */
2749         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2750                 u32 adv, new_adv;
2751
2752                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2753                 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
2754                                   ADVERTISE_1000XPAUSE |
2755                                   ADVERTISE_1000XPSE_ASYM |
2756                                   ADVERTISE_SLCT);
2757
2758                 /* Always advertise symmetric PAUSE just like copper */
2759                 new_adv |= ADVERTISE_1000XPAUSE;
2760
2761                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2762                         new_adv |= ADVERTISE_1000XHALF;
2763                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2764                         new_adv |= ADVERTISE_1000XFULL;
2765
2766                 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
2767                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
2768                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
2769                         tg3_writephy(tp, MII_BMCR, bmcr);
2770
2771                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2772                         tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2773                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2774
2775                         return err;
2776                 }
2777         } else {
2778                 u32 new_bmcr;
2779
2780                 bmcr &= ~BMCR_SPEED1000;
2781                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
2782
2783                 if (tp->link_config.duplex == DUPLEX_FULL)
2784                         new_bmcr |= BMCR_FULLDPLX;
2785
2786                 if (new_bmcr != bmcr) {
2787                         /* BMCR_SPEED1000 is a reserved bit that needs
2788                          * to be set on write.
2789                          */
2790                         new_bmcr |= BMCR_SPEED1000;
2791
2792                         /* Force a linkdown */
2793                         if (netif_carrier_ok(tp->dev)) {
2794                                 u32 adv;
2795
2796                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2797                                 adv &= ~(ADVERTISE_1000XFULL |
2798                                          ADVERTISE_1000XHALF |
2799                                          ADVERTISE_SLCT);
2800                                 tg3_writephy(tp, MII_ADVERTISE, adv);
2801                                 tg3_writephy(tp, MII_BMCR, bmcr |
2802                                                            BMCR_ANRESTART |
2803                                                            BMCR_ANENABLE);
2804                                 udelay(10);
2805                                 netif_carrier_off(tp->dev);
2806                         }
2807                         tg3_writephy(tp, MII_BMCR, new_bmcr);
2808                         bmcr = new_bmcr;
2809                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2810                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2811                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2812                             ASIC_REV_5714) {
2813                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2814                                         bmsr |= BMSR_LSTATUS;
2815                                 else
2816                                         bmsr &= ~BMSR_LSTATUS;
2817                         }
2818                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2819                 }
2820         }
2821
2822         if (bmsr & BMSR_LSTATUS) {
2823                 current_speed = SPEED_1000;
2824                 current_link_up = 1;
2825                 if (bmcr & BMCR_FULLDPLX)
2826                         current_duplex = DUPLEX_FULL;
2827                 else
2828                         current_duplex = DUPLEX_HALF;
2829
2830                 if (bmcr & BMCR_ANENABLE) {
2831                         u32 local_adv, remote_adv, common;
2832
2833                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
2834                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
2835                         common = local_adv & remote_adv;
2836                         if (common & (ADVERTISE_1000XHALF |
2837                                       ADVERTISE_1000XFULL)) {
2838                                 if (common & ADVERTISE_1000XFULL)
2839                                         current_duplex = DUPLEX_FULL;
2840                                 else
2841                                         current_duplex = DUPLEX_HALF;
2842
2843                                 tg3_setup_flow_control(tp, local_adv,
2844                                                        remote_adv);
2845                         } else
2847                                 current_link_up = 0;
2848                 }
2849         }
2850
2851         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2852         if (tp->link_config.active_duplex == DUPLEX_HALF)
2853                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2854
2855         tw32_f(MAC_MODE, tp->mac_mode);
2856         udelay(40);
2857
2858         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2859
2860         tp->link_config.active_speed = current_speed;
2861         tp->link_config.active_duplex = current_duplex;
2862
2863         if (current_link_up != netif_carrier_ok(tp->dev)) {
2864                 if (current_link_up)
2865                         netif_carrier_on(tp->dev);
2866                 else {
2867                         netif_carrier_off(tp->dev);
2868                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2869                 }
2870                 tg3_link_report(tp);
2871         }
2872         return err;
2873 }
2874
2875 static void tg3_serdes_parallel_detect(struct tg3 *tp)
2876 {
2877         if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED) {
2878                 /* Give autoneg time to complete. */
2879                 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2880                 return;
2881         }
2882         if (!netif_carrier_ok(tp->dev) &&
2883             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
2884                 u32 bmcr;
2885
2886                 tg3_readphy(tp, MII_BMCR, &bmcr);
2887                 if (bmcr & BMCR_ANENABLE) {
2888                         u32 phy1, phy2;
2889
2890                         /* Select shadow register 0x1f */
2891                         tg3_writephy(tp, 0x1c, 0x7c00);
2892                         tg3_readphy(tp, 0x1c, &phy1);
2893
2894                         /* Select expansion interrupt status register */
2895                         tg3_writephy(tp, 0x17, 0x0f01);
2896                         tg3_readphy(tp, 0x15, &phy2);
2897                         tg3_readphy(tp, 0x15, &phy2);
2898
2899                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
2900                                 /* We have signal detect and are not
2901                                  * receiving config code words, so the
2902                                  * link is up by parallel detection.
2903                                  */
2904
2905                                 bmcr &= ~BMCR_ANENABLE;
2906                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
2907                                 tg3_writephy(tp, MII_BMCR, bmcr);
2908                                 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
2909                         }
2910                 }
2911         } else if (netif_carrier_ok(tp->dev) &&
2913                  (tp->link_config.autoneg == AUTONEG_ENABLE) &&
2914                  (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2915                 u32 phy2;
2916
2917                 /* Select expansion interrupt status register */
2918                 tg3_writephy(tp, 0x17, 0x0f01);
2919                 tg3_readphy(tp, 0x15, &phy2);
2920                 if (phy2 & 0x20) {
2921                         u32 bmcr;
2922
2923                         /* Config code words received, turn on autoneg. */
2924                         tg3_readphy(tp, MII_BMCR, &bmcr);
2925                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
2926
2927                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2928
2929                 }
2930         }
2931 }
2932
2933 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2934 {
2935         int err;
2936
2937         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2938                 err = tg3_setup_fiber_phy(tp, force_reset);
2939         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
2940                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
2941         } else {
2942                 err = tg3_setup_copper_phy(tp, force_reset);
2943         }
2944
2945         if (tp->link_config.active_speed == SPEED_1000 &&
2946             tp->link_config.active_duplex == DUPLEX_HALF)
2947                 tw32(MAC_TX_LENGTHS,
2948                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2949                       (6 << TX_LENGTHS_IPG_SHIFT) |
2950                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2951         else
2952                 tw32(MAC_TX_LENGTHS,
2953                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2954                       (6 << TX_LENGTHS_IPG_SHIFT) |
2955                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2956
2957         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2958                 if (netif_carrier_ok(tp->dev)) {
2959                         tw32(HOSTCC_STAT_COAL_TICKS,
2960                              tp->coal.stats_block_coalesce_usecs);
2961                 } else {
2962                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
2963                 }
2964         }
2965
2966         return err;
2967 }
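
/* Illustrative note (added, not part of the original driver): tg3_setup_phy()
 * is normally called with tp->lock held; the link-change path in tg3_poll()
 * below, for example, does:
 *
 *      spin_lock(&tp->lock);
 *      tg3_setup_phy(tp, 0);
 *      spin_unlock(&tp->lock);
 */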
2968
2969 /* This is called whenever we suspect that the system chipset is re-
2970  * ordering the sequence of MMIO to the tx send mailbox. The symptom
2971  * is bogus tx completions. We try to recover by setting the
2972  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
2973  * in the workqueue.
2974  */
2975 static void tg3_tx_recover(struct tg3 *tp)
2976 {
2977         BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
2978                tp->write32_tx_mbox == tg3_write_indirect_mbox);
2979
2980         printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
2981                "mapped I/O cycles to the network device, attempting to "
2982                "recover. Please report the problem to the driver maintainer "
2983                "and include system chipset information.\n", tp->dev->name);
2984
2985         spin_lock(&tp->lock);
2986         tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
2987         spin_unlock(&tp->lock);
2988 }
2989
2990 /* Tigon3 never reports partial packet sends.  So we do not
2991  * need special logic to handle SKBs that have not had all
2992  * of their frags sent yet, like SunGEM does.
2993  */
2994 static void tg3_tx(struct tg3 *tp)
2995 {
2996         u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
2997         u32 sw_idx = tp->tx_cons;
2998
2999         while (sw_idx != hw_idx) {
3000                 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
3001                 struct sk_buff *skb = ri->skb;
3002                 int i, tx_bug = 0;
3003
3004                 if (unlikely(skb == NULL)) {
3005                         tg3_tx_recover(tp);
3006                         return;
3007                 }
3008
3009                 pci_unmap_single(tp->pdev,
3010                                  pci_unmap_addr(ri, mapping),
3011                                  skb_headlen(skb),
3012                                  PCI_DMA_TODEVICE);
3013
3014                 ri->skb = NULL;
3015
3016                 sw_idx = NEXT_TX(sw_idx);
3017
3018                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3019                         ri = &tp->tx_buffers[sw_idx];
3020                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
3021                                 tx_bug = 1;
3022
3023                         pci_unmap_page(tp->pdev,
3024                                        pci_unmap_addr(ri, mapping),
3025                                        skb_shinfo(skb)->frags[i].size,
3026                                        PCI_DMA_TODEVICE);
3027
3028                         sw_idx = NEXT_TX(sw_idx);
3029                 }
3030
3031                 dev_kfree_skb(skb);
3032
3033                 if (unlikely(tx_bug)) {
3034                         tg3_tx_recover(tp);
3035                         return;
3036                 }
3037         }
3038
3039         tp->tx_cons = sw_idx;
3040
3041         if (unlikely(netif_queue_stopped(tp->dev))) {
3042                 spin_lock(&tp->tx_lock);
3043                 if (netif_queue_stopped(tp->dev) &&
3044                     (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
3045                         netif_wake_queue(tp->dev);
3046                 spin_unlock(&tp->tx_lock);
3047         }
3048 }
3049
3050 /* Returns size of skb allocated or < 0 on error.
3051  *
3052  * We only need to fill in the address because the other members
3053  * of the RX descriptor are invariant, see tg3_init_rings.
3054  *
3055  * Note the purposeful asymmetry of CPU vs. chip accesses.  For
3056  * posting buffers we only dirty the first cache line of the RX
3057  * descriptor (containing the address).  Whereas for the RX status
3058  * buffers the CPU only reads the last cache line of the RX descriptor
3059  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3060  */
3061 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3062                             int src_idx, u32 dest_idx_unmasked)
3063 {
3064         struct tg3_rx_buffer_desc *desc;
3065         struct ring_info *map, *src_map;
3066         struct sk_buff *skb;
3067         dma_addr_t mapping;
3068         int skb_size, dest_idx;
3069
3070         src_map = NULL;
3071         switch (opaque_key) {
3072         case RXD_OPAQUE_RING_STD:
3073                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3074                 desc = &tp->rx_std[dest_idx];
3075                 map = &tp->rx_std_buffers[dest_idx];
3076                 if (src_idx >= 0)
3077                         src_map = &tp->rx_std_buffers[src_idx];
3078                 skb_size = tp->rx_pkt_buf_sz;
3079                 break;
3080
3081         case RXD_OPAQUE_RING_JUMBO:
3082                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3083                 desc = &tp->rx_jumbo[dest_idx];
3084                 map = &tp->rx_jumbo_buffers[dest_idx];
3085                 if (src_idx >= 0)
3086                         src_map = &tp->rx_jumbo_buffers[src_idx];
3087                 skb_size = RX_JUMBO_PKT_BUF_SZ;
3088                 break;
3089
3090         default:
3091                 return -EINVAL;
3092         }
3093
3094         /* Do not overwrite any of the map or rp information
3095          * until we are sure we can commit to a new buffer.
3096          *
3097          * Callers depend upon this behavior and assume that
3098          * we leave everything unchanged if we fail.
3099          */
3100         skb = netdev_alloc_skb(tp->dev, skb_size);
3101         if (skb == NULL)
3102                 return -ENOMEM;
3103
3104         skb->dev = tp->dev;
3105         skb_reserve(skb, tp->rx_offset);
3106
3107         mapping = pci_map_single(tp->pdev, skb->data,
3108                                  skb_size - tp->rx_offset,
3109                                  PCI_DMA_FROMDEVICE);
3110
3111         map->skb = skb;
3112         pci_unmap_addr_set(map, mapping, mapping);
3113
3114         if (src_map != NULL)
3115                 src_map->skb = NULL;
3116
3117         desc->addr_hi = ((u64)mapping >> 32);
3118         desc->addr_lo = ((u64)mapping & 0xffffffff);
3119
3120         return skb_size;
3121 }
3122
3123 /* We only need to copy the address over because the other
3124  * members of the RX descriptor are invariant.  See notes above
3125  * tg3_alloc_rx_skb for full details.
3126  */
3127 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3128                            int src_idx, u32 dest_idx_unmasked)
3129 {
3130         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3131         struct ring_info *src_map, *dest_map;
3132         int dest_idx;
3133
3134         switch (opaque_key) {
3135         case RXD_OPAQUE_RING_STD:
3136                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3137                 dest_desc = &tp->rx_std[dest_idx];
3138                 dest_map = &tp->rx_std_buffers[dest_idx];
3139                 src_desc = &tp->rx_std[src_idx];
3140                 src_map = &tp->rx_std_buffers[src_idx];
3141                 break;
3142
3143         case RXD_OPAQUE_RING_JUMBO:
3144                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3145                 dest_desc = &tp->rx_jumbo[dest_idx];
3146                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3147                 src_desc = &tp->rx_jumbo[src_idx];
3148                 src_map = &tp->rx_jumbo_buffers[src_idx];
3149                 break;
3150
3151         default:
3152                 return;
3153         }
3154
3155         dest_map->skb = src_map->skb;
3156         pci_unmap_addr_set(dest_map, mapping,
3157                            pci_unmap_addr(src_map, mapping));
3158         dest_desc->addr_hi = src_desc->addr_hi;
3159         dest_desc->addr_lo = src_desc->addr_lo;
3160
3161         src_map->skb = NULL;
3162 }
3163
3164 #if TG3_VLAN_TAG_USED
3165 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
3166 {
3167         return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
3168 }
3169 #endif
3170
3171 /* The RX ring scheme is composed of multiple rings which post fresh
3172  * buffers to the chip, and one special ring the chip uses to report
3173  * status back to the host.
3174  *
3175  * The special ring reports the status of received packets to the
3176  * host.  The chip does not write into the original descriptor the
3177  * RX buffer was obtained from.  The chip simply takes the original
3178  * descriptor as provided by the host, updates the status and length
3179  * field, then writes this into the next status ring entry.
3180  *
3181  * Each ring the host uses to post buffers to the chip is described
3182  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
3183  * it is first placed into the on-chip RAM.  When the packet's length
3184  * is known, the chip walks down the TG3_BDINFO entries to select the ring.
3185  * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
3186  * whose MAXLEN covers the new packet's length is chosen.
3187  *
3188  * The "separate ring for rx status" scheme may sound queer, but it makes
3189  * sense from a cache coherency perspective.  If only the host writes
3190  * to the buffer post rings, and only the chip writes to the rx status
3191  * rings, then cache lines never move beyond shared-modified state.
3192  * If both the host and chip were to write into the same ring, cache line
3193  * eviction could occur since both entities want it in an exclusive state.
3194  */
3195 static int tg3_rx(struct tg3 *tp, int budget)
3196 {
3197         u32 work_mask, rx_std_posted = 0;
3198         u32 sw_idx = tp->rx_rcb_ptr;
3199         u16 hw_idx;
3200         int received;
3201
3202         hw_idx = tp->hw_status->idx[0].rx_producer;
3203         /*
3204          * We need to order the read of hw_idx and the read of
3205          * the opaque cookie.
3206          */
3207         rmb();
3208         work_mask = 0;
3209         received = 0;
3210         while (sw_idx != hw_idx && budget > 0) {
3211                 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
3212                 unsigned int len;
3213                 struct sk_buff *skb;
3214                 dma_addr_t dma_addr;
3215                 u32 opaque_key, desc_idx, *post_ptr;
3216
3217                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
3218                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
3219                 if (opaque_key == RXD_OPAQUE_RING_STD) {
3220                         dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
3221                                                   mapping);
3222                         skb = tp->rx_std_buffers[desc_idx].skb;
3223                         post_ptr = &tp->rx_std_ptr;
3224                         rx_std_posted++;
3225                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
3226                         dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
3227                                                   mapping);
3228                         skb = tp->rx_jumbo_buffers[desc_idx].skb;
3229                         post_ptr = &tp->rx_jumbo_ptr;
3230                 } else {
3232                         goto next_pkt_nopost;
3233                 }
3234
3235                 work_mask |= opaque_key;
3236
3237                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
3238                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
3239                 drop_it:
3240                         tg3_recycle_rx(tp, opaque_key,
3241                                        desc_idx, *post_ptr);
3242                 drop_it_no_recycle:
3243                         /* Other statistics are kept track of by the card. */
3244                         tp->net_stats.rx_dropped++;
3245                         goto next_pkt;
3246                 }
3247
3248                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
3249
3250                 if (len > RX_COPY_THRESHOLD &&
3251                     tp->rx_offset == 2) {
3252                         /* rx_offset != 2 iff this is a 5701 card running
3253                          * in PCI-X mode [see tg3_get_invariants()]
3254                          */
3255                         int skb_size;
3256
3257                         skb_size = tg3_alloc_rx_skb(tp, opaque_key,
3258                                                     desc_idx, *post_ptr);
3259                         if (skb_size < 0)
3260                                 goto drop_it;
3261
3262                         pci_unmap_single(tp->pdev, dma_addr,
3263                                          skb_size - tp->rx_offset,
3264                                          PCI_DMA_FROMDEVICE);
3265
3266                         skb_put(skb, len);
3267                 } else {
3268                         struct sk_buff *copy_skb;
3269
3270                         tg3_recycle_rx(tp, opaque_key,
3271                                        desc_idx, *post_ptr);
3272
3273                         copy_skb = netdev_alloc_skb(tp->dev, len + 2);
3274                         if (copy_skb == NULL)
3275                                 goto drop_it_no_recycle;
3276
3277                         copy_skb->dev = tp->dev;
3278                         skb_reserve(copy_skb, 2);
3279                         skb_put(copy_skb, len);
3280                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3281                         memcpy(copy_skb->data, skb->data, len);
3282                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3283
3284                         /* We'll reuse the original ring buffer. */
3285                         skb = copy_skb;
3286                 }
3287
3288                 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
3289                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
3290                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
3291                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
3292                         skb->ip_summed = CHECKSUM_UNNECESSARY;
3293                 else
3294                         skb->ip_summed = CHECKSUM_NONE;
3295
3296                 skb->protocol = eth_type_trans(skb, tp->dev);
3297 #if TG3_VLAN_TAG_USED
3298                 if (tp->vlgrp != NULL &&
3299                     desc->type_flags & RXD_FLAG_VLAN) {
3300                         tg3_vlan_rx(tp, skb,
3301                                     desc->err_vlan & RXD_VLAN_MASK);
3302                 } else
3303 #endif
3304                         netif_receive_skb(skb);
3305
3306                 tp->dev->last_rx = jiffies;
3307                 received++;
3308                 budget--;
3309
3310 next_pkt:
3311                 (*post_ptr)++;
3312
3313                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
3314                         u32 idx = *post_ptr % TG3_RX_RING_SIZE;
3315
3316                         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
3317                                      TG3_64BIT_REG_LOW, idx);
3318                         work_mask &= ~RXD_OPAQUE_RING_STD;
3319                         rx_std_posted = 0;
3320                 }
3321 next_pkt_nopost:
3322                 sw_idx++;
3323                 sw_idx %= TG3_RX_RCB_RING_SIZE(tp);
3324
3325                 /* Refresh hw_idx to see if there is new work */
3326                 if (sw_idx == hw_idx) {
3327                         hw_idx = tp->hw_status->idx[0].rx_producer;
3328                         rmb();
3329                 }
3330         }
3331
3332         /* ACK the status ring. */
3333         tp->rx_rcb_ptr = sw_idx;
3334         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
3335
3336         /* Refill RX ring(s). */
3337         if (work_mask & RXD_OPAQUE_RING_STD) {
3338                 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
3339                 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
3340                              sw_idx);
3341         }
3342         if (work_mask & RXD_OPAQUE_RING_JUMBO) {
3343                 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
3344                 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
3345                              sw_idx);
3346         }
3347         mmiowb();
3348
3349         return received;
3350 }
3351
3352 static int tg3_poll(struct net_device *netdev, int *budget)
3353 {
3354         struct tg3 *tp = netdev_priv(netdev);
3355         struct tg3_hw_status *sblk = tp->hw_status;
3356         int done;
3357
3358         /* handle link change and other phy events */
3359         if (!(tp->tg3_flags &
3360               (TG3_FLAG_USE_LINKCHG_REG |
3361                TG3_FLAG_POLL_SERDES))) {
3362                 if (sblk->status & SD_STATUS_LINK_CHG) {
3363                         sblk->status = SD_STATUS_UPDATED |
3364                                 (sblk->status & ~SD_STATUS_LINK_CHG);
3365                         spin_lock(&tp->lock);
3366                         tg3_setup_phy(tp, 0);
3367                         spin_unlock(&tp->lock);
3368                 }
3369         }
3370
3371         /* run TX completion thread */
3372         if (sblk->idx[0].tx_consumer != tp->tx_cons) {
3373                 tg3_tx(tp);
3374                 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) {
3375                         netif_rx_complete(netdev);
3376                         schedule_work(&tp->reset_task);
3377                         return 0;
3378                 }
3379         }
3380
3381         /* run RX thread, within the bounds set by NAPI.
3382          * All RX "locking" is done by ensuring outside
3383          * code synchronizes with dev->poll()
3384          */
3385         if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
3386                 int orig_budget = *budget;
3387                 int work_done;
3388
3389                 if (orig_budget > netdev->quota)
3390                         orig_budget = netdev->quota;
3391
3392                 work_done = tg3_rx(tp, orig_budget);
3393
3394                 *budget -= work_done;
3395                 netdev->quota -= work_done;
3396         }
3397
3398         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
3399                 tp->last_tag = sblk->status_tag;
3400                 rmb();
3401         } else
3402                 sblk->status &= ~SD_STATUS_UPDATED;
3403
3404         /* if no more work, tell net stack and NIC we're done */
3405         done = !tg3_has_work(tp);
3406         if (done) {
3407                 netif_rx_complete(netdev);
3408                 tg3_restart_ints(tp);
3409         }
3410
3411         return (done ? 0 : 1);
3412 }
3413
3414 static void tg3_irq_quiesce(struct tg3 *tp)
3415 {
3416         BUG_ON(tp->irq_sync);
3417
3418         tp->irq_sync = 1;
3419         smp_mb();
3420
3421         synchronize_irq(tp->pdev->irq);
3422 }
3423
3424 static inline int tg3_irq_sync(struct tg3 *tp)
3425 {
3426         return tp->irq_sync;
3427 }
3428
3429 /* Fully shut down all tg3 driver activity elsewhere in the system.
3430  * If irq_sync is non-zero, we synchronize with the IRQ handler as
3431  * well.  Most of the time this is only necessary when shutting down
3432  * the device.
3433  */
3434 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
3435 {
3436         if (irq_sync)
3437                 tg3_irq_quiesce(tp);
3438         spin_lock_bh(&tp->lock);
3439 }
3440
3441 static inline void tg3_full_unlock(struct tg3 *tp)
3442 {
3443         spin_unlock_bh(&tp->lock);
3444 }
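
/* Illustrative sketch (added, not part of the original driver): slow-path code
 * such as tg3_reset_task() below brackets hardware reconfiguration with these
 * helpers, passing irq_sync=1 when the IRQ handler must also be quiesced.  The
 * hypothetical tg3_example_reconfigure() exists only for illustration.
 */
#if 0   /* example usage, never compiled */
static void tg3_example_reconfigure(struct tg3 *tp)
{
        tg3_full_lock(tp, 1);                   /* quiesce IRQs, then lock */
        tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);   /* stop the chip */
        /* ... reprogram the hardware here ... */
        tg3_full_unlock(tp);
}
#endif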
3445
3446 /* One-shot MSI handler - the chip automatically disables the interrupt
3447  * after sending the MSI, so the driver doesn't have to do it.
3448  */
3449 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id, struct pt_regs *regs)
3450 {
3451         struct net_device *dev = dev_id;
3452         struct tg3 *tp = netdev_priv(dev);
3453
3454         prefetch(tp->hw_status);
3455         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3456
3457         if (likely(!tg3_irq_sync(tp)))
3458                 netif_rx_schedule(dev);         /* schedule NAPI poll */
3459
3460         return IRQ_HANDLED;
3461 }
3462
3463 /* MSI ISR - No need to check for interrupt sharing and no need to
3464  * flush status block and interrupt mailbox. PCI ordering rules
3465  * guarantee that MSI will arrive after the status block.
3466  */
3467 static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
3468 {
3469         struct net_device *dev = dev_id;
3470         struct tg3 *tp = netdev_priv(dev);
3471
3472         prefetch(tp->hw_status);
3473         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3474         /*
3475          * Writing any value to intr-mbox-0 clears PCI INTA# and
3476          * chip-internal interrupt pending events.
3477          * Writing non-zero to intr-mbox-0 additionally tells the
3478          * NIC to stop sending us irqs, engaging "in-intr-handler"
3479          * event coalescing.
3480          */
3481         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
3482         if (likely(!tg3_irq_sync(tp)))
3483                 netif_rx_schedule(dev);         /* schedule NAPI poll */
3484
3485         return IRQ_RETVAL(1);
3486 }
3487
3488 static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
3489 {
3490         struct net_device *dev = dev_id;
3491         struct tg3 *tp = netdev_priv(dev);
3492         struct tg3_hw_status *sblk = tp->hw_status;
3493         unsigned int handled = 1;
3494
3495         /* In INTx mode, it is possible for the interrupt to arrive at
3496          * the CPU before the status block that was posted prior to the interrupt.
3497          * Reading the PCI State register will confirm whether the
3498          * interrupt is ours and will flush the status block.
3499          */
3500         if ((sblk->status & SD_STATUS_UPDATED) ||
3501             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3502                 /*
3503                  * Writing any value to intr-mbox-0 clears PCI INTA# and
3504                  * chip-internal interrupt pending events.
3505                  * Writing non-zero to intr-mbox-0 additionally tells the
3506                  * NIC to stop sending us irqs, engaging "in-intr-handler"
3507                  * event coalescing.
3508                  */
3509                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3510                              0x00000001);
3511                 if (tg3_irq_sync(tp))
3512                         goto out;
3513                 sblk->status &= ~SD_STATUS_UPDATED;
3514                 if (likely(tg3_has_work(tp))) {
3515                         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3516                         netif_rx_schedule(dev);         /* schedule NAPI poll */
3517                 } else {
3518                         /* No work; shared interrupt perhaps?  Re-enable
3519                          * interrupts, and flush that PCI write.
3520                          */
3521                         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3522                                 0x00000000);
3523                 }
3524         } else {        /* shared interrupt */
3525                 handled = 0;
3526         }
3527 out:
3528         return IRQ_RETVAL(handled);
3529 }
3530
3531 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
3532 {
3533         struct net_device *dev = dev_id;
3534         struct tg3 *tp = netdev_priv(dev);
3535         struct tg3_hw_status *sblk = tp->hw_status;
3536         unsigned int handled = 1;
3537
3538         /* In INTx mode, it is possible for the interrupt to arrive at
3539          * the CPU before the status block that was posted prior to the interrupt.
3540          * Reading the PCI State register will confirm whether the
3541          * interrupt is ours and will flush the status block.
3542          */
3543         if ((sblk->status_tag != tp->last_tag) ||
3544             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3545                 /*
3546                  * Writing any value to intr-mbox-0 clears PCI INTA# and
3547                  * chip-internal interrupt pending events.
3548                  * Writing non-zero to intr-mbox-0 additionally tells the
3549                  * NIC to stop sending us irqs, engaging "in-intr-handler"
3550                  * event coalescing.
3551                  */
3552                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3553                              0x00000001);
3554                 if (tg3_irq_sync(tp))
3555                         goto out;
3556                 if (netif_rx_schedule_prep(dev)) {
3557                         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3558                         /* Update last_tag to mark that this status has been
3559                          * seen. Because interrupt may be shared, we may be
3560                          * racing with tg3_poll(), so only update last_tag
3561                          * if tg3_poll() is not scheduled.
3562                          */
3563                         tp->last_tag = sblk->status_tag;
3564                         __netif_rx_schedule(dev);
3565                 }
3566         } else {        /* shared interrupt */
3567                 handled = 0;
3568         }
3569 out:
3570         return IRQ_RETVAL(handled);
3571 }
3572
3573 /* ISR for interrupt test */
3574 static irqreturn_t tg3_test_isr(int irq, void *dev_id,
3575                 struct pt_regs *regs)
3576 {
3577         struct net_device *dev = dev_id;
3578         struct tg3 *tp = netdev_priv(dev);
3579         struct tg3_hw_status *sblk = tp->hw_status;
3580
3581         if ((sblk->status & SD_STATUS_UPDATED) ||
3582             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3583                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3584                              0x00000001);
3585                 return IRQ_RETVAL(1);
3586         }
3587         return IRQ_RETVAL(0);
3588 }
3589
3590 static int tg3_init_hw(struct tg3 *, int);
3591 static int tg3_halt(struct tg3 *, int, int);
3592
3593 /* Restart hardware after configuration changes, self-test, etc.
3594  * Invoked with tp->lock held.
3595  */
3596 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
3597 {
3598         int err;
3599
3600         err = tg3_init_hw(tp, reset_phy);
3601         if (err) {
3602                 printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
3603                        "aborting.\n", tp->dev->name);
3604                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
3605                 tg3_full_unlock(tp);
3606                 del_timer_sync(&tp->timer);
3607                 tp->irq_sync = 0;
3608                 netif_poll_enable(tp->dev);
3609                 dev_close(tp->dev);
3610                 tg3_full_lock(tp, 0);
3611         }
3612         return err;
3613 }
3614
3615 #ifdef CONFIG_NET_POLL_CONTROLLER
3616 static void tg3_poll_controller(struct net_device *dev)
3617 {
3618         struct tg3 *tp = netdev_priv(dev);
3619
3620         tg3_interrupt(tp->pdev->irq, dev, NULL);
3621 }
3622 #endif
3623
3624 static void tg3_reset_task(void *_data)
3625 {
3626         struct tg3 *tp = _data;
3627         unsigned int restart_timer;
3628
3629         tg3_full_lock(tp, 0);
3630         tp->tg3_flags |= TG3_FLAG_IN_RESET_TASK;
3631
3632         if (!netif_running(tp->dev)) {
3633                 tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
3634                 tg3_full_unlock(tp);
3635                 return;
3636         }
3637
3638         tg3_full_unlock(tp);
3639
3640         tg3_netif_stop(tp);
3641
3642         tg3_full_lock(tp, 1);
3643
3644         restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
3645         tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
3646
3647         if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
3648                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
3649                 tp->write32_rx_mbox = tg3_write_flush_reg32;
3650                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
3651                 tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
3652         }
3653
3654         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
3655         if (tg3_init_hw(tp, 1))
3656                 goto out;
3657
3658         tg3_netif_start(tp);
3659
3660         if (restart_timer)
3661                 mod_timer(&tp->timer, jiffies + 1);
3662
3663 out:
3664         tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
3665
3666         tg3_full_unlock(tp);
3667 }
3668
3669 static void tg3_tx_timeout(struct net_device *dev)
3670 {
3671         struct tg3 *tp = netdev_priv(dev);
3672
3673         printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
3674                dev->name);
3675
3676         schedule_work(&tp->reset_task);
3677 }
3678
3679 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc. */
3680 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3681 {
3682         u32 base = (u32) mapping & 0xffffffff;
3683
3684         return ((base > 0xffffdcc0) &&
3685                 (base + len + 8 < base));
3686 }
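
/* Worked example (illustrative addition, not from the original source): if the
 * low 32 bits of the mapping are base = 0xfffff000 and len = 0x1000, then base
 * is above 0xffffdcc0 and base + len + 8 = 0x100000008 truncates to
 * 0x00000008, which is < base, so the buffer straddles a 4GB boundary and the
 * test fires.
 */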
3687
3688 /* Test for DMA addresses > 40-bit */
3689 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
3690                                           int len)
3691 {
3692 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
3693         if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
3694                 return (((u64) mapping + len) > DMA_40BIT_MASK);
3695         return 0;
3696 #else
3697         return 0;
3698 #endif
3699 }
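
/* Worked example (illustrative addition, not from the original source): on a
 * 64-bit CONFIG_HIGHMEM build with TG3_FLAG_40BIT_DMA_BUG set, a mapping of
 * 0xfffffff000 with len = 0x2000 gives mapping + len = 0x10000001000, which
 * exceeds DMA_40BIT_MASK, so the test fires and the workaround path is taken.
 */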
3700
3701 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3702
3703 /* Work around 4GB and 40-bit hardware DMA bugs. */
3704 static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
3705                                        u32 last_plus_one, u32 *start,
3706                                        u32 base_flags, u32 mss)
3707 {
3708         struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
3709         dma_addr_t new_addr = 0;
3710         u32 entry = *start;
3711         int i, ret = 0;
3712
3713         if (!new_skb) {
3714                 ret = -1;
3715         } else {
3716                 /* New SKB is guaranteed to be linear. */
3717                 entry = *start;
3718                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
3719                                           PCI_DMA_TODEVICE);
3720                 /* Make sure new skb does not cross any 4G boundaries.
3721                  * Drop the packet if it does.
3722                  */
3723                 if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
3724                         ret = -1;
3725                         dev_kfree_skb(new_skb);
3726                         new_skb = NULL;
3727                 } else {
3728                         tg3_set_txd(tp, entry, new_addr, new_skb->len,
3729                                     base_flags, 1 | (mss << 1));
3730                         *start = NEXT_TX(entry);
3731                 }
3732         }
3733
3734         /* Now clean up the sw ring entries. */
3735         i = 0;
3736         while (entry != last_plus_one) {
3737                 int len;
3738
3739                 if (i == 0)
3740                         len = skb_headlen(skb);
3741                 else
3742                         len = skb_shinfo(skb)->frags[i-1].size;
3743                 pci_unmap_single(tp->pdev,
3744                                  pci_unmap_addr(&tp->tx_buffers[entry], mapping),
3745                                  len, PCI_DMA_TODEVICE);
3746                 if (i == 0) {
3747                         tp->tx_buffers[entry].skb = new_skb;
3748                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
3749                 } else {
3750                         tp->tx_buffers[entry].skb = NULL;
3751                 }
3752                 entry = NEXT_TX(entry);
3753                 i++;
3754         }
3755
3756         dev_kfree_skb(skb);
3757
3758         return ret;
3759 }
3760
3761 static void tg3_set_txd(struct tg3 *tp, int entry,
3762                         dma_addr_t mapping, int len, u32 flags,
3763                         u32 mss_and_is_end)
3764 {
3765         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3766         int is_end = (mss_and_is_end & 0x1);
3767         u32 mss = (mss_and_is_end >> 1);
3768         u32 vlan_tag = 0;
3769
3770         if (is_end)
3771                 flags |= TXD_FLAG_END;
3772         if (flags & TXD_FLAG_VLAN) {
3773                 vlan_tag = flags >> 16;
3774                 flags &= 0xffff;
3775         }
3776         vlan_tag |= (mss << TXD_MSS_SHIFT);
3777
3778         txd->addr_hi = ((u64) mapping >> 32);
3779         txd->addr_lo = ((u64) mapping & 0xffffffff);
3780         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3781         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3782 }
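
/* Illustrative note (added, not part of the original driver): callers pack the
 * "is_end" flag into bit 0 and the MSS into the remaining bits of
 * mss_and_is_end, as tg3_start_xmit() below does:
 *
 *      tg3_set_txd(tp, entry, mapping, len, base_flags,
 *                  (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
 */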
3783
3784 /* hard_start_xmit for devices that don't have any bugs and
3785  * support TG3_FLG2_HW_TSO_2 only.
3786  */
3787 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3788 {
3789         struct tg3 *tp = netdev_priv(dev);
3790         dma_addr_t mapping;
3791         u32 len, entry, base_flags, mss;
3792
3793         len = skb_headlen(skb);
3794
3795         /* We are running in BH disabled context with netif_tx_lock
3796          * and TX reclaim runs via tp->poll inside of a software
3797          * interrupt.  Furthermore, IRQ processing runs lockless so we have
3798          * no IRQ context deadlocks to worry about either.  Rejoice!
3799          */
3800         if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3801                 if (!netif_queue_stopped(dev)) {
3802                         netif_stop_queue(dev);
3803
3804                         /* This is a hard error, log it. */
3805                         printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
3806                                "queue awake!\n", dev->name);
3807                 }
3808                 return NETDEV_TX_BUSY;
3809         }
3810
3811         entry = tp->tx_prod;
3812         base_flags = 0;
3813 #if TG3_TSO_SUPPORT != 0
3814         mss = 0;
3815         if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3816             (mss = skb_shinfo(skb)->gso_size) != 0) {
3817                 int tcp_opt_len, ip_tcp_len;
3818
3819                 if (skb_header_cloned(skb) &&
3820                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3821                         dev_kfree_skb(skb);
3822                         goto out_unlock;
3823                 }
3824
3825                 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
3826                         mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
3827                 else {
3828                         tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3829                         ip_tcp_len = (skb->nh.iph->ihl * 4) +
3830                                      sizeof(struct tcphdr);
3831
3832                         skb->nh.iph->check = 0;
3833                         skb->nh.iph->tot_len = htons(mss + ip_tcp_len +
3834                                                      tcp_opt_len);
3835                         mss |= (ip_tcp_len + tcp_opt_len) << 9;
3836                 }
3837
3838                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3839                                TXD_FLAG_CPU_POST_DMA);
3840
3841                 skb->h.th->check = 0;
3842
3843         } else if (skb->ip_summed == CHECKSUM_HW)
3845                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3846 #else
3847         mss = 0;
3848         if (skb->ip_summed == CHECKSUM_HW)
3849                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3850 #endif
3851 #if TG3_VLAN_TAG_USED
3852         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3853                 base_flags |= (TXD_FLAG_VLAN |
3854                                (vlan_tx_tag_get(skb) << 16));
3855 #endif
3856
3857         /* Queue skb data, a.k.a. the main skb fragment. */
3858         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3859
3860         tp->tx_buffers[entry].skb = skb;
3861         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3862
3863         tg3_set_txd(tp, entry, mapping, len, base_flags,
3864                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3865
3866         entry = NEXT_TX(entry);
3867
3868         /* Now loop through additional data fragments, and queue them. */
3869         if (skb_shinfo(skb)->nr_frags > 0) {
3870                 unsigned int i, last;
3871
3872                 last = skb_shinfo(skb)->nr_frags - 1;
3873                 for (i = 0; i <= last; i++) {
3874                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3875
3876                         len = frag->size;
3877                         mapping = pci_map_page(tp->pdev,
3878                                                frag->page,
3879                                                frag->page_offset,
3880                                                len, PCI_DMA_TODEVICE);
3881
3882                         tp->tx_buffers[entry].skb = NULL;
3883                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3884
3885                         tg3_set_txd(tp, entry, mapping, len,
3886                                     base_flags, (i == last) | (mss << 1));
3887
3888                         entry = NEXT_TX(entry);
3889                 }
3890         }
3891
3892         /* Packets are ready, update Tx producer idx locally and on the card. */
3893         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3894
3895         tp->tx_prod = entry;
3896         if (unlikely(TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))) {
3897                 spin_lock(&tp->tx_lock);
3898                 netif_stop_queue(dev);
3899                 if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
3900                         netif_wake_queue(tp->dev);
3901                 spin_unlock(&tp->tx_lock);
3902         }
3903
3904 out_unlock:
3905         mmiowb();
3906
3907         dev->trans_start = jiffies;
3908
3909         return NETDEV_TX_OK;
3910 }
3911
3912 #if TG3_TSO_SUPPORT != 0
3913 static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
3914
3915 /* Use GSO to work around a rare TSO bug that may be triggered when the
3916  * TSO header is greater than 80 bytes.
3917  */
3918 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
3919 {
3920         struct sk_buff *segs, *nskb;
3921
3922         /* Estimate the number of fragments in the worst case */
3923         if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
3924                 netif_stop_queue(tp->dev);
3925                 return NETDEV_TX_BUSY;
3926         }
3927
3928         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
3929         if (unlikely(IS_ERR(segs)))
3930                 goto tg3_tso_bug_end;
3931
3932         do {
3933                 nskb = segs;
3934                 segs = segs->next;
3935                 nskb->next = NULL;
3936                 tg3_start_xmit_dma_bug(nskb, tp->dev);
3937         } while (segs);
3938
3939 tg3_tso_bug_end:
3940         dev_kfree_skb(skb);
3941
3942         return NETDEV_TX_OK;
3943 }
3944 #endif
3945
3946 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
3947  * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
3948  */
3949 static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
3950 {
3951         struct tg3 *tp = netdev_priv(dev);
3952         dma_addr_t mapping;
3953         u32 len, entry, base_flags, mss;
3954         int would_hit_hwbug;
3955
3956         len = skb_headlen(skb);
3957
3958         /* We are running in BH disabled context with netif_tx_lock
3959          * and TX reclaim runs via tp->poll inside of a software
3960          * interrupt.  Furthermore, IRQ processing runs lockless so we have
3961          * no IRQ context deadlocks to worry about either.  Rejoice!
3962          */
3963         if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3964                 if (!netif_queue_stopped(dev)) {
3965                         netif_stop_queue(dev);
3966
3967                         /* This is a hard error, log it. */
3968                         printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
3969                                "queue awake!\n", dev->name);
3970                 }
3971                 return NETDEV_TX_BUSY;
3972         }
3973
3974         entry = tp->tx_prod;
3975         base_flags = 0;
3976         if (skb->ip_summed == CHECKSUM_HW)
3977                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3978 #if TG3_TSO_SUPPORT != 0
3979         mss = 0;
3980         if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3981             (mss = skb_shinfo(skb)->gso_size) != 0) {
3982                 int tcp_opt_len, ip_tcp_len, hdr_len;
3983
3984                 if (skb_header_cloned(skb) &&
3985                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3986                         dev_kfree_skb(skb);
3987                         goto out_unlock;
3988                 }
3989
3990                 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3991                 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3992
3993                 hdr_len = ip_tcp_len + tcp_opt_len;
3994                 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
3995                              (tp->tg3_flags2 & TG3_FLG2_HW_TSO_1_BUG))
3996                         return (tg3_tso_bug(tp, skb));
3997
3998                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3999                                TXD_FLAG_CPU_POST_DMA);
4000
4001                 skb->nh.iph->check = 0;
4002                 skb->nh.iph->tot_len = htons(mss + hdr_len);
4003                 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
4004                         skb->h.th->check = 0;
4005                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
4006                 } else {
4008                         skb->h.th->check =
4009                                 ~csum_tcpudp_magic(skb->nh.iph->saddr,
4010                                                    skb->nh.iph->daddr,
4011                                                    0, IPPROTO_TCP, 0);
4012                 }
4013
4014                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
4015                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
4016                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
4017                                 int tsflags;
4018
4019                                 tsflags = ((skb->nh.iph->ihl - 5) +
4020                                            (tcp_opt_len >> 2));
4021                                 mss |= (tsflags << 11);
4022                         }
4023                 } else {
4024                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
4025                                 int tsflags;
4026
4027                                 tsflags = ((skb->nh.iph->ihl - 5) +
4028                                            (tcp_opt_len >> 2));
4029                                 base_flags |= tsflags << 12;
4030                         }
4031                 }
4032         }
4033 #else
4034         mss = 0;
4035 #endif
4036 #if TG3_VLAN_TAG_USED
4037         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4038                 base_flags |= (TXD_FLAG_VLAN |
4039                                (vlan_tx_tag_get(skb) << 16));
4040 #endif
4041
4042         /* Queue skb data, a.k.a. the main skb fragment. */
4043         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4044
4045         tp->tx_buffers[entry].skb = skb;
4046         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4047
4048         would_hit_hwbug = 0;
4049
4050         if (tg3_4g_overflow_test(mapping, len))
4051                 would_hit_hwbug = 1;
4052
4053         tg3_set_txd(tp, entry, mapping, len, base_flags,
4054                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
4055
4056         entry = NEXT_TX(entry);
4057
4058         /* Now loop through additional data fragments, and queue them. */
4059         if (skb_shinfo(skb)->nr_frags > 0) {
4060                 unsigned int i, last;
4061
4062                 last = skb_shinfo(skb)->nr_frags - 1;
4063                 for (i = 0; i <= last; i++) {
4064                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4065
4066                         len = frag->size;
4067                         mapping = pci_map_page(tp->pdev,
4068                                                frag->page,
4069                                                frag->page_offset,
4070                                                len, PCI_DMA_TODEVICE);
4071
4072                         tp->tx_buffers[entry].skb = NULL;
4073                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4074
4075                         if (tg3_4g_overflow_test(mapping, len))
4076                                 would_hit_hwbug = 1;
4077
4078                         if (tg3_40bit_overflow_test(tp, mapping, len))
4079                                 would_hit_hwbug = 1;
4080
4081                         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
4082                                 tg3_set_txd(tp, entry, mapping, len,
4083                                             base_flags, (i == last)|(mss << 1));
4084                         else
4085                                 tg3_set_txd(tp, entry, mapping, len,
4086                                             base_flags, (i == last));
4087
4088                         entry = NEXT_TX(entry);
4089                 }
4090         }
4091
4092         if (would_hit_hwbug) {
4093                 u32 last_plus_one = entry;
4094                 u32 start;
4095
4096                 start = entry - 1 - skb_shinfo(skb)->nr_frags;
4097                 start &= (TG3_TX_RING_SIZE - 1);
4098
4099                 /* If the workaround fails due to memory/mapping
4100                  * failure, silently drop this packet.
4101                  */
4102                 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
4103                                                 &start, base_flags, mss))
4104                         goto out_unlock;
4105
4106                 entry = start;
4107         }
4108
4109         /* Packets are ready, update Tx producer idx local and on card. */
4110         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
4111
4112         tp->tx_prod = entry;
4113         if (unlikely(TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))) {
4114                 spin_lock(&tp->tx_lock);
4115                 netif_stop_queue(dev);
4116                 if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
4117                         netif_wake_queue(tp->dev);
4118                 spin_unlock(&tp->tx_lock);
4119         }
4120
4121 out_unlock:
4122         mmiowb();
4123
4124         dev->trans_start = jiffies;
4125
4126         return NETDEV_TX_OK;
4127 }
4128
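     /* Jumbo MTU handling differs by chip: 5780-class parts reuse the
      * standard rx ring with larger buffers (see tg3_init_rings()) and
      * must run with TSO disabled while a jumbo MTU is set; everything
      * else switches to the dedicated jumbo rx ring.
      */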
4129 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
4130                                int new_mtu)
4131 {
4132         dev->mtu = new_mtu;
4133
4134         if (new_mtu > ETH_DATA_LEN) {
4135                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4136                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
4137                         ethtool_op_set_tso(dev, 0);
4138                 }
4139                 else
4140                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
4141         } else {
4142                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
4143                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
4144                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
4145         }
4146 }
4147
4148 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
4149 {
4150         struct tg3 *tp = netdev_priv(dev);
4151         int err;
4152
4153         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
4154                 return -EINVAL;
4155
4156         if (!netif_running(dev)) {
4157                 /* We'll just catch it later when the
4158                  * device is brought up.
4159                  */
4160                 tg3_set_mtu(dev, tp, new_mtu);
4161                 return 0;
4162         }
4163
4164         tg3_netif_stop(tp);
4165
4166         tg3_full_lock(tp, 1);
4167
4168         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4169
4170         tg3_set_mtu(dev, tp, new_mtu);
4171
4172         err = tg3_restart_hw(tp, 0);
4173
4174         if (!err)
4175                 tg3_netif_start(tp);
4176
4177         tg3_full_unlock(tp);
4178
4179         return err;
4180 }
4181
4182 /* Free up pending packets in all rx/tx rings.
4183  *
4184  * The chip has been shut down and the driver detached from
4185  * the networking stack, so no interrupts or new tx packets will
4186  * end up in the driver.  tp->{tx,}lock is not held and we are not
4187  * in an interrupt context and thus may sleep.
4188  */
4189 static void tg3_free_rings(struct tg3 *tp)
4190 {
4191         struct ring_info *rxp;
4192         int i;
4193
4194         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4195                 rxp = &tp->rx_std_buffers[i];
4196
4197                 if (rxp->skb == NULL)
4198                         continue;
4199                 pci_unmap_single(tp->pdev,
4200                                  pci_unmap_addr(rxp, mapping),
4201                                  tp->rx_pkt_buf_sz - tp->rx_offset,
4202                                  PCI_DMA_FROMDEVICE);
4203                 dev_kfree_skb_any(rxp->skb);
4204                 rxp->skb = NULL;
4205         }
4206
4207         for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4208                 rxp = &tp->rx_jumbo_buffers[i];
4209
4210                 if (rxp->skb == NULL)
4211                         continue;
4212                 pci_unmap_single(tp->pdev,
4213                                  pci_unmap_addr(rxp, mapping),
4214                                  RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
4215                                  PCI_DMA_FROMDEVICE);
4216                 dev_kfree_skb_any(rxp->skb);
4217                 rxp->skb = NULL;
4218         }
4219
4220         for (i = 0; i < TG3_TX_RING_SIZE; ) {
4221                 struct tx_ring_info *txp;
4222                 struct sk_buff *skb;
4223                 int j;
4224
4225                 txp = &tp->tx_buffers[i];
4226                 skb = txp->skb;
4227
4228                 if (skb == NULL) {
4229                         i++;
4230                         continue;
4231                 }
4232
4233                 pci_unmap_single(tp->pdev,
4234                                  pci_unmap_addr(txp, mapping),
4235                                  skb_headlen(skb),
4236                                  PCI_DMA_TODEVICE);
4237                 txp->skb = NULL;
4238
4239                 i++;
4240
4241                 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
4242                         txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
4243                         pci_unmap_page(tp->pdev,
4244                                        pci_unmap_addr(txp, mapping),
4245                                        skb_shinfo(skb)->frags[j].size,
4246                                        PCI_DMA_TODEVICE);
4247                         i++;
4248                 }
4249
4250                 dev_kfree_skb_any(skb);
4251         }
4252 }
4253
4254 /* Initialize tx/rx rings for packet processing.
4255  *
4256  * The chip has been shut down and the driver detached from
4257  * the networking stack, so no interrupts or new tx packets will
4258  * end up in the driver.  tp->{tx,}lock are held and thus
4259  * we may not sleep.
4260  */
4261 static int tg3_init_rings(struct tg3 *tp)
4262 {
4263         u32 i;
4264
4265         /* Free up all the SKBs. */
4266         tg3_free_rings(tp);
4267
4268         /* Zero out all descriptors. */
4269         memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
4270         memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
4271         memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
4272         memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
4273
4274         tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
4275         if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
4276             (tp->dev->mtu > ETH_DATA_LEN))
4277                 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
4278
4279         /* Initialize invariants of the rings; we only set this
4280          * stuff once.  This works because the card does not
4281          * write into the rx buffer posting rings.
4282          */
4283         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4284                 struct tg3_rx_buffer_desc *rxd;
4285
4286                 rxd = &tp->rx_std[i];
4287                 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
4288                         << RXD_LEN_SHIFT;
4289                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
4290                 rxd->opaque = (RXD_OPAQUE_RING_STD |
4291                                (i << RXD_OPAQUE_INDEX_SHIFT));
4292         }
4293
4294         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4295                 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4296                         struct tg3_rx_buffer_desc *rxd;
4297
4298                         rxd = &tp->rx_jumbo[i];
4299                         rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
4300                                 << RXD_LEN_SHIFT;
4301                         rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
4302                                 RXD_FLAG_JUMBO;
4303                         rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
4304                                (i << RXD_OPAQUE_INDEX_SHIFT));
4305                 }
4306         }
4307
4308         /* Now allocate fresh SKBs for each rx ring. */
4309         for (i = 0; i < tp->rx_pending; i++) {
4310                 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
4311                         printk(KERN_WARNING PFX
4312                                "%s: Using a smaller RX standard ring, "
4313                                "only %d out of %d buffers were allocated "
4314                                "successfully.\n",
4315                                tp->dev->name, i, tp->rx_pending);
4316                         if (i == 0)
4317                                 return -ENOMEM;
4318                         tp->rx_pending = i;
4319                         break;
4320                 }
4321         }
4322
4323         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4324                 for (i = 0; i < tp->rx_jumbo_pending; i++) {
4325                         if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
4326                                              -1, i) < 0) {
4327                                 printk(KERN_WARNING PFX
4328                                        "%s: Using a smaller RX jumbo ring, "
4329                                        "only %d out of %d buffers were "
4330                                        "allocated successfully.\n",
4331                                        tp->dev->name, i, tp->rx_jumbo_pending);
4332                                 if (i == 0) {
4333                                         tg3_free_rings(tp);
4334                                         return -ENOMEM;
4335                                 }
4336                                 tp->rx_jumbo_pending = i;
4337                                 break;
4338                         }
4339                 }
4340         }
4341         return 0;
4342 }
4343
4344 /*
4345  * Must not be invoked with interrupt sources disabled and
4346  * the hardware shut down.
4347  */
4348 static void tg3_free_consistent(struct tg3 *tp)
4349 {
4350         kfree(tp->rx_std_buffers);
4351         tp->rx_std_buffers = NULL;
4352         if (tp->rx_std) {
4353                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
4354                                     tp->rx_std, tp->rx_std_mapping);
4355                 tp->rx_std = NULL;
4356         }
4357         if (tp->rx_jumbo) {
4358                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4359                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
4360                 tp->rx_jumbo = NULL;
4361         }
4362         if (tp->rx_rcb) {
4363                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4364                                     tp->rx_rcb, tp->rx_rcb_mapping);
4365                 tp->rx_rcb = NULL;
4366         }
4367         if (tp->tx_ring) {
4368                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
4369                         tp->tx_ring, tp->tx_desc_mapping);
4370                 tp->tx_ring = NULL;
4371         }
4372         if (tp->hw_status) {
4373                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
4374                                     tp->hw_status, tp->status_mapping);
4375                 tp->hw_status = NULL;
4376         }
4377         if (tp->hw_stats) {
4378                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
4379                                     tp->hw_stats, tp->stats_mapping);
4380                 tp->hw_stats = NULL;
4381         }
4382 }
4383
4384 /*
4385  * Must not be invoked with interrupt sources disabled and
4386  * the hardware shut down.  Can sleep.
4387  */
4388 static int tg3_alloc_consistent(struct tg3 *tp)
4389 {
4390         tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
4391                                       (TG3_RX_RING_SIZE +
4392                                        TG3_RX_JUMBO_RING_SIZE)) +
4393                                      (sizeof(struct tx_ring_info) *
4394                                       TG3_TX_RING_SIZE),
4395                                      GFP_KERNEL);
4396         if (!tp->rx_std_buffers)
4397                 return -ENOMEM;
4398
4399         memset(tp->rx_std_buffers, 0,
4400                (sizeof(struct ring_info) *
4401                 (TG3_RX_RING_SIZE +
4402                  TG3_RX_JUMBO_RING_SIZE)) +
4403                (sizeof(struct tx_ring_info) *
4404                 TG3_TX_RING_SIZE));
4405
4406         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
4407         tp->tx_buffers = (struct tx_ring_info *)
4408                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
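             /* All three shadow arrays (std rx, jumbo rx, tx) live in
              * the single kmalloc() block above; tg3_free_consistent()
              * only ever frees rx_std_buffers.
              */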
4409
4410         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
4411                                           &tp->rx_std_mapping);
4412         if (!tp->rx_std)
4413                 goto err_out;
4414
4415         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4416                                             &tp->rx_jumbo_mapping);
4417
4418         if (!tp->rx_jumbo)
4419                 goto err_out;
4420
4421         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4422                                           &tp->rx_rcb_mapping);
4423         if (!tp->rx_rcb)
4424                 goto err_out;
4425
4426         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
4427                                            &tp->tx_desc_mapping);
4428         if (!tp->tx_ring)
4429                 goto err_out;
4430
4431         tp->hw_status = pci_alloc_consistent(tp->pdev,
4432                                              TG3_HW_STATUS_SIZE,
4433                                              &tp->status_mapping);
4434         if (!tp->hw_status)
4435                 goto err_out;
4436
4437         tp->hw_stats = pci_alloc_consistent(tp->pdev,
4438                                             sizeof(struct tg3_hw_stats),
4439                                             &tp->stats_mapping);
4440         if (!tp->hw_stats)
4441                 goto err_out;
4442
4443         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4444         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4445
4446         return 0;
4447
4448 err_out:
4449         tg3_free_consistent(tp);
4450         return -ENOMEM;
4451 }
4452
4453 #define MAX_WAIT_CNT 1000
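     /* Each block gets up to MAX_WAIT_CNT polls of 100 usec, i.e.
      * roughly 100 ms, to report itself disabled before we give up.
      */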
4454
4455 /* To stop a block, clear the enable bit and poll until it
4456  * clears.  tp->lock is held.
4457  */
4458 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
4459 {
4460         unsigned int i;
4461         u32 val;
4462
4463         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4464                 switch (ofs) {
4465                 case RCVLSC_MODE:
4466                 case DMAC_MODE:
4467                 case MBFREE_MODE:
4468                 case BUFMGR_MODE:
4469                 case MEMARB_MODE:
4470                         /* We can't enable/disable these bits of the
4471                          * 5705/5750, so just say success.
4472                          */
4473                         return 0;
4474
4475                 default:
4476                         break;
4477                 }
4478         }
4479
4480         val = tr32(ofs);
4481         val &= ~enable_bit;
4482         tw32_f(ofs, val);
4483
4484         for (i = 0; i < MAX_WAIT_CNT; i++) {
4485                 udelay(100);
4486                 val = tr32(ofs);
4487                 if ((val & enable_bit) == 0)
4488                         break;
4489         }
4490
4491         if (i == MAX_WAIT_CNT && !silent) {
4492                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4493                        "ofs=%lx enable_bit=%x\n",
4494                        ofs, enable_bit);
4495                 return -ENODEV;
4496         }
4497
4498         return 0;
4499 }
4500
4501 /* tp->lock is held. */
4502 static int tg3_abort_hw(struct tg3 *tp, int silent)
4503 {
4504         int i, err;
4505
4506         tg3_disable_ints(tp);
4507
4508         tp->rx_mode &= ~RX_MODE_ENABLE;
4509         tw32_f(MAC_RX_MODE, tp->rx_mode);
4510         udelay(10);
4511
4512         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
4513         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
4514         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
4515         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
4516         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
4517         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
4518
4519         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
4520         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
4521         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
4522         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
4523         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
4524         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
4525         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
4526
4527         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
4528         tw32_f(MAC_MODE, tp->mac_mode);
4529         udelay(40);
4530
4531         tp->tx_mode &= ~TX_MODE_ENABLE;
4532         tw32_f(MAC_TX_MODE, tp->tx_mode);
4533
4534         for (i = 0; i < MAX_WAIT_CNT; i++) {
4535                 udelay(100);
4536                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
4537                         break;
4538         }
4539         if (i >= MAX_WAIT_CNT) {
4540                 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
4541                        "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
4542                        tp->dev->name, tr32(MAC_TX_MODE));
4543                 err |= -ENODEV;
4544         }
4545
4546         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
4547         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
4548         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
4549
4550         tw32(FTQ_RESET, 0xffffffff);
4551         tw32(FTQ_RESET, 0x00000000);
4552
4553         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
4554         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
4555
4556         if (tp->hw_status)
4557                 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4558         if (tp->hw_stats)
4559                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4560
4561         return err;
4562 }
4563
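     /* NVRAM access is arbitrated through a hardware semaphore
      * (NVRAM_SWARB) shared with the on-chip bootcode/ASF firmware:
      * set a request bit, spin until the matching grant bit appears,
      * and release it once the nvram_lock_cnt nesting count drops
      * back to zero.
      */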
4564 /* tp->lock is held. */
4565 static int tg3_nvram_lock(struct tg3 *tp)
4566 {
4567         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4568                 int i;
4569
4570                 if (tp->nvram_lock_cnt == 0) {
4571                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4572                         for (i = 0; i < 8000; i++) {
4573                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4574                                         break;
4575                                 udelay(20);
4576                         }
4577                         if (i == 8000) {
4578                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
4579                                 return -ENODEV;
4580                         }
4581                 }
4582                 tp->nvram_lock_cnt++;
4583         }
4584         return 0;
4585 }
4586
4587 /* tp->lock is held. */
4588 static void tg3_nvram_unlock(struct tg3 *tp)
4589 {
4590         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4591                 if (tp->nvram_lock_cnt > 0)
4592                         tp->nvram_lock_cnt--;
4593                 if (tp->nvram_lock_cnt == 0)
4594                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4595         }
4596 }
4597
4598 /* tp->lock is held. */
4599 static void tg3_enable_nvram_access(struct tg3 *tp)
4600 {
4601         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4602             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4603                 u32 nvaccess = tr32(NVRAM_ACCESS);
4604
4605                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4606         }
4607 }
4608
4609 /* tp->lock is held. */
4610 static void tg3_disable_nvram_access(struct tg3 *tp)
4611 {
4612         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4613             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4614                 u32 nvaccess = tr32(NVRAM_ACCESS);
4615
4616                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4617         }
4618 }
4619
4620 /* tp->lock is held. */
4621 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4622 {
4623         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4624                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
4625
4626         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4627                 switch (kind) {
4628                 case RESET_KIND_INIT:
4629                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4630                                       DRV_STATE_START);
4631                         break;
4632
4633                 case RESET_KIND_SHUTDOWN:
4634                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4635                                       DRV_STATE_UNLOAD);
4636                         break;
4637
4638                 case RESET_KIND_SUSPEND:
4639                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4640                                       DRV_STATE_SUSPEND);
4641                         break;
4642
4643                 default:
4644                         break;
4645                 }
4646         }
4647 }
4648
4649 /* tp->lock is held. */
4650 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4651 {
4652         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4653                 switch (kind) {
4654                 case RESET_KIND_INIT:
4655                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4656                                       DRV_STATE_START_DONE);
4657                         break;
4658
4659                 case RESET_KIND_SHUTDOWN:
4660                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4661                                       DRV_STATE_UNLOAD_DONE);
4662                         break;
4663
4664                 default:
4665                         break;
4666                 }
4667         }
4668 }
4669
4670 /* tp->lock is held. */
4671 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
4672 {
4673         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4674                 switch (kind) {
4675                 case RESET_KIND_INIT:
4676                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4677                                       DRV_STATE_START);
4678                         break;
4679
4680                 case RESET_KIND_SHUTDOWN:
4681                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4682                                       DRV_STATE_UNLOAD);
4683                         break;
4684
4685                 case RESET_KIND_SUSPEND:
4686                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4687                                       DRV_STATE_SUSPEND);
4688                         break;
4689
4690                 default:
4691                         break;
4692                 }
4693         }
4694 }
4695
4696 static void tg3_stop_fw(struct tg3 *);
4697
4698 /* tp->lock is held. */
4699 static int tg3_chip_reset(struct tg3 *tp)
4700 {
4701         u32 val;
4702         void (*write_op)(struct tg3 *, u32, u32);
4703         int i;
4704
4705         tg3_nvram_lock(tp);
4706
4707         /* No matching tg3_nvram_unlock() after this because
4708          * chip reset below will undo the nvram lock.
4709          */
4710         tp->nvram_lock_cnt = 0;
4711
4712         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
4713             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
4714             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
4715                 tw32(GRC_FASTBOOT_PC, 0);
4716
4717         /*
4718          * We must avoid the readl() that normally takes place.
4719          * It can lock up machines, cause machine checks, and do
4720          * other fun things.  So, temporarily disable the 5701
4721          * hardware workaround while we do the reset.
4722          */
4723         write_op = tp->write32;
4724         if (write_op == tg3_write_flush_reg32)
4725                 tp->write32 = tg3_write32;
4726
4727         /* do the reset */
4728         val = GRC_MISC_CFG_CORECLK_RESET;
4729
4730         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4731                 if (tr32(0x7e2c) == 0x60) {
4732                         tw32(0x7e2c, 0x20);
4733                 }
4734                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4735                         tw32(GRC_MISC_CFG, (1 << 29));
4736                         val |= (1 << 29);
4737                 }
4738         }
4739
4740         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4741                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
4742         tw32(GRC_MISC_CFG, val);
4743
4744         /* restore 5701 hardware bug workaround write method */
4745         tp->write32 = write_op;
4746
4747         /* Unfortunately, we have to delay before the PCI read back.
4748          * Some 575X chips will not even respond to a PCI cfg access
4749          * when the reset command is given to the chip.
4750          *
4751          * How do these hardware designers expect things to work
4752          * properly if the PCI write is posted for a long period
4753          * of time?  It is always necessary to have some method by
4754          * which a register read back can occur to push out the
4755          * write that performs the reset.
4756          *
4757          * For most tg3 variants the trick below has worked.
4758          * Ho hum...
4759          */
4760         udelay(120);
4761
4762         /* Flush PCI posted writes.  The normal MMIO registers
4763          * are inaccessible at this time, so this is the only
4764          * way to do this reliably (actually, this is no longer
4765          * the case, see above).  I tried to use indirect
4766          * register read/write but this upset some 5701 variants.
4767          */
4768         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
4769
4770         udelay(120);
4771
4772         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4773                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
4774                         int i;
4775                         u32 cfg_val;
4776
4777                         /* Wait for link training to complete.  */
4778                         for (i = 0; i < 5000; i++)
4779                                 udelay(100);
4780
4781                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
4782                         pci_write_config_dword(tp->pdev, 0xc4,
4783                                                cfg_val | (1 << 15));
4784                 }
4785                 /* Set PCIE max payload size and clear error status.  */
4786                 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
4787         }
4788
4789         /* Re-enable indirect register accesses. */
4790         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
4791                                tp->misc_host_ctrl);
4792
4793         /* Set MAX PCI retry to zero. */
4794         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
4795         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
4796             (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
4797                 val |= PCISTATE_RETRY_SAME_DMA;
4798         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
4799
4800         pci_restore_state(tp->pdev);
4801
4802         /* Make sure PCI-X relaxed ordering bit is clear. */
4803         pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
4804         val &= ~PCIX_CAPS_RELAXED_ORDERING;
4805         pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
4806
4807         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4808                 u32 val;
4809
4810                 /* Chip reset on 5780 will reset MSI enable bit,
4811                  * so need to restore it.
4812                  */
4813                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
4814                         u16 ctrl;
4815
4816                         pci_read_config_word(tp->pdev,
4817                                              tp->msi_cap + PCI_MSI_FLAGS,
4818                                              &ctrl);
4819                         pci_write_config_word(tp->pdev,
4820                                               tp->msi_cap + PCI_MSI_FLAGS,
4821                                               ctrl | PCI_MSI_FLAGS_ENABLE);
4822                         val = tr32(MSGINT_MODE);
4823                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
4824                 }
4825
4826                 val = tr32(MEMARB_MODE);
4827                 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
4828
4829         } else
4830                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
4831
4832         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
4833                 tg3_stop_fw(tp);
4834                 tw32(0x5000, 0x400);
4835         }
4836
4837         tw32(GRC_MODE, tp->grc_mode);
4838
4839         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
4840                 u32 val = tr32(0xc4);
4841
4842                 tw32(0xc4, val | (1 << 15));
4843         }
4844
4845         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
4846             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4847                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
4848                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
4849                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
4850                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
4851         }
4852
4853         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
4854                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
4855                 tw32_f(MAC_MODE, tp->mac_mode);
4856         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
4857                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
4858                 tw32_f(MAC_MODE, tp->mac_mode);
4859         } else
4860                 tw32_f(MAC_MODE, 0);
4861         udelay(40);
4862
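             /* tg3_write_sig_pre_reset() stored
              * NIC_SRAM_FIRMWARE_MBOX_MAGIC1 in the firmware mailbox
              * before the reset; bootcode signals that it has finished
              * reinitializing by writing back the one's complement of
              * that value.
              */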
4863         /* Wait for firmware initialization to complete. */
4864         for (i = 0; i < 100000; i++) {
4865                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
4866                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4867                         break;
4868                 udelay(10);
4869         }
4870
4871         /* Chip might not be fitted with firmware.  Some Sun onboard
4872          * parts are configured like that.  So don't signal the timeout
4873          * of the above loop as an error, but do report the lack of
4874          * running firmware once.
4875          */
4876         if (i >= 100000 &&
4877             !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
4878                 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
4879
4880                 printk(KERN_INFO PFX "%s: No firmware running.\n",
4881                        tp->dev->name);
4882         }
4883
4884         if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
4885             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4886                 u32 val = tr32(0x7c00);
4887
4888                 tw32(0x7c00, val | (1 << 25));
4889         }
4890
4891         /* Reprobe ASF enable state.  */
4892         tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
4893         tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
4894         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
4895         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
4896                 u32 nic_cfg;
4897
4898                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
4899                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
4900                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
4901                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
4902                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
4903                 }
4904         }
4905
4906         return 0;
4907 }
4908
4909 /* tp->lock is held. */
4910 static void tg3_stop_fw(struct tg3 *tp)
4911 {
4912         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4913                 u32 val;
4914                 int i;
4915
4916                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
4917                 val = tr32(GRC_RX_CPU_EVENT);
4918                 val |= (1 << 14);
4919                 tw32(GRC_RX_CPU_EVENT, val);
4920
4921                 /* Wait for RX cpu to ACK the event.  */
4922                 for (i = 0; i < 100; i++) {
4923                         if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
4924                                 break;
4925                         udelay(1);
4926                 }
4927         }
4928 }
4929
4930 /* tp->lock is held. */
4931 static int tg3_halt(struct tg3 *tp, int kind, int silent)
4932 {
4933         int err;
4934
4935         tg3_stop_fw(tp);
4936
4937         tg3_write_sig_pre_reset(tp, kind);
4938
4939         tg3_abort_hw(tp, silent);
4940         err = tg3_chip_reset(tp);
4941
4942         tg3_write_sig_legacy(tp, kind);
4943         tg3_write_sig_post_reset(tp, kind);
4944
4945         if (err)
4946                 return err;
4947
4948         return 0;
4949 }
4950
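     /* Firmware image used by tg3_load_5701_a0_firmware_fix() below.
      * The *_ADDR/*_LEN constants describe the text/rodata/data/bss
      * layout that the loader reproduces in the chip's internal CPU
      * scratch memory.
      */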
4951 #define TG3_FW_RELEASE_MAJOR    0x0
4952 #define TG3_FW_RELASE_MINOR     0x0
4953 #define TG3_FW_RELEASE_FIX      0x0
4954 #define TG3_FW_START_ADDR       0x08000000
4955 #define TG3_FW_TEXT_ADDR        0x08000000
4956 #define TG3_FW_TEXT_LEN         0x9c0
4957 #define TG3_FW_RODATA_ADDR      0x080009c0
4958 #define TG3_FW_RODATA_LEN       0x60
4959 #define TG3_FW_DATA_ADDR        0x08000a40
4960 #define TG3_FW_DATA_LEN         0x20
4961 #define TG3_FW_SBSS_ADDR        0x08000a60
4962 #define TG3_FW_SBSS_LEN         0xc
4963 #define TG3_FW_BSS_ADDR         0x08000a70
4964 #define TG3_FW_BSS_LEN          0x10
4965
4966 static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
4967         0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
4968         0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
4969         0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
4970         0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
4971         0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
4972         0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
4973         0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
4974         0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
4975         0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
4976         0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
4977         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
4978         0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
4979         0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
4980         0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
4981         0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
4982         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4983         0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
4984         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
4985         0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
4986         0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4987         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
4988         0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
4989         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4990         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4991         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4992         0, 0, 0, 0, 0, 0,
4993         0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
4994         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4995         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4996         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4997         0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
4998         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
4999         0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
5000         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
5001         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5002         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5003         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
5004         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5005         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5006         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5007         0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
5008         0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
5009         0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
5010         0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
5011         0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
5012         0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
5013         0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
5014         0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
5015         0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
5016         0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
5017         0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
5018         0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
5019         0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
5020         0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
5021         0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
5022         0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
5023         0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
5024         0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
5025         0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
5026         0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
5027         0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
5028         0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
5029         0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
5030         0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
5031         0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
5032         0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
5033         0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
5034         0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
5035         0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
5036         0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
5037         0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
5038         0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
5039         0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
5040         0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
5041         0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
5042         0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
5043         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
5044         0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
5045         0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
5046         0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
5047         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
5048         0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
5049         0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
5050         0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
5051         0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
5052         0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
5053         0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
5054         0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
5055         0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
5056         0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
5057         0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
5058 };
5059
5060 static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
5061         0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
5062         0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
5063         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
5064         0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
5065         0x00000000
5066 };
5067
5068 #if 0 /* All zeros, don't eat up space with it. */
5069 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
5070         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5071         0x00000000, 0x00000000, 0x00000000, 0x00000000
5072 };
5073 #endif
5074
5075 #define RX_CPU_SCRATCH_BASE     0x30000
5076 #define RX_CPU_SCRATCH_SIZE     0x04000
5077 #define TX_CPU_SCRATCH_BASE     0x34000
5078 #define TX_CPU_SCRATCH_SIZE     0x04000
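     /* Each on-chip CPU has 16 kB (0x4000 bytes) of scratch memory at
      * the offsets above; firmware segments are copied there before
      * the CPU is started.
      */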
5079
5080 /* tp->lock is held. */
5081 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
5082 {
5083         int i;
5084
5085         BUG_ON(offset == TX_CPU_BASE &&
5086             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
5087
5088         if (offset == RX_CPU_BASE) {
5089                 for (i = 0; i < 10000; i++) {
5090                         tw32(offset + CPU_STATE, 0xffffffff);
5091                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
5092                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5093                                 break;
5094                 }
5095
5096                 tw32(offset + CPU_STATE, 0xffffffff);
5097                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
5098                 udelay(10);
5099         } else {
5100                 for (i = 0; i < 10000; i++) {
5101                         tw32(offset + CPU_STATE, 0xffffffff);
5102                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
5103                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5104                                 break;
5105                 }
5106         }
5107
5108         if (i >= 10000) {
5109                 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
5110                        "and %s CPU\n",
5111                        tp->dev->name,
5112                        (offset == RX_CPU_BASE ? "RX" : "TX"));
5113                 return -ENODEV;
5114         }
5115
5116         /* Clear firmware's nvram arbitration. */
5117         if (tp->tg3_flags & TG3_FLAG_NVRAM)
5118                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
5119         return 0;
5120 }
5121
5122 struct fw_info {
5123         unsigned int text_base;
5124         unsigned int text_len;
5125         u32 *text_data;
5126         unsigned int rodata_base;
5127         unsigned int rodata_len;
5128         u32 *rodata_data;
5129         unsigned int data_base;
5130         unsigned int data_len;
5131         u32 *data_data;
5132 };
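     /* A NULL *_data pointer means the segment is all zeroes; the
      * loader below simply writes zeroes for that range (see the
      * #if 0'd tg3FwData[] above).
      */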
5133
5134 /* tp->lock is held. */
5135 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
5136                                  int cpu_scratch_size, struct fw_info *info)
5137 {
5138         int err, lock_err, i;
5139         void (*write_op)(struct tg3 *, u32, u32);
5140
5141         if (cpu_base == TX_CPU_BASE &&
5142             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5143                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
5144                        "TX cpu firmware on %s which is 5705.\n",
5145                        tp->dev->name);
5146                 return -EINVAL;
5147         }
5148
5149         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5150                 write_op = tg3_write_mem;
5151         else
5152                 write_op = tg3_write_indirect_reg32;
5153
5154         /* It is possible that bootcode is still loading at this point.
5155          * Get the nvram lock before halting the cpu.
5156          */
5157         lock_err = tg3_nvram_lock(tp);
5158         err = tg3_halt_cpu(tp, cpu_base);
5159         if (!lock_err)
5160                 tg3_nvram_unlock(tp);
5161         if (err)
5162                 goto out;
5163
5164         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
5165                 write_op(tp, cpu_scratch_base + i, 0);
5166         tw32(cpu_base + CPU_STATE, 0xffffffff);
5167         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
5168         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
5169                 write_op(tp, (cpu_scratch_base +
5170                               (info->text_base & 0xffff) +
5171                               (i * sizeof(u32))),
5172                          (info->text_data ?
5173                           info->text_data[i] : 0));
5174         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
5175                 write_op(tp, (cpu_scratch_base +
5176                               (info->rodata_base & 0xffff) +
5177                               (i * sizeof(u32))),
5178                          (info->rodata_data ?
5179                           info->rodata_data[i] : 0));
5180         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
5181                 write_op(tp, (cpu_scratch_base +
5182                               (info->data_base & 0xffff) +
5183                               (i * sizeof(u32))),
5184                          (info->data_data ?
5185                           info->data_data[i] : 0));
5186
5187         err = 0;
5188
5189 out:
5190         return err;
5191 }
5192
5193 /* tp->lock is held. */
5194 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
5195 {
5196         struct fw_info info;
5197         int err, i;
5198
5199         info.text_base = TG3_FW_TEXT_ADDR;
5200         info.text_len = TG3_FW_TEXT_LEN;
5201         info.text_data = &tg3FwText[0];
5202         info.rodata_base = TG3_FW_RODATA_ADDR;
5203         info.rodata_len = TG3_FW_RODATA_LEN;
5204         info.rodata_data = &tg3FwRodata[0];
5205         info.data_base = TG3_FW_DATA_ADDR;
5206         info.data_len = TG3_FW_DATA_LEN;
5207         info.data_data = NULL;
5208
5209         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
5210                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
5211                                     &info);
5212         if (err)
5213                 return err;
5214
5215         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
5216                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
5217                                     &info);
5218         if (err)
5219                 return err;
5220
5221         /* Now startup only the RX cpu. */
5222         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5223         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5224
5225         for (i = 0; i < 5; i++) {
5226                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
5227                         break;
5228                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5229                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
5230                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5231                 udelay(1000);
5232         }
5233         if (i >= 5) {
5234                 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
5235                        "to set RX CPU PC, is %08x should be %08x\n",
5236                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
5237                        TG3_FW_TEXT_ADDR);
5238                 return -ENODEV;
5239         }
5240         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5241         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
5242
5243         return 0;
5244 }
5245
5246 #if TG3_TSO_SUPPORT != 0
5247
5248 #define TG3_TSO_FW_RELEASE_MAJOR        0x1
5249 #define TG3_TSO_FW_RELASE_MINOR         0x6
5250 #define TG3_TSO_FW_RELEASE_FIX          0x0
5251 #define TG3_TSO_FW_START_ADDR           0x08000000
5252 #define TG3_TSO_FW_TEXT_ADDR            0x08000000
5253 #define TG3_TSO_FW_TEXT_LEN             0x1aa0
5254 #define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
5255 #define TG3_TSO_FW_RODATA_LEN           0x60
5256 #define TG3_TSO_FW_DATA_ADDR            0x08001b20
5257 #define TG3_TSO_FW_DATA_LEN             0x30
5258 #define TG3_TSO_FW_SBSS_ADDR            0x08001b50
5259 #define TG3_TSO_FW_SBSS_LEN             0x2c
5260 #define TG3_TSO_FW_BSS_ADDR             0x08001b80
5261 #define TG3_TSO_FW_BSS_LEN              0x894
5262
5263 static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
5264         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
5265         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
5266         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5267         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
5268         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
5269         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
5270         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
5271         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
5272         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
5273         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
5274         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
5275         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
5276         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
5277         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
5278         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
5279         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
5280         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
5281         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
5282         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5283         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
5284         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
5285         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
5286         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
5287         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
5288         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
5289         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
5290         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
5291         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
5292         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
5293         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5294         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
5295         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
5296         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
5297         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
5298         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
5299         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
5300         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
5301         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
5302         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5303         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
5304         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
5305         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
5306         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
5307         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
5308         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
5309         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
5310         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
5311         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5312         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
5313         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5314         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
5315         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
5316         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
5317         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
5318         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
5319         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
5320         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
5321         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
5322         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
5323         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
5324         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
5325         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
5326         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
5327         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
5328         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
5329         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
5330         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
5331         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
5332         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
5333         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
5334         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
5335         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
5336         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
5337         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
5338         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
5339         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
5340         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
5341         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
5342         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
5343         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
5344         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
5345         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
5346         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
5347         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
5348         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
5349         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
5350         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
5351         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
5352         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
5353         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
5354         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
5355         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
5356         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
5357         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
5358         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
5359         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
5360         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
5361         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
5362         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
5363         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
5364         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
5365         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
5366         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
5367         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
5368         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
5369         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
5370         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
5371         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
5372         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
5373         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
5374         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
5375         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
5376         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
5377         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
5378         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
5379         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
5380         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
5381         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
5382         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
5383         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
5384         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
5385         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
5386         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
5387         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
5388         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
5389         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
5390         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
5391         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
5392         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
5393         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
5394         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
5395         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
5396         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
5397         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
5398         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
5399         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
5400         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
5401         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
5402         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5403         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
5404         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
5405         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
5406         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
5407         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
5408         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
5409         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
5410         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
5411         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
5412         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
5413         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
5414         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
5415         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
5416         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
5417         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
5418         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
5419         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
5420         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
5421         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
5422         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
5423         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
5424         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
5425         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
5426         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
5427         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
5428         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
5429         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
5430         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
5431         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
5432         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
5433         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
5434         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
5435         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
5436         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
5437         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
5438         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
5439         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
5440         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
5441         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
5442         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
5443         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
5444         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
5445         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
5446         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
5447         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
5448         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
5449         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
5450         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
5451         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
5452         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
5453         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
5454         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
5455         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
5456         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
5457         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
5458         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
5459         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
5460         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
5461         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
5462         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
5463         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
5464         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
5465         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
5466         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
5467         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
5468         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
5469         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
5470         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
5471         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
5472         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
5473         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
5474         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
5475         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
5476         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
5477         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
5478         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
5479         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
5480         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
5481         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
5482         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
5483         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
5484         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5485         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
5486         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
5487         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
5488         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
5489         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
5490         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
5491         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
5492         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
5493         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
5494         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
5495         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
5496         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
5497         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
5498         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
5499         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
5500         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
5501         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5502         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5503         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5504         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5505         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5506         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5507         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5508         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5509         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5510         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5511         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5512         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5513         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5514         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5515         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5516         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5517         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5518         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5519         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5520         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5521         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5522         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5523         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5524         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
5525         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
5526         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
5527         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
5528         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5529         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
5530         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
5531         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
5532         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
5533         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
5534         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
5535         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
5536         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
5537         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
5538         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
5539         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
5540         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
5541         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
5542         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
5543         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
5544         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
5545         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
5546         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
5547         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
5548 };
5549
5550 static u32 tg3TsoFwRodata[] = {
5551         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5552         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
5553         0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
5554         0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
5555         0x00000000,
5556 };
5557
5558 static u32 tg3TsoFwData[] = {
5559         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
5560         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5561         0x00000000,
5562 };
5563
5564 /* 5705 needs a special version of the TSO firmware.  */
5565 #define TG3_TSO5_FW_RELEASE_MAJOR       0x1
5566 #define TG3_TSO5_FW_RELEASE_MINOR       0x2
5567 #define TG3_TSO5_FW_RELEASE_FIX         0x0
5568 #define TG3_TSO5_FW_START_ADDR          0x00010000
5569 #define TG3_TSO5_FW_TEXT_ADDR           0x00010000
5570 #define TG3_TSO5_FW_TEXT_LEN            0xe90
5571 #define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
5572 #define TG3_TSO5_FW_RODATA_LEN          0x50
5573 #define TG3_TSO5_FW_DATA_ADDR           0x00010f00
5574 #define TG3_TSO5_FW_DATA_LEN            0x20
5575 #define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
5576 #define TG3_TSO5_FW_SBSS_LEN            0x28
5577 #define TG3_TSO5_FW_BSS_ADDR            0x00010f50
5578 #define TG3_TSO5_FW_BSS_LEN             0x88
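     /* The section addresses above run nearly back to back from the
      * 0x00010000 load address: rodata begins right after the 0xe90 bytes of
      * text, and data/sbss/bss follow with a little alignment padding.  The
      * five section lengths sum to 0xfb0 bytes, which is the cpu_scratch_size
      * that tg3_load_tso_firmware() computes for this image.
      */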
5579
5580 static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
5581         0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
5582         0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
5583         0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5584         0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
5585         0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
5586         0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
5587         0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5588         0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
5589         0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
5590         0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
5591         0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
5592         0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
5593         0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
5594         0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
5595         0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
5596         0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
5597         0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
5598         0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
5599         0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
5600         0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
5601         0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
5602         0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
5603         0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
5604         0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
5605         0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
5606         0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
5607         0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
5608         0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
5609         0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
5610         0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
5611         0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5612         0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
5613         0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
5614         0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
5615         0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
5616         0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
5617         0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
5618         0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
5619         0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
5620         0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
5621         0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
5622         0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
5623         0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
5624         0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
5625         0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
5626         0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
5627         0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
5628         0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
5629         0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
5630         0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
5631         0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
5632         0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
5633         0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
5634         0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
5635         0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
5636         0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
5637         0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
5638         0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
5639         0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
5640         0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
5641         0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
5642         0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
5643         0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
5644         0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
5645         0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
5646         0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
5647         0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5648         0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
5649         0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
5650         0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
5651         0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
5652         0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
5653         0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
5654         0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
5655         0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
5656         0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
5657         0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
5658         0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
5659         0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
5660         0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
5661         0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
5662         0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
5663         0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
5664         0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
5665         0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
5666         0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
5667         0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
5668         0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
5669         0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
5670         0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
5671         0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
5672         0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
5673         0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
5674         0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
5675         0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
5676         0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
5677         0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
5678         0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
5679         0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
5680         0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
5681         0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
5682         0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
5683         0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
5684         0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
5685         0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
5686         0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
5687         0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5688         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5689         0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
5690         0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
5691         0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
5692         0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
5693         0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
5694         0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
5695         0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
5696         0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
5697         0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
5698         0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5699         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5700         0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
5701         0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
5702         0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
5703         0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
5704         0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5705         0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
5706         0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
5707         0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
5708         0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
5709         0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
5710         0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
5711         0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
5712         0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
5713         0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
5714         0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
5715         0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
5716         0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
5717         0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
5718         0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
5719         0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
5720         0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
5721         0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
5722         0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
5723         0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
5724         0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
5725         0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
5726         0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
5727         0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
5728         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5729         0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
5730         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
5731         0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
5732         0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5733         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
5734         0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
5735         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5736         0x00000000, 0x00000000, 0x00000000,
5737 };
5738
5739 static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
5740         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5741         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
5742         0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
5743         0x00000000, 0x00000000, 0x00000000,
5744 };
5745
5746 static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
5747         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
5748         0x00000000, 0x00000000, 0x00000000,
5749 };
5750
5751 /* tp->lock is held. */
5752 static int tg3_load_tso_firmware(struct tg3 *tp)
5753 {
5754         struct fw_info info;
5755         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
5756         int err, i;
5757
5758         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5759                 return 0;
5760
5761         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5762                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
5763                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
5764                 info.text_data = &tg3Tso5FwText[0];
5765                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
5766                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
5767                 info.rodata_data = &tg3Tso5FwRodata[0];
5768                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
5769                 info.data_len = TG3_TSO5_FW_DATA_LEN;
5770                 info.data_data = &tg3Tso5FwData[0];
5771                 cpu_base = RX_CPU_BASE;
5772                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
5773                 cpu_scratch_size = (info.text_len +
5774                                     info.rodata_len +
5775                                     info.data_len +
5776                                     TG3_TSO5_FW_SBSS_LEN +
5777                                     TG3_TSO5_FW_BSS_LEN);
5778         } else {
5779                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
5780                 info.text_len = TG3_TSO_FW_TEXT_LEN;
5781                 info.text_data = &tg3TsoFwText[0];
5782                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
5783                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
5784                 info.rodata_data = &tg3TsoFwRodata[0];
5785                 info.data_base = TG3_TSO_FW_DATA_ADDR;
5786                 info.data_len = TG3_TSO_FW_DATA_LEN;
5787                 info.data_data = &tg3TsoFwData[0];
5788                 cpu_base = TX_CPU_BASE;
5789                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
5790                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
5791         }
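             /* Hand the section descriptors to the common loader which, as
              * its name suggests, copies the image into the chosen CPU's
              * scratch memory before the CPU is started below.
              */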
5792
5793         err = tg3_load_firmware_cpu(tp, cpu_base,
5794                                     cpu_scratch_base, cpu_scratch_size,
5795                                     &info);
5796         if (err)
5797                 return err;
5798
5799         /* Now start up the CPU. */
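             /* Write the firmware entry point into the CPU PC and verify that
              * it stuck, halting the CPU and retrying (up to five times, 1 ms
              * apart) if the read-back does not match; clearing CPU_MODE at
              * the end releases the CPU from halt so it begins executing at
              * info.text_base.
              */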
5800         tw32(cpu_base + CPU_STATE, 0xffffffff);
5801         tw32_f(cpu_base + CPU_PC,    info.text_base);
5802
5803         for (i = 0; i < 5; i++) {
5804                 if (tr32(cpu_base + CPU_PC) == info.text_base)
5805                         break;
5806                 tw32(cpu_base + CPU_STATE, 0xffffffff);
5807                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
5808                 tw32_f(cpu_base + CPU_PC,    info.text_base);
5809                 udelay(1000);
5810         }
5811         if (i >= 5) {
5812                 printk(KERN_ERR PFX "tg3_load_tso_firmware failed to set "
5813                        "CPU PC for %s: current %08x, expected %08x\n",
5814                        tp->dev->name, tr32(cpu_base + CPU_PC),
5815                        info.text_base);
5816                 return -ENODEV;
5817         }
5818         tw32(cpu_base + CPU_STATE, 0xffffffff);
5819         tw32_f(cpu_base + CPU_MODE,  0x00000000);
5820         return 0;
5821 }
5822
5823 #endif /* TG3_TSO_SUPPORT != 0 */
5824
5825 /* tp->lock is held. */
5826 static void __tg3_set_mac_addr(struct tg3 *tp)
5827 {
5828         u32 addr_high, addr_low;
5829         int i;
5830
5831         addr_high = ((tp->dev->dev_addr[0] << 8) |
5832                      tp->dev->dev_addr[1]);
5833         addr_low = ((tp->dev->dev_addr[2] << 24) |
5834                     (tp->dev->dev_addr[3] << 16) |
5835                     (tp->dev->dev_addr[4] <<  8) |
5836                     (tp->dev->dev_addr[5] <<  0));
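             /* Worked example with the (arbitrary) address 00:10:18:aa:bb:cc:
              * addr_high becomes 0x00000010 (octets 0-1) and addr_low becomes
              * 0x18aabbcc (octets 2-5), i.e. the two most significant octets
              * live in the high register and the other four in the low one.
              */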
5837         for (i = 0; i < 4; i++) {
5838                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
5839                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
5840         }
5841
5842         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
5843             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5844                 for (i = 0; i < 12; i++) {
5845                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
5846                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
5847                 }
5848         }
5849
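             /* Seed the transmit backoff generator from the low bits of the
              * sum of all six address octets, so that different stations tend
              * to pick different backoff slots after a collision.
              */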
5850         addr_high = (tp->dev->dev_addr[0] +
5851                      tp->dev->dev_addr[1] +
5852                      tp->dev->dev_addr[2] +
5853                      tp->dev->dev_addr[3] +
5854                      tp->dev->dev_addr[4] +
5855                      tp->dev->dev_addr[5]) &
5856                 TX_BACKOFF_SEED_MASK;
5857         tw32(MAC_TX_BACKOFF_SEED, addr_high);
5858 }
5859
5860 static int tg3_set_mac_addr(struct net_device *dev, void *p)
5861 {
5862         struct tg3 *tp = netdev_priv(dev);
5863         struct sockaddr *addr = p;
5864         int err = 0;
5865
5866         if (!is_valid_ether_addr(addr->sa_data))
5867                 return -EINVAL;
5868
5869         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5870
5871         if (!netif_running(dev))
5872                 return 0;
5873
5874         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5875                 /* Reset chip so that ASF can re-init any MAC addresses it
5876                  * needs.
5877                  */
5878                 tg3_netif_stop(tp);
5879                 tg3_full_lock(tp, 1);
5880
5881                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5882                 err = tg3_restart_hw(tp, 0);
5883                 if (!err)
5884                         tg3_netif_start(tp);
5885                 tg3_full_unlock(tp);
5886         } else {
5887                 spin_lock_bh(&tp->lock);
5888                 __tg3_set_mac_addr(tp);
5889                 spin_unlock_bh(&tp->lock);
5890         }
5891
5892         return err;
5893 }
5894
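     /* Each ring's BDINFO block in NIC SRAM holds the host DMA address of the
      * ring as separate high/low words, a maxlen/flags word and, except on
      * 5705-and-later chips, the NIC-internal address of the descriptors; the
      * helper below fills those fields in that order.
      */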
5895 /* tp->lock is held. */
5896 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
5897                            dma_addr_t mapping, u32 maxlen_flags,
5898                            u32 nic_addr)
5899 {
5900         tg3_write_mem(tp,
5901                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
5902                       ((u64) mapping >> 32));
5903         tg3_write_mem(tp,
5904                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
5905                       ((u64) mapping & 0xffffffff));
5906         tg3_write_mem(tp,
5907                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
5908                        maxlen_flags);
5909
5910         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5911                 tg3_write_mem(tp,
5912                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
5913                               nic_addr);
5914 }
5915
5916 static void __tg3_set_rx_mode(struct net_device *);
5917 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
5918 {
5919         tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
5920         tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
5921         tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
5922         tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
5923         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5924                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
5925                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
5926         }
5927         tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
5928         tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
5929         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5930                 u32 val = ec->stats_block_coalesce_usecs;
5931
5932                 if (!netif_carrier_ok(tp->dev))
5933                         val = 0;
5934
5935                 tw32(HOSTCC_STAT_COAL_TICKS, val);
5936         }
5937 }
5938
5939 /* tp->lock is held. */
5940 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
5941 {
5942         u32 val, rdmac_mode;
5943         int i, err, limit;
5944
5945         tg3_disable_ints(tp);
5946
5947         tg3_stop_fw(tp);
5948
5949         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
5950
5951         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
5952                 tg3_abort_hw(tp, 1);
5953         }
5954
5955         if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) && reset_phy)
5956                 tg3_phy_reset(tp);
5957
5958         err = tg3_chip_reset(tp);
5959         if (err)
5960                 return err;
5961
5962         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
5963
5964         /* This works around an issue with Athlon chipsets on
5965          * B3 tigon3 silicon.  This bit has no effect on any
5966          * other revision.  But do not set this on PCI Express
5967          * chips.
5968          */
5969         if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
5970                 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
5971         tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5972
5973         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5974             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
5975                 val = tr32(TG3PCI_PCISTATE);
5976                 val |= PCISTATE_RETRY_SAME_DMA;
5977                 tw32(TG3PCI_PCISTATE, val);
5978         }
5979
5980         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
5981                 /* Enable some hw fixes.  */
5982                 val = tr32(TG3PCI_MSI_DATA);
5983                 val |= (1 << 26) | (1 << 28) | (1 << 29);
5984                 tw32(TG3PCI_MSI_DATA, val);
5985         }
5986
5987         /* Descriptor ring init may make accesses to the
5988          * NIC SRAM area to set up the TX descriptors, so we
5989          * can only do this after the hardware has been
5990          * successfully reset.
5991          */
5992         err = tg3_init_rings(tp);
5993         if (err)
5994                 return err;
5995
5996         /* This value is determined during the probe time DMA
5997          * engine test, tg3_test_dma.
5998          */
5999         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
6000
6001         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
6002                           GRC_MODE_4X_NIC_SEND_RINGS |
6003                           GRC_MODE_NO_TX_PHDR_CSUM |
6004                           GRC_MODE_NO_RX_PHDR_CSUM);
6005         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
6006
6007         /* Pseudo-header checksum is done by hardware logic and not
6008          * the offload processors, so make the chip do the pseudo-
6009          * header checksums on receive.  For transmit it is more
6010          * convenient to do the pseudo-header checksum in software
6011          * as Linux does that on transmit for us in all cases.
6012          */
6013         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
6014
6015         tw32(GRC_MODE,
6016              tp->grc_mode |
6017              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
6018
6019         /* Set up the timer prescaler register.  Clock is always 66 MHz. */
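             /* A prescaler value of 65 divides the 66 MHz core clock by
              * 65 + 1, which presumably yields the 1 us timer tick implied by
              * the microsecond-based coalescing parameters programmed later.
              */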
6020         val = tr32(GRC_MISC_CFG);
6021         val &= ~0xff;
6022         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
6023         tw32(GRC_MISC_CFG, val);
6024
6025         /* Initialize MBUF/DESC pool. */
6026         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6027                 /* Do nothing.  */
6028         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
6029                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
6030                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
6031                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
6032                 else
6033                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
6034                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
6035                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
6036         }
6037 #if TG3_TSO_SUPPORT != 0
6038         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6039                 int fw_len;
6040
6041                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
6042                           TG3_TSO5_FW_RODATA_LEN +
6043                           TG3_TSO5_FW_DATA_LEN +
6044                           TG3_TSO5_FW_SBSS_LEN +
6045                           TG3_TSO5_FW_BSS_LEN);
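                     /* With the lengths defined above this sum is 0xfb0 bytes;
                      * the next statement rounds it up to a 128-byte boundary
                      * (0x1000 here), presumably so the mbuf pool placed right
                      * after the firmware image stays well aligned.
                      */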
6046                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
6047                 tw32(BUFMGR_MB_POOL_ADDR,
6048                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
6049                 tw32(BUFMGR_MB_POOL_SIZE,
6050                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
6051         }
6052 #endif
6053
6054         if (tp->dev->mtu <= ETH_DATA_LEN) {
6055                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6056                      tp->bufmgr_config.mbuf_read_dma_low_water);
6057                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6058                      tp->bufmgr_config.mbuf_mac_rx_low_water);
6059                 tw32(BUFMGR_MB_HIGH_WATER,
6060                      tp->bufmgr_config.mbuf_high_water);
6061         } else {
6062                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6063                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
6064                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6065                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
6066                 tw32(BUFMGR_MB_HIGH_WATER,
6067                      tp->bufmgr_config.mbuf_high_water_jumbo);
6068         }
6069         tw32(BUFMGR_DMA_LOW_WATER,
6070              tp->bufmgr_config.dma_low_water);
6071         tw32(BUFMGR_DMA_HIGH_WATER,
6072              tp->bufmgr_config.dma_high_water);
6073
6074         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
6075         for (i = 0; i < 2000; i++) {
6076                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
6077                         break;
6078                 udelay(10);
6079         }
6080         if (i >= 2000) {
6081                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
6082                        tp->dev->name);
6083                 return -ENODEV;
6084         }
6085
6086         /* Setup replenish threshold. */
6087         val = tp->rx_pending / 8;
6088         if (val == 0)
6089                 val = 1;
6090         else if (val > tp->rx_std_max_post)
6091                 val = tp->rx_std_max_post;
6092
6093         tw32(RCVBDI_STD_THRESH, val);
6094
6095         /* Initialize TG3_BDINFO's at:
6096          *  RCVDBDI_STD_BD:     standard eth size rx ring
6097          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
6098          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
6099          *
6100          * like so:
6101          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
6102          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
6103          *                              ring attribute flags
6104          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
6105          *
6106          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
6107          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
6108          *
6109          * The size of each ring is fixed in the firmware, but the location is
6110          * configurable.
6111          */
6112         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6113              ((u64) tp->rx_std_mapping >> 32));
6114         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6115              ((u64) tp->rx_std_mapping & 0xffffffff));
6116         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
6117              NIC_SRAM_RX_BUFFER_DESC);
6118
6119         /* Don't even try to program the JUMBO/MINI buffer descriptor
6120          * configs on 5705.
6121          */
6122         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6123                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6124                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
6125         } else {
6126                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6127                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6128
6129                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
6130                      BDINFO_FLAGS_DISABLED);
6131
6132                 /* Setup replenish threshold. */
6133                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
6134
6135                 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
6136                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6137                              ((u64) tp->rx_jumbo_mapping >> 32));
6138                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6139                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
6140                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6141                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6142                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
6143                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
6144                 } else {
6145                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6146                              BDINFO_FLAGS_DISABLED);
6147                 }
6148
6149         }
6150
6151         /* There is only one send ring on 5705/5750, no need to explicitly
6152          * disable the others.
6153          */
6154         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6155                 /* Clear out send RCB ring in SRAM. */
6156                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
6157                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6158                                       BDINFO_FLAGS_DISABLED);
6159         }
6160
6161         tp->tx_prod = 0;
6162         tp->tx_cons = 0;
6163         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6164         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6165
6166         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
6167                        tp->tx_desc_mapping,
6168                        (TG3_TX_RING_SIZE <<
6169                         BDINFO_FLAGS_MAXLEN_SHIFT),
6170                        NIC_SRAM_TX_BUFFER_DESC);
6171
6172         /* There is only one receive return ring on 5705/5750, no need
6173          * to explicitly disable the others.
6174          */
6175         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6176                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
6177                      i += TG3_BDINFO_SIZE) {
6178                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6179                                       BDINFO_FLAGS_DISABLED);
6180                 }
6181         }
6182
6183         tp->rx_rcb_ptr = 0;
6184         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
6185
6186         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
6187                        tp->rx_rcb_mapping,
6188                        (TG3_RX_RCB_RING_SIZE(tp) <<
6189                         BDINFO_FLAGS_MAXLEN_SHIFT),
6190                        0);
6191
6192         tp->rx_std_ptr = tp->rx_pending;
6193         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
6194                      tp->rx_std_ptr);
6195
6196         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
6197                                                 tp->rx_jumbo_pending : 0;
6198         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
6199                      tp->rx_jumbo_ptr);
6200
6201         /* Initialize MAC address and backoff seed. */
6202         __tg3_set_mac_addr(tp);
6203
6204         /* MTU + ethernet header + FCS + optional VLAN tag */
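             /* ETH_HLEN covers the 14-byte header and the extra 8 bytes allow
              * for the 4-byte FCS plus a 4-byte VLAN tag, so the standard
              * 1500-byte MTU programs a maximum frame size of 1522 bytes.
              */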
6205         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
6206
6207         /* The slot time is changed by tg3_setup_phy if we
6208          * run at gigabit with half duplex.
6209          */
6210         tw32(MAC_TX_LENGTHS,
6211              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6212              (6 << TX_LENGTHS_IPG_SHIFT) |
6213              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6214
6215         /* Receive rules. */
6216         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
6217         tw32(RCVLPC_CONFIG, 0x0181);
6218
6219         /* Calculate RDMAC_MODE setting early, we need it to determine
6220          * the RCVLPC_STATE_ENABLE mask.
6221          */
6222         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
6223                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
6224                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
6225                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
6226                       RDMAC_MODE_LNGREAD_ENAB);
6227         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
6228                 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
6229
6230         /* If statement applies to 5705 and 5750 PCI devices only */
6231         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6232              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6233             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
6234                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
6235                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6236                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6237                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
6238                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6239                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
6240                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6241                 }
6242         }
6243
6244         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6245                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6246
6247 #if TG3_TSO_SUPPORT != 0
6248         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6249                 rdmac_mode |= (1 << 27);
6250 #endif
6251
6252         /* Receive/send statistics. */
6253         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6254                 val = tr32(RCVLPC_STATS_ENABLE);
6255                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
6256                 tw32(RCVLPC_STATS_ENABLE, val);
6257         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
6258                    (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6259                 val = tr32(RCVLPC_STATS_ENABLE);
6260                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
6261                 tw32(RCVLPC_STATS_ENABLE, val);
6262         } else {
6263                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
6264         }
6265         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
6266         tw32(SNDDATAI_STATSENAB, 0xffffff);
6267         tw32(SNDDATAI_STATSCTRL,
6268              (SNDDATAI_SCTRL_ENABLE |
6269               SNDDATAI_SCTRL_FASTUPD));
6270
6271         /* Setup host coalescing engine. */
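             /* Writing 0 asks the engine to stop; the loop below allows it up
              * to 2000 * 10 us = 20 ms to clear HOSTCC_MODE_ENABLE before the
              * new coalescing parameters are programmed.
              */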
6272         tw32(HOSTCC_MODE, 0);
6273         for (i = 0; i < 2000; i++) {
6274                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
6275                         break;
6276                 udelay(10);
6277         }
6278
6279         __tg3_set_coalesce(tp, &tp->coal);
6280
6281         /* set status block DMA address */
6282         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6283              ((u64) tp->status_mapping >> 32));
6284         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6285              ((u64) tp->status_mapping & 0xffffffff));
6286
6287         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6288                 /* Status/statistics block address.  See tg3_timer,
6289                  * the tg3_periodic_fetch_stats call there, and
6290                  * tg3_get_stats to see how this works for 5705/5750 chips.
6291                  */
6292                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6293                      ((u64) tp->stats_mapping >> 32));
6294                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6295                      ((u64) tp->stats_mapping & 0xffffffff));
6296                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
6297                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
6298         }
6299
6300         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
6301
6302         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
6303         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
6304         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6305                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
6306
6307         /* Clear statistics/status block in chip, and status block in ram. */
6308         for (i = NIC_SRAM_STATS_BLK;
6309              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
6310              i += sizeof(u32)) {
6311                 tg3_write_mem(tp, i, 0);
6312                 udelay(40);
6313         }
6314         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
6315
6316         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6317                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
6318                 /* reset to prevent losing 1st rx packet intermittently */
6319                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6320                 udelay(10);
6321         }
6322
6323         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
6324                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
6325         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
6326         udelay(40);
6327
6328         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
6329          * If TG3_FLAG_EEPROM_WRITE_PROT is set, we should read the
6330          * register to preserve the GPIO settings for LOMs. The GPIOs,
6331          * whether used as inputs or outputs, are set by boot code after
6332          * reset.
6333          */
6334         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
6335                 u32 gpio_mask;
6336
6337                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE2 |
6338                             GRC_LCLCTRL_GPIO_OUTPUT0 | GRC_LCLCTRL_GPIO_OUTPUT2;
6339
6340                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
6341                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
6342                                      GRC_LCLCTRL_GPIO_OUTPUT3;
6343
6344                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6345                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
6346
6347                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
6348
6349                 /* GPIO1 must be driven high for eeprom write protect */
6350                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
6351                                        GRC_LCLCTRL_GPIO_OUTPUT1);
6352         }
6353         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6354         udelay(100);
6355
6356         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
6357         tp->last_tag = 0;
6358
6359         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6360                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
6361                 udelay(40);
6362         }
6363
6364         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
6365                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
6366                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
6367                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
6368                WDMAC_MODE_LNGREAD_ENAB);
6369
6370         /* If statement applies to 5705 and 5750 PCI devices only */
6371         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6372              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6373             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6374                 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
6375                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6376                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6377                         /* nothing */
6378                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6379                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
6380                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
6381                         val |= WDMAC_MODE_RX_ACCEL;
6382                 }
6383         }
6384
6385         /* Enable host coalescing bug fix */
6386         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
6387             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787))
6388                 val |= (1 << 29);
6389
6390         tw32_f(WDMAC_MODE, val);
6391         udelay(40);
6392
6393         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
6394                 val = tr32(TG3PCI_X_CAPS);
6395                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
6396                         val &= ~PCIX_CAPS_BURST_MASK;
6397                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6398                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6399                         val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
6400                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6401                         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
6402                                 val |= (tp->split_mode_max_reqs <<
6403                                         PCIX_CAPS_SPLIT_SHIFT);
6404                 }
6405                 tw32(TG3PCI_X_CAPS, val);
6406         }
6407
6408         tw32_f(RDMAC_MODE, rdmac_mode);
6409         udelay(40);
6410
6411         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
6412         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6413                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
6414         tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
6415         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
6416         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
6417         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
6418         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
6419 #if TG3_TSO_SUPPORT != 0
6420         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6421                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
6422 #endif
6423         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
6424         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
6425
6426         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
6427                 err = tg3_load_5701_a0_firmware_fix(tp);
6428                 if (err)
6429                         return err;
6430         }
6431
6432 #if TG3_TSO_SUPPORT != 0
6433         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6434                 err = tg3_load_tso_firmware(tp);
6435                 if (err)
6436                         return err;
6437         }
6438 #endif
6439
6440         tp->tx_mode = TX_MODE_ENABLE;
6441         tw32_f(MAC_TX_MODE, tp->tx_mode);
6442         udelay(100);
6443
6444         tp->rx_mode = RX_MODE_ENABLE;
6445         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6446                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
6447
6448         tw32_f(MAC_RX_MODE, tp->rx_mode);
6449         udelay(10);
6450
6451         if (tp->link_config.phy_is_low_power) {
6452                 tp->link_config.phy_is_low_power = 0;
6453                 tp->link_config.speed = tp->link_config.orig_speed;
6454                 tp->link_config.duplex = tp->link_config.orig_duplex;
6455                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
6456         }
6457
6458         tp->mi_mode = MAC_MI_MODE_BASE;
6459         tw32_f(MAC_MI_MODE, tp->mi_mode);
6460         udelay(80);
6461
6462         tw32(MAC_LED_CTRL, tp->led_ctrl);
6463
6464         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
6465         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6466                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6467                 udelay(10);
6468         }
6469         tw32_f(MAC_RX_MODE, tp->rx_mode);
6470         udelay(10);
6471
6472         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6473                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
6474                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
6475                         /* Set drive transmission level to 1.2V only
6476                          * if the signal pre-emphasis bit is not set. */
6477                         val = tr32(MAC_SERDES_CFG);
6478                         val &= 0xfffff000;
6479                         val |= 0x880;
6480                         tw32(MAC_SERDES_CFG, val);
6481                 }
6482                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
6483                         tw32(MAC_SERDES_CFG, 0x616000);
6484         }
6485
6486         /* Prevent chip from dropping frames when flow control
6487          * is enabled.
6488          */
6489         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
6490
6491         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
6492             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6493                 /* Use hardware link auto-negotiation */
6494                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
6495         }
6496
6497         if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
6498             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
6499                 u32 tmp;
6500
6501                 tmp = tr32(SERDES_RX_CTRL);
6502                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
6503                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
6504                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
6505                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6506         }
6507
6508         err = tg3_setup_phy(tp, reset_phy);
6509         if (err)
6510                 return err;
6511
6512         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6513                 u32 tmp;
6514
6515                 /* Clear CRC stats. */
6516                 if (!tg3_readphy(tp, 0x1e, &tmp)) {
6517                         tg3_writephy(tp, 0x1e, tmp | 0x8000);
6518                         tg3_readphy(tp, 0x14, &tmp);
6519                 }
6520         }
6521
6522         __tg3_set_rx_mode(tp->dev);
6523
6524         /* Initialize receive rules. */
6525         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
6526         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
6527         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
6528         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
6529
6530         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6531             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6532                 limit = 8;
6533         else
6534                 limit = 16;
6535         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
6536                 limit -= 4;
6537         switch (limit) {
6538         case 16:
6539                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
6540         case 15:
6541                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
6542         case 14:
6543                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
6544         case 13:
6545                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
6546         case 12:
6547                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
6548         case 11:
6549                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
6550         case 10:
6551                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
6552         case 9:
6553                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
6554         case 8:
6555                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
6556         case 7:
6557                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
6558         case 6:
6559                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
6560         case 5:
6561                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
6562         case 4:
6563                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
6564         case 3:
6565                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
6566         case 2:
6567         case 1:
6568
6569         default:
6570                 break;
6571         }
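
             /* The fall-through switch above clears receive rules 4 .. limit-1;
              * rules 0 and 1 are programmed a few lines earlier and rules 2 and
              * 3 are left untouched.  Illustration only (never compiled), and
              * assuming the RULE/VALUE register pairs sit at a fixed 8-byte
              * stride, the chain is equivalent to:
              */
     #if 0
             for (i = 4; i < limit; i++) {
                     tw32(MAC_RCV_RULE_0 + (i * 8), 0);
                     tw32(MAC_RCV_VALUE_0 + (i * 8), 0);
             }
     #endif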
6572
6573         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
6574
6575         return 0;
6576 }
6577
6578 /* Called at device open time to get the chip ready for
6579  * packet processing.  Invoked with tp->lock held.
6580  */
6581 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
6582 {
6583         int err;
6584
6585         /* Force the chip into D0. */
6586         err = tg3_set_power_state(tp, PCI_D0);
6587         if (err)
6588                 goto out;
6589
6590         tg3_switch_clocks(tp);
6591
6592         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
6593
6594         err = tg3_reset_hw(tp, reset_phy);
6595
6596 out:
6597         return err;
6598 }
6599
6600 #define TG3_STAT_ADD32(PSTAT, REG) \
6601 do {    u32 __val = tr32(REG); \
6602         (PSTAT)->low += __val; \
6603         if ((PSTAT)->low < __val) \
6604                 (PSTAT)->high += 1; \
6605 } while (0)
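
     /* TG3_STAT_ADD32 folds a 32-bit value read from a MAC statistics
      * register into a 64-bit {high,low} software counter, detecting the
      * wrap of the low word with an unsigned comparison.  A standalone
      * equivalent with a hypothetical name (illustration only, never
      * compiled):
      */
     #if 0
     static void stat_add32_example(tg3_stat64_t *stat, u32 val)
     {
             u32 old_low = stat->low;

             stat->low += val;
             if (stat->low < old_low)        /* unsigned wrap => carry */
                     stat->high += 1;
     }
     #endif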
6606
6607 static void tg3_periodic_fetch_stats(struct tg3 *tp)
6608 {
6609         struct tg3_hw_stats *sp = tp->hw_stats;
6610
6611         if (!netif_carrier_ok(tp->dev))
6612                 return;
6613
6614         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
6615         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
6616         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
6617         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
6618         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
6619         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
6620         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
6621         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
6622         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
6623         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
6624         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
6625         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
6626         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
6627
6628         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
6629         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
6630         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
6631         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
6632         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
6633         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
6634         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
6635         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
6636         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
6637         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
6638         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
6639         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
6640         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
6641         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
6642
6643         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
6644         TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
6645         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
6646 }
6647
6648 static void tg3_timer(unsigned long __opaque)
6649 {
6650         struct tg3 *tp = (struct tg3 *) __opaque;
6651
6652         if (tp->irq_sync)
6653                 goto restart_timer;
6654
6655         spin_lock(&tp->lock);
6656
6657         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6658                 /* All of this is necessary because, when using non-tagged
6659                  * IRQ status, the mailbox/status_block protocol the chip
6660                  * uses with the CPU is race prone.
6661                  */
6662                 if (tp->hw_status->status & SD_STATUS_UPDATED) {
6663                         tw32(GRC_LOCAL_CTRL,
6664                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
6665                 } else {
6666                         tw32(HOSTCC_MODE, tp->coalesce_mode |
6667                              (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
6668                 }
6669
6670                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
6671                         tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
6672                         spin_unlock(&tp->lock);
6673                         schedule_work(&tp->reset_task);
6674                         return;
6675                 }
6676         }
6677
6678         /* This part only runs once per second. */
6679         if (!--tp->timer_counter) {
6680                 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6681                         tg3_periodic_fetch_stats(tp);
6682
6683                 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
6684                         u32 mac_stat;
6685                         int phy_event;
6686
6687                         mac_stat = tr32(MAC_STATUS);
6688
6689                         phy_event = 0;
6690                         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
6691                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
6692                                         phy_event = 1;
6693                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
6694                                 phy_event = 1;
6695
6696                         if (phy_event)
6697                                 tg3_setup_phy(tp, 0);
6698                 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
6699                         u32 mac_stat = tr32(MAC_STATUS);
6700                         int need_setup = 0;
6701
6702                         if (netif_carrier_ok(tp->dev) &&
6703                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
6704                                 need_setup = 1;
6705                         }
6706                         if (!netif_carrier_ok(tp->dev) &&
6707                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
6708                                          MAC_STATUS_SIGNAL_DET))) {
6709                                 need_setup = 1;
6710                         }
6711                         if (need_setup) {
6712                                 tw32_f(MAC_MODE,
6713                                      (tp->mac_mode &
6714                                       ~MAC_MODE_PORT_MODE_MASK));
6715                                 udelay(40);
6716                                 tw32_f(MAC_MODE, tp->mac_mode);
6717                                 udelay(40);
6718                                 tg3_setup_phy(tp, 0);
6719                         }
6720                 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
6721                         tg3_serdes_parallel_detect(tp);
6722
6723                 tp->timer_counter = tp->timer_multiplier;
6724         }
6725
6726         /* Heartbeat is only sent once every 2 seconds.  */
6727         if (!--tp->asf_counter) {
6728                 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6729                         u32 val;
6730
6731                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
6732                                       FWCMD_NICDRV_ALIVE2);
6733                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
6734                         /* 5 second timeout */
6735                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
6736                         val = tr32(GRC_RX_CPU_EVENT);
6737                         val |= (1 << 14);
6738                         tw32(GRC_RX_CPU_EVENT, val);
6739                 }
6740                 tp->asf_counter = tp->asf_multiplier;
6741         }
6742
6743         spin_unlock(&tp->lock);
6744
6745 restart_timer:
6746         tp->timer.expires = jiffies + tp->timer_offset;
6747         add_timer(&tp->timer);
6748 }
6749
6750 static int tg3_request_irq(struct tg3 *tp)
6751 {
6752         irqreturn_t (*fn)(int, void *, struct pt_regs *);
6753         unsigned long flags;
6754         struct net_device *dev = tp->dev;
6755
6756         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6757                 fn = tg3_msi;
6758                 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
6759                         fn = tg3_msi_1shot;
6760                 flags = IRQF_SAMPLE_RANDOM;
6761         } else {
6762                 fn = tg3_interrupt;
6763                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6764                         fn = tg3_interrupt_tagged;
6765                 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
6766         }
6767         return request_irq(tp->pdev->irq, fn, flags, dev->name, dev);
6768 }
6769
6770 static int tg3_test_interrupt(struct tg3 *tp)
6771 {
6772         struct net_device *dev = tp->dev;
6773         int err, i;
6774         u32 int_mbox = 0;
6775
6776         if (!netif_running(dev))
6777                 return -ENODEV;
6778
6779         tg3_disable_ints(tp);
6780
6781         free_irq(tp->pdev->irq, dev);
6782
6783         err = request_irq(tp->pdev->irq, tg3_test_isr,
6784                           IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
6785         if (err)
6786                 return err;
6787
6788         tp->hw_status->status &= ~SD_STATUS_UPDATED;
6789         tg3_enable_ints(tp);
6790
6791         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
6792                HOSTCC_MODE_NOW);
6793
6794         for (i = 0; i < 5; i++) {
6795                 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
6796                                         TG3_64BIT_REG_LOW);
6797                 if (int_mbox != 0)
6798                         break;
6799                 msleep(10);
6800         }
6801
6802         tg3_disable_ints(tp);
6803
6804         free_irq(tp->pdev->irq, dev);
6805
6806         err = tg3_request_irq(tp);
6807
6808         if (err)
6809                 return err;
6810
6811         if (int_mbox != 0)
6812                 return 0;
6813
6814         return -EIO;
6815 }
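
     /* tg3_test_interrupt() above uses a simple bounded poll: install a
      * test ISR, force a status update via HOSTCC_MODE_NOW, then re-read
      * the interrupt mailbox a few times with a sleep in between.  The
      * generic shape, with hypothetical names (illustration only, never
      * compiled):
      */
     #if 0
     static int poll_for_nonzero_example(u32 (*read_status)(struct tg3 *),
                                         struct tg3 *tp, int tries)
     {
             while (tries--) {
                     if (read_status(tp) != 0)
                             return 0;       /* event observed */
                     msleep(10);
             }
             return -EIO;                    /* nothing seen, give up */
     }
     #endif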
6816
6817 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but
6818  * INTx mode is successfully restored.
6819  */
6820 static int tg3_test_msi(struct tg3 *tp)
6821 {
6822         struct net_device *dev = tp->dev;
6823         int err;
6824         u16 pci_cmd;
6825
6826         if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
6827                 return 0;
6828
6829         /* Turn off SERR reporting in case MSI terminates with Master
6830          * Abort.
6831          */
6832         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
6833         pci_write_config_word(tp->pdev, PCI_COMMAND,
6834                               pci_cmd & ~PCI_COMMAND_SERR);
6835
6836         err = tg3_test_interrupt(tp);
6837
6838         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
6839
6840         if (!err)
6841                 return 0;
6842
6843         /* other failures */
6844         if (err != -EIO)
6845                 return err;
6846
6847         /* MSI test failed, go back to INTx mode */
6848         printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
6849                "switching to INTx mode. Please report this failure to "
6850                "the PCI maintainer and include system chipset information.\n",
6851                        tp->dev->name);
6852
6853         free_irq(tp->pdev->irq, dev);
6854         pci_disable_msi(tp->pdev);
6855
6856         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6857
6858         err = tg3_request_irq(tp);
6859         if (err)
6860                 return err;
6861
6862         /* Need to reset the chip because the MSI cycle may have terminated
6863          * with Master Abort.
6864          */
6865         tg3_full_lock(tp, 1);
6866
6867         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6868         err = tg3_init_hw(tp, 1);
6869
6870         tg3_full_unlock(tp);
6871
6872         if (err)
6873                 free_irq(tp->pdev->irq, dev);
6874
6875         return err;
6876 }
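
     /* tg3_test_msi() above is the usual "try MSI, fall back to INTx"
      * pattern: when no interrupt is seen, the MSI vector is freed, MSI is
      * disabled, the legacy handler is re-requested and the chip is reset
      * in case the failed MSI cycle ended in a Master Abort.  Reduced to
      * its bare shape, without the delivery test or the chip reset, and
      * with hypothetical names (illustration only, never compiled):
      */
     #if 0
     static int msi_or_intx_example(struct pci_dev *pdev,
                                    irqreturn_t (*isr)(int, void *, struct pt_regs *),
                                    void *ctx)
     {
             if (pci_enable_msi(pdev) == 0) {
                     if (request_irq(pdev->irq, isr, 0, "example", ctx) == 0)
                             return 0;               /* MSI path works */
                     pci_disable_msi(pdev);          /* revert to INTx */
             }
             return request_irq(pdev->irq, isr, IRQF_SHARED, "example", ctx);
     }
     #endif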
6877
6878 static int tg3_open(struct net_device *dev)
6879 {
6880         struct tg3 *tp = netdev_priv(dev);
6881         int err;
6882
6883         tg3_full_lock(tp, 0);
6884
6885         err = tg3_set_power_state(tp, PCI_D0);
6886         if (err) {
                     tg3_full_unlock(tp);
6887                 return err;
             }
6888
6889         tg3_disable_ints(tp);
6890         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
6891
6892         tg3_full_unlock(tp);
6893
6894         /* The placement of this call is tied
6895          * to the setup and use of Host TX descriptors.
6896          */
6897         err = tg3_alloc_consistent(tp);
6898         if (err)
6899                 return err;
6900
6901         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
6902             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
6903             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX) &&
6904             !((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) &&
6905               (tp->pdev_peer == tp->pdev))) {
6906                 /* All MSI supporting chips should support tagged
6907                  * status.  Assert that this is the case.
6908                  */
6909                 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6910                         printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
6911                                "Not using MSI.\n", tp->dev->name);
6912                 } else if (pci_enable_msi(tp->pdev) == 0) {
6913                         u32 msi_mode;
6914
6915                         msi_mode = tr32(MSGINT_MODE);
6916                         tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
6917                         tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
6918                 }
6919         }
6920         err = tg3_request_irq(tp);
6921
6922         if (err) {
6923                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6924                         pci_disable_msi(tp->pdev);
6925                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6926                 }
6927                 tg3_free_consistent(tp);
6928                 return err;
6929         }
6930
6931         tg3_full_lock(tp, 0);
6932
6933         err = tg3_init_hw(tp, 1);
6934         if (err) {
6935                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6936                 tg3_free_rings(tp);
6937         } else {
6938                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6939                         tp->timer_offset = HZ;
6940                 else
6941                         tp->timer_offset = HZ / 10;
6942
6943                 BUG_ON(tp->timer_offset > HZ);
6944                 tp->timer_counter = tp->timer_multiplier =
6945                         (HZ / tp->timer_offset);
6946                 tp->asf_counter = tp->asf_multiplier =
6947                         ((HZ / tp->timer_offset) * 2);
6948
6949                 init_timer(&tp->timer);
6950                 tp->timer.expires = jiffies + tp->timer_offset;
6951                 tp->timer.data = (unsigned long) tp;
6952                 tp->timer.function = tg3_timer;
6953         }
6954
6955         tg3_full_unlock(tp);
6956
6957         if (err) {
6958                 free_irq(tp->pdev->irq, dev);
6959                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6960                         pci_disable_msi(tp->pdev);
6961                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6962                 }
6963                 tg3_free_consistent(tp);
6964                 return err;
6965         }
6966
6967         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6968                 err = tg3_test_msi(tp);
6969
6970                 if (err) {
6971                         tg3_full_lock(tp, 0);
6972
6973                         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6974                                 pci_disable_msi(tp->pdev);
6975                                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6976                         }
6977                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6978                         tg3_free_rings(tp);
6979                         tg3_free_consistent(tp);
6980
6981                         tg3_full_unlock(tp);
6982
6983                         return err;
6984                 }
6985
6986                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6987                         if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
6988                                 u32 val = tr32(0x7c04);
6989
6990                                 tw32(0x7c04, val | (1 << 29));
6991                         }
6992                 }
6993         }
6994
6995         tg3_full_lock(tp, 0);
6996
6997         add_timer(&tp->timer);
6998         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
6999         tg3_enable_ints(tp);
7000
7001         tg3_full_unlock(tp);
7002
7003         netif_start_queue(dev);
7004
7005         return 0;
7006 }
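
     /* Worked example for the timer arithmetic in tg3_open() above, assuming
      * HZ = 1000: without TAGGED_STATUS, timer_offset = HZ / 10 = 100 jiffies,
      * so tg3_timer() runs ten times per second; timer_counter =
      * timer_multiplier = 10 makes the once-per-second block run on every
      * tenth tick, and asf_counter = asf_multiplier = 20 gives the ASF
      * heartbeat its two-second period.  With TAGGED_STATUS the timer itself
      * runs once per second and the dividers collapse to 1 and 2.
      */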
7007
7008 #if 0
7009 /*static*/ void tg3_dump_state(struct tg3 *tp)
7010 {
7011         u32 val32, val32_2, val32_3, val32_4, val32_5;
7012         u16 val16;
7013         int i;
7014
7015         pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
7016         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
7017         printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
7018                val16, val32);
7019
7020         /* MAC block */
7021         printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
7022                tr32(MAC_MODE), tr32(MAC_STATUS));
7023         printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
7024                tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
7025         printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
7026                tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
7027         printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
7028                tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
7029
7030         /* Send data initiator control block */
7031         printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
7032                tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
7033         printk("       SNDDATAI_STATSCTRL[%08x]\n",
7034                tr32(SNDDATAI_STATSCTRL));
7035
7036         /* Send data completion control block */
7037         printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
7038
7039         /* Send BD ring selector block */
7040         printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
7041                tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
7042
7043         /* Send BD initiator control block */
7044         printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
7045                tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
7046
7047         /* Send BD completion control block */
7048         printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
7049
7050         /* Receive list placement control block */
7051         printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
7052                tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
7053         printk("       RCVLPC_STATSCTRL[%08x]\n",
7054                tr32(RCVLPC_STATSCTRL));
7055
7056         /* Receive data and receive BD initiator control block */
7057         printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
7058                tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
7059
7060         /* Receive data completion control block */
7061         printk("DEBUG: RCVDCC_MODE[%08x]\n",
7062                tr32(RCVDCC_MODE));
7063
7064         /* Receive BD initiator control block */
7065         printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
7066                tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
7067
7068         /* Receive BD completion control block */
7069         printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
7070                tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
7071
7072         /* Receive list selector control block */
7073         printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
7074                tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
7075
7076         /* Mbuf cluster free block */
7077         printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
7078                tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
7079
7080         /* Host coalescing control block */
7081         printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
7082                tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
7083         printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
7084                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
7085                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
7086         printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
7087                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
7088                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
7089         printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
7090                tr32(HOSTCC_STATS_BLK_NIC_ADDR));
7091         printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
7092                tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
7093
7094         /* Memory arbiter control block */
7095         printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
7096                tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
7097
7098         /* Buffer manager control block */
7099         printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
7100                tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
7101         printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
7102                tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
7103         printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
7104                "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
7105                tr32(BUFMGR_DMA_DESC_POOL_ADDR),
7106                tr32(BUFMGR_DMA_DESC_POOL_SIZE));
7107
7108         /* Read DMA control block */
7109         printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
7110                tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
7111
7112         /* Write DMA control block */
7113         printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
7114                tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
7115
7116         /* DMA completion block */
7117         printk("DEBUG: DMAC_MODE[%08x]\n",
7118                tr32(DMAC_MODE));
7119
7120         /* GRC block */
7121         printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
7122                tr32(GRC_MODE), tr32(GRC_MISC_CFG));
7123         printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
7124                tr32(GRC_LOCAL_CTRL));
7125
7126         /* TG3_BDINFOs */
7127         printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
7128                tr32(RCVDBDI_JUMBO_BD + 0x0),
7129                tr32(RCVDBDI_JUMBO_BD + 0x4),
7130                tr32(RCVDBDI_JUMBO_BD + 0x8),
7131                tr32(RCVDBDI_JUMBO_BD + 0xc));
7132         printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
7133                tr32(RCVDBDI_STD_BD + 0x0),
7134                tr32(RCVDBDI_STD_BD + 0x4),
7135                tr32(RCVDBDI_STD_BD + 0x8),
7136                tr32(RCVDBDI_STD_BD + 0xc));
7137         printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
7138                tr32(RCVDBDI_MINI_BD + 0x0),
7139                tr32(RCVDBDI_MINI_BD + 0x4),
7140                tr32(RCVDBDI_MINI_BD + 0x8),
7141                tr32(RCVDBDI_MINI_BD + 0xc));
7142
7143         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
7144         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
7145         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
7146         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
7147         printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
7148                val32, val32_2, val32_3, val32_4);
7149
7150         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
7151         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
7152         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
7153         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
7154         printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
7155                val32, val32_2, val32_3, val32_4);
7156
7157         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
7158         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
7159         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
7160         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
7161         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
7162         printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
7163                val32, val32_2, val32_3, val32_4, val32_5);
7164
7165         /* SW status block */
7166         printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
7167                tp->hw_status->status,
7168                tp->hw_status->status_tag,
7169                tp->hw_status->rx_jumbo_consumer,
7170                tp->hw_status->rx_consumer,
7171                tp->hw_status->rx_mini_consumer,
7172                tp->hw_status->idx[0].rx_producer,
7173                tp->hw_status->idx[0].tx_consumer);
7174
7175         /* SW statistics block */
7176         printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
7177                ((u32 *)tp->hw_stats)[0],
7178                ((u32 *)tp->hw_stats)[1],
7179                ((u32 *)tp->hw_stats)[2],
7180                ((u32 *)tp->hw_stats)[3]);
7181
7182         /* Mailboxes */
7183         printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
7184                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
7185                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
7186                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
7187                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
7188
7189         /* NIC side send descriptors. */
7190         for (i = 0; i < 6; i++) {
7191                 unsigned long txd;
7192
7193                 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
7194                         + (i * sizeof(struct tg3_tx_buffer_desc));
7195                 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
7196                        i,
7197                        readl(txd + 0x0), readl(txd + 0x4),
7198                        readl(txd + 0x8), readl(txd + 0xc));
7199         }
7200
7201         /* NIC side RX descriptors. */
7202         for (i = 0; i < 6; i++) {
7203                 unsigned long rxd;
7204
7205                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
7206                         + (i * sizeof(struct tg3_rx_buffer_desc));
7207                 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
7208                        i,
7209                        readl(rxd + 0x0), readl(rxd + 0x4),
7210                        readl(rxd + 0x8), readl(rxd + 0xc));
7211                 rxd += (4 * sizeof(u32));
7212                 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
7213                        i,
7214                        readl(rxd + 0x0), readl(rxd + 0x4),
7215                        readl(rxd + 0x8), readl(rxd + 0xc));
7216         }
7217
7218         for (i = 0; i < 6; i++) {
7219                 unsigned long rxd;
7220
7221                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
7222                         + (i * sizeof(struct tg3_rx_buffer_desc));
7223                 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
7224                        i,
7225                        readl(rxd + 0x0), readl(rxd + 0x4),
7226                        readl(rxd + 0x8), readl(rxd + 0xc));
7227                 rxd += (4 * sizeof(u32));
7228                 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
7229                        i,
7230                        readl(rxd + 0x0), readl(rxd + 0x4),
7231                        readl(rxd + 0x8), readl(rxd + 0xc));
7232         }
7233 }
7234 #endif
7235
7236 static struct net_device_stats *tg3_get_stats(struct net_device *);
7237 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
7238
7239 static int tg3_close(struct net_device *dev)
7240 {
7241         struct tg3 *tp = netdev_priv(dev);
7242
7243         /* We can't call flush_scheduled_work() here: linkwatch_event()
7244          * may be on the workqueue and it will try to get the rtnl_lock
7245          * we are holding, so instead poll for the reset task to finish.
7246          */
7247         while (tp->tg3_flags & TG3_FLAG_IN_RESET_TASK)
7248                 msleep(1);
7249
7250         netif_stop_queue(dev);
7251
7252         del_timer_sync(&tp->timer);
7253
7254         tg3_full_lock(tp, 1);
7255 #if 0
7256         tg3_dump_state(tp);
7257 #endif
7258
7259         tg3_disable_ints(tp);
7260
7261         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7262         tg3_free_rings(tp);
7263         tp->tg3_flags &=
7264                 ~(TG3_FLAG_INIT_COMPLETE |
7265                   TG3_FLAG_GOT_SERDES_FLOWCTL);
7266
7267         tg3_full_unlock(tp);
7268
7269         free_irq(tp->pdev->irq, dev);
7270         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7271                 pci_disable_msi(tp->pdev);
7272                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7273         }
7274
7275         memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
7276                sizeof(tp->net_stats_prev));
7277         memcpy(&tp->estats_prev, tg3_get_estats(tp),
7278                sizeof(tp->estats_prev));
7279
7280         tg3_free_consistent(tp);
7281
7282         tg3_set_power_state(tp, PCI_D3hot);
7283
7284         netif_carrier_off(tp->dev);
7285
7286         return 0;
7287 }
7288
7289 static inline unsigned long get_stat64(tg3_stat64_t *val)
7290 {
7291         unsigned long ret;
7292
7293 #if (BITS_PER_LONG == 32)
7294         ret = val->low;
7295 #else
7296         ret = ((u64)val->high << 32) | ((u64)val->low);
7297 #endif
7298         return ret;
7299 }
7300
7301 static unsigned long calc_crc_errors(struct tg3 *tp)
7302 {
7303         struct tg3_hw_stats *hw_stats = tp->hw_stats;
7304
7305         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7306             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
7307              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
7308                 u32 val;
7309
7310                 spin_lock_bh(&tp->lock);
7311                 if (!tg3_readphy(tp, 0x1e, &val)) {
7312                         tg3_writephy(tp, 0x1e, val | 0x8000);
7313                         tg3_readphy(tp, 0x14, &val);
7314                 } else
7315                         val = 0;
7316                 spin_unlock_bh(&tp->lock);
7317
7318                 tp->phy_crc_errors += val;
7319
7320                 return tp->phy_crc_errors;
7321         }
7322
7323         return get_stat64(&hw_stats->rx_fcs_errors);
7324 }
7325
7326 #define ESTAT_ADD(member) \
7327         estats->member =        old_estats->member + \
7328                                 get_stat64(&hw_stats->member)
7329
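     /* For reference, ESTAT_ADD(rx_octets) in the function below expands to
      *
      *      estats->rx_octets = old_estats->rx_octets +
      *                          get_stat64(&hw_stats->rx_octets);
      *
      * i.e. each reported ethtool counter is the snapshot saved by the last
      * tg3_close() plus whatever the hardware statistics block holds now.
      */
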
7330 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
7331 {
7332         struct tg3_ethtool_stats *estats = &tp->estats;
7333         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
7334         struct tg3_hw_stats *hw_stats = tp->hw_stats;
7335
7336         if (!hw_stats)
7337                 return old_estats;
7338
7339         ESTAT_ADD(rx_octets);
7340         ESTAT_ADD(rx_fragments);
7341         ESTAT_ADD(rx_ucast_packets);
7342         ESTAT_ADD(rx_mcast_packets);
7343         ESTAT_ADD(rx_bcast_packets);
7344         ESTAT_ADD(rx_fcs_errors);
7345         ESTAT_ADD(rx_align_errors);
7346         ESTAT_ADD(rx_xon_pause_rcvd);
7347         ESTAT_ADD(rx_xoff_pause_rcvd);
7348         ESTAT_ADD(rx_mac_ctrl_rcvd);
7349         ESTAT_ADD(rx_xoff_entered);
7350         ESTAT_ADD(rx_frame_too_long_errors);
7351         ESTAT_ADD(rx_jabbers);
7352         ESTAT_ADD(rx_undersize_packets);
7353         ESTAT_ADD(rx_in_length_errors);
7354         ESTAT_ADD(rx_out_length_errors);
7355         ESTAT_ADD(rx_64_or_less_octet_packets);
7356         ESTAT_ADD(rx_65_to_127_octet_packets);
7357         ESTAT_ADD(rx_128_to_255_octet_packets);
7358         ESTAT_ADD(rx_256_to_511_octet_packets);
7359         ESTAT_ADD(rx_512_to_1023_octet_packets);
7360         ESTAT_ADD(rx_1024_to_1522_octet_packets);
7361         ESTAT_ADD(rx_1523_to_2047_octet_packets);
7362         ESTAT_ADD(rx_2048_to_4095_octet_packets);
7363         ESTAT_ADD(rx_4096_to_8191_octet_packets);
7364         ESTAT_ADD(rx_8192_to_9022_octet_packets);
7365
7366         ESTAT_ADD(tx_octets);
7367         ESTAT_ADD(tx_collisions);
7368         ESTAT_ADD(tx_xon_sent);
7369         ESTAT_ADD(tx_xoff_sent);
7370         ESTAT_ADD(tx_flow_control);
7371         ESTAT_ADD(tx_mac_errors);
7372         ESTAT_ADD(tx_single_collisions);
7373         ESTAT_ADD(tx_mult_collisions);
7374         ESTAT_ADD(tx_deferred);
7375         ESTAT_ADD(tx_excessive_collisions);
7376         ESTAT_ADD(tx_late_collisions);
7377         ESTAT_ADD(tx_collide_2times);
7378         ESTAT_ADD(tx_collide_3times);
7379         ESTAT_ADD(tx_collide_4times);
7380         ESTAT_ADD(tx_collide_5times);
7381         ESTAT_ADD(tx_collide_6times);
7382         ESTAT_ADD(tx_collide_7times);
7383         ESTAT_ADD(tx_collide_8times);
7384         ESTAT_ADD(tx_collide_9times);
7385         ESTAT_ADD(tx_collide_10times);
7386         ESTAT_ADD(tx_collide_11times);
7387         ESTAT_ADD(tx_collide_12times);
7388         ESTAT_ADD(tx_collide_13times);
7389         ESTAT_ADD(tx_collide_14times);
7390         ESTAT_ADD(tx_collide_15times);
7391         ESTAT_ADD(tx_ucast_packets);
7392         ESTAT_ADD(tx_mcast_packets);
7393         ESTAT_ADD(tx_bcast_packets);
7394         ESTAT_ADD(tx_carrier_sense_errors);
7395         ESTAT_ADD(tx_discards);
7396         ESTAT_ADD(tx_errors);
7397
7398         ESTAT_ADD(dma_writeq_full);
7399         ESTAT_ADD(dma_write_prioq_full);
7400         ESTAT_ADD(rxbds_empty);
7401         ESTAT_ADD(rx_discards);
7402         ESTAT_ADD(rx_errors);
7403         ESTAT_ADD(rx_threshold_hit);
7404
7405         ESTAT_ADD(dma_readq_full);
7406         ESTAT_ADD(dma_read_prioq_full);
7407         ESTAT_ADD(tx_comp_queue_full);
7408
7409         ESTAT_ADD(ring_set_send_prod_index);
7410         ESTAT_ADD(ring_status_update);
7411         ESTAT_ADD(nic_irqs);
7412         ESTAT_ADD(nic_avoided_irqs);
7413         ESTAT_ADD(nic_tx_threshold_hit);
7414
7415         return estats;
7416 }
7417
7418 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
7419 {
7420         struct tg3 *tp = netdev_priv(dev);
7421         struct net_device_stats *stats = &tp->net_stats;
7422         struct net_device_stats *old_stats = &tp->net_stats_prev;
7423         struct tg3_hw_stats *hw_stats = tp->hw_stats;
7424
7425         if (!hw_stats)
7426                 return old_stats;
7427
7428         stats->rx_packets = old_stats->rx_packets +
7429                 get_stat64(&hw_stats->rx_ucast_packets) +
7430                 get_stat64(&hw_stats->rx_mcast_packets) +
7431                 get_stat64(&hw_stats->rx_bcast_packets);
7432
7433         stats->tx_packets = old_stats->tx_packets +
7434                 get_stat64(&hw_stats->tx_ucast_packets) +
7435                 get_stat64(&hw_stats->tx_mcast_packets) +
7436                 get_stat64(&hw_stats->tx_bcast_packets);
7437
7438         stats->rx_bytes = old_stats->rx_bytes +
7439                 get_stat64(&hw_stats->rx_octets);
7440         stats->tx_bytes = old_stats->tx_bytes +
7441                 get_stat64(&hw_stats->tx_octets);
7442
7443         stats->rx_errors = old_stats->rx_errors +
7444                 get_stat64(&hw_stats->rx_errors);
7445         stats->tx_errors = old_stats->tx_errors +
7446                 get_stat64(&hw_stats->tx_errors) +
7447                 get_stat64(&hw_stats->tx_mac_errors) +
7448                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
7449                 get_stat64(&hw_stats->tx_discards);
7450
7451         stats->multicast = old_stats->multicast +
7452                 get_stat64(&hw_stats->rx_mcast_packets);
7453         stats->collisions = old_stats->collisions +
7454                 get_stat64(&hw_stats->tx_collisions);
7455
7456         stats->rx_length_errors = old_stats->rx_length_errors +
7457                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
7458                 get_stat64(&hw_stats->rx_undersize_packets);
7459
7460         stats->rx_over_errors = old_stats->rx_over_errors +
7461                 get_stat64(&hw_stats->rxbds_empty);
7462         stats->rx_frame_errors = old_stats->rx_frame_errors +
7463                 get_stat64(&hw_stats->rx_align_errors);
7464         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
7465                 get_stat64(&hw_stats->tx_discards);
7466         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
7467                 get_stat64(&hw_stats->tx_carrier_sense_errors);
7468
7469         stats->rx_crc_errors = old_stats->rx_crc_errors +
7470                 calc_crc_errors(tp);
7471
7472         stats->rx_missed_errors = old_stats->rx_missed_errors +
7473                 get_stat64(&hw_stats->rx_discards);
7474
7475         return stats;
7476 }
7477
7478 static inline u32 calc_crc(unsigned char *buf, int len)
7479 {
7480         u32 reg;
7481         u32 tmp;
7482         int j, k;
7483
7484         reg = 0xffffffff;
7485
7486         for (j = 0; j < len; j++) {
7487                 reg ^= buf[j];
7488
7489                 for (k = 0; k < 8; k++) {
7490                         tmp = reg & 0x01;
7491
7492                         reg >>= 1;
7493
7494                         if (tmp) {
7495                                 reg ^= 0xedb88320;
7496                         }
7497                 }
7498         }
7499
7500         return ~reg;
7501 }
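
     /* calc_crc() above is the standard bit-reflected CRC-32 (polynomial
      * 0xedb88320, initial value 0xffffffff, final inversion), computed here
      * over the 6-byte MAC address.  __tg3_set_rx_mode() below folds that
      * CRC into one bit of the 128-bit multicast hash spread across four
      * 32-bit registers.  A standalone sketch of the bit placement, with a
      * hypothetical helper name (illustration only, never compiled):
      */
     #if 0
     static void mc_hash_set_example(u32 *mc_filter, const unsigned char *addr)
     {
             u32 crc = calc_crc((unsigned char *)addr, ETH_ALEN);
             u32 bit = ~crc & 0x7f;                  /* low 7 bits, inverted */
             u32 regidx = (bit & 0x60) >> 5;         /* which hash register  */

             mc_filter[regidx] |= 1 << (bit & 0x1f); /* bit within register  */
     }
     #endif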
7502
7503 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
7504 {
7505         /* accept or reject all multicast frames */
7506         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
7507         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
7508         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
7509         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
7510 }
7511
7512 static void __tg3_set_rx_mode(struct net_device *dev)
7513 {
7514         struct tg3 *tp = netdev_priv(dev);
7515         u32 rx_mode;
7516
7517         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
7518                                   RX_MODE_KEEP_VLAN_TAG);
7519
7520         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
7521          * flag clear.
7522          */
7523 #if TG3_VLAN_TAG_USED
7524         if (!tp->vlgrp &&
7525             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
7526                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
7527 #else
7528         /* By definition, VLAN is always disabled in this
7529          * case.
7530          */
7531         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
7532                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
7533 #endif
7534
7535         if (dev->flags & IFF_PROMISC) {
7536                 /* Promiscuous mode. */
7537                 rx_mode |= RX_MODE_PROMISC;
7538         } else if (dev->flags & IFF_ALLMULTI) {
7539                 /* Accept all multicast. */
7540                 tg3_set_multi(tp, 1);
7541         } else if (dev->mc_count < 1) {
7542                 /* Reject all multicast. */
7543                 tg3_set_multi(tp, 0);
7544         } else {
7545                 /* Accept one or more multicast addresses. */
7546                 struct dev_mc_list *mclist;
7547                 unsigned int i;
7548                 u32 mc_filter[4] = { 0, };
7549                 u32 regidx;
7550                 u32 bit;
7551                 u32 crc;
7552
7553                 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
7554                      i++, mclist = mclist->next) {
7555
7556                         crc = calc_crc(mclist->dmi_addr, ETH_ALEN);
7557                         bit = ~crc & 0x7f;
7558                         regidx = (bit & 0x60) >> 5;
7559                         bit &= 0x1f;
7560                         mc_filter[regidx] |= (1 << bit);
7561                 }
7562
7563                 tw32(MAC_HASH_REG_0, mc_filter[0]);
7564                 tw32(MAC_HASH_REG_1, mc_filter[1]);
7565                 tw32(MAC_HASH_REG_2, mc_filter[2]);
7566                 tw32(MAC_HASH_REG_3, mc_filter[3]);
7567         }
7568
7569         if (rx_mode != tp->rx_mode) {
7570                 tp->rx_mode = rx_mode;
7571                 tw32_f(MAC_RX_MODE, rx_mode);
7572                 udelay(10);
7573         }
7574 }
7575
7576 static void tg3_set_rx_mode(struct net_device *dev)
7577 {
7578         struct tg3 *tp = netdev_priv(dev);
7579
7580         if (!netif_running(dev))
7581                 return;
7582
7583         tg3_full_lock(tp, 0);
7584         __tg3_set_rx_mode(dev);
7585         tg3_full_unlock(tp);
7586 }
7587
7588 #define TG3_REGDUMP_LEN         (32 * 1024)
7589
7590 static int tg3_get_regs_len(struct net_device *dev)
7591 {
7592         return TG3_REGDUMP_LEN;
7593 }
7594
7595 static void tg3_get_regs(struct net_device *dev,
7596                 struct ethtool_regs *regs, void *_p)
7597 {
7598         u32 *p = _p;
7599         struct tg3 *tp = netdev_priv(dev);
7600         u8 *orig_p = _p;
7601         int i;
7602
7603         regs->version = 0;
7604
7605         memset(p, 0, TG3_REGDUMP_LEN);
7606
7607         if (tp->link_config.phy_is_low_power)
7608                 return;
7609
7610         tg3_full_lock(tp, 0);
7611
7612 #define __GET_REG32(reg)        (*(p)++ = tr32(reg))
7613 #define GET_REG32_LOOP(base,len)                \
7614 do {    p = (u32 *)(orig_p + (base));           \
7615         for (i = 0; i < len; i += 4)            \
7616                 __GET_REG32((base) + i);        \
7617 } while (0)
7618 #define GET_REG32_1(reg)                        \
7619 do {    p = (u32 *)(orig_p + (reg));            \
7620         __GET_REG32((reg));                     \
7621 } while (0)
7622
7623         GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
7624         GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
7625         GET_REG32_LOOP(MAC_MODE, 0x4f0);
7626         GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
7627         GET_REG32_1(SNDDATAC_MODE);
7628         GET_REG32_LOOP(SNDBDS_MODE, 0x80);
7629         GET_REG32_LOOP(SNDBDI_MODE, 0x48);
7630         GET_REG32_1(SNDBDC_MODE);
7631         GET_REG32_LOOP(RCVLPC_MODE, 0x20);
7632         GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
7633         GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
7634         GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
7635         GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
7636         GET_REG32_1(RCVDCC_MODE);
7637         GET_REG32_LOOP(RCVBDI_MODE, 0x20);
7638         GET_REG32_LOOP(RCVCC_MODE, 0x14);
7639         GET_REG32_LOOP(RCVLSC_MODE, 0x08);
7640         GET_REG32_1(MBFREE_MODE);
7641         GET_REG32_LOOP(HOSTCC_MODE, 0x100);
7642         GET_REG32_LOOP(MEMARB_MODE, 0x10);
7643         GET_REG32_LOOP(BUFMGR_MODE, 0x58);
7644         GET_REG32_LOOP(RDMAC_MODE, 0x08);
7645         GET_REG32_LOOP(WDMAC_MODE, 0x08);
7646         GET_REG32_1(RX_CPU_MODE);
7647         GET_REG32_1(RX_CPU_STATE);
7648         GET_REG32_1(RX_CPU_PGMCTR);
7649         GET_REG32_1(RX_CPU_HWBKPT);
7650         GET_REG32_1(TX_CPU_MODE);
7651         GET_REG32_1(TX_CPU_STATE);
7652         GET_REG32_1(TX_CPU_PGMCTR);
7653         GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
7654         GET_REG32_LOOP(FTQ_RESET, 0x120);
7655         GET_REG32_LOOP(MSGINT_MODE, 0x0c);
7656         GET_REG32_1(DMAC_MODE);
7657         GET_REG32_LOOP(GRC_MODE, 0x4c);
7658         if (tp->tg3_flags & TG3_FLAG_NVRAM)
7659                 GET_REG32_LOOP(NVRAM_CMD, 0x24);
7660
7661 #undef __GET_REG32
7662 #undef GET_REG32_LOOP
7663 #undef GET_REG32_1
7664
7665         tg3_full_unlock(tp);
7666 }
7667
7668 static int tg3_get_eeprom_len(struct net_device *dev)
7669 {
7670         struct tg3 *tp = netdev_priv(dev);
7671
7672         return tp->nvram_size;
7673 }
7674
7675 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
7676 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
7677
7678 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7679 {
7680         struct tg3 *tp = netdev_priv(dev);
7681         int ret;
7682         u8  *pd;
7683         u32 i, offset, len, val, b_offset, b_count;
7684
7685         if (tp->link_config.phy_is_low_power)
7686                 return -EAGAIN;
7687
7688         offset = eeprom->offset;
7689         len = eeprom->len;
7690         eeprom->len = 0;
7691
7692         eeprom->magic = TG3_EEPROM_MAGIC;
7693
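        /* NVRAM is read in aligned 32-bit words, so the copy below is done
         * in three stages: a leading partial word, the aligned middle, and
         * a trailing partial word.
         */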
7694         if (offset & 3) {
7695                 /* adjustments to start on required 4 byte boundary */
7696                 b_offset = offset & 3;
7697                 b_count = 4 - b_offset;
7698                 if (b_count > len) {
7699                         /* i.e. offset=1 len=2 */
7700                         b_count = len;
7701                 }
7702                 ret = tg3_nvram_read(tp, offset-b_offset, &val);
7703                 if (ret)
7704                         return ret;
7705                 val = cpu_to_le32(val);
7706                 memcpy(data, ((char*)&val) + b_offset, b_count);
7707                 len -= b_count;
7708                 offset += b_count;
7709                 eeprom->len += b_count;
7710         }
7711
7712         /* read bytes up to the last 4-byte boundary */
7713         pd = &data[eeprom->len];
7714         for (i = 0; i < (len - (len & 3)); i += 4) {
7715                 ret = tg3_nvram_read(tp, offset + i, &val);
7716                 if (ret) {
7717                         eeprom->len += i;
7718                         return ret;
7719                 }
7720                 val = cpu_to_le32(val);
7721                 memcpy(pd + i, &val, 4);
7722         }
7723         eeprom->len += i;
7724
7725         if (len & 3) {
7726                 /* read last bytes not ending on 4 byte boundary */
7727                 pd = &data[eeprom->len];
7728                 b_count = len & 3;
7729                 b_offset = offset + len - b_count;
7730                 ret = tg3_nvram_read(tp, b_offset, &val);
7731                 if (ret)
7732                         return ret;
7733                 val = cpu_to_le32(val);
7734                 memcpy(pd, ((char*)&val), b_count);
7735                 eeprom->len += b_count;
7736         }
7737         return 0;
7738 }
7739
7740 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf); 
7741
7742 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7743 {
7744         struct tg3 *tp = netdev_priv(dev);
7745         int ret;
7746         u32 offset, len, b_offset, odd_len, start, end;
7747         u8 *buf;
7748
7749         if (tp->link_config.phy_is_low_power)
7750                 return -EAGAIN;
7751
7752         if (eeprom->magic != TG3_EEPROM_MAGIC)
7753                 return -EINVAL;
7754
7755         offset = eeprom->offset;
7756         len = eeprom->len;
7757
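        /* Writes must also be word aligned: widen the range to 4-byte
         * boundaries, preserving the neighbouring bytes by reading the
         * bordering words into 'start' and 'end' and merging them into a
         * bounce buffer before programming the NVRAM block.
         */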
7758         if ((b_offset = (offset & 3))) {
7759                 /* adjustments to start on required 4 byte boundary */
7760                 ret = tg3_nvram_read(tp, offset-b_offset, &start);
7761                 if (ret)
7762                         return ret;
7763                 start = cpu_to_le32(start);
7764                 len += b_offset;
7765                 offset &= ~3;
7766                 if (len < 4)
7767                         len = 4;
7768         }
7769
7770         odd_len = 0;
7771         if (len & 3) {
7772                 /* adjustments to end on required 4 byte boundary */
7773                 odd_len = 1;
7774                 len = (len + 3) & ~3;
7775                 ret = tg3_nvram_read(tp, offset+len-4, &end);
7776                 if (ret)
7777                         return ret;
7778                 end = cpu_to_le32(end);
7779         }
7780
7781         buf = data;
7782         if (b_offset || odd_len) {
7783                 buf = kmalloc(len, GFP_KERNEL);
7784                 if (!buf)
7785                         return -ENOMEM;
7786                 if (b_offset)
7787                         memcpy(buf, &start, 4);
7788                 if (odd_len)
7789                         memcpy(buf+len-4, &end, 4);
7790                 memcpy(buf + b_offset, data, eeprom->len);
7791         }
7792
7793         ret = tg3_nvram_write_block(tp, offset, len, buf);
7794
7795         if (buf != data)
7796                 kfree(buf);
7797
7798         return ret;
7799 }
7800
7801 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7802 {
7803         struct tg3 *tp = netdev_priv(dev);
7804   
7805         cmd->supported = (SUPPORTED_Autoneg);
7806
7807         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7808                 cmd->supported |= (SUPPORTED_1000baseT_Half |
7809                                    SUPPORTED_1000baseT_Full);
7810
7811         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
7812                 cmd->supported |= (SUPPORTED_100baseT_Half |
7813                                   SUPPORTED_100baseT_Full |
7814                                   SUPPORTED_10baseT_Half |
7815                                   SUPPORTED_10baseT_Full |
7816                                   SUPPORTED_MII);
7817                 cmd->port = PORT_TP;
7818         } else {
7819                 cmd->supported |= SUPPORTED_FIBRE;
7820                 cmd->port = PORT_FIBRE;
7821         }
7822   
7823         cmd->advertising = tp->link_config.advertising;
7824         if (netif_running(dev)) {
7825                 cmd->speed = tp->link_config.active_speed;
7826                 cmd->duplex = tp->link_config.active_duplex;
7827         }
7828         cmd->phy_address = PHY_ADDR;
7829         cmd->transceiver = 0;
7830         cmd->autoneg = tp->link_config.autoneg;
7831         cmd->maxtxpkt = 0;
7832         cmd->maxrxpkt = 0;
7833         return 0;
7834 }
7835   
7836 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7837 {
7838         struct tg3 *tp = netdev_priv(dev);
7839   
7840         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) { 
7841                 /* These are the only valid advertisement bits allowed.  */
7842                 if (cmd->autoneg == AUTONEG_ENABLE &&
7843                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
7844                                           ADVERTISED_1000baseT_Full |
7845                                           ADVERTISED_Autoneg |
7846                                           ADVERTISED_FIBRE)))
7847                         return -EINVAL;
7848                 /* Fiber can only do SPEED_1000.  */
7849                 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7850                          (cmd->speed != SPEED_1000))
7851                         return -EINVAL;
7852         /* Copper cannot force SPEED_1000.  */
7853         } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7854                    (cmd->speed == SPEED_1000))
7855                 return -EINVAL;
7856         else if ((cmd->speed == SPEED_1000) &&
7857                  (tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7858                 return -EINVAL;
7859
7860         tg3_full_lock(tp, 0);
7861
7862         tp->link_config.autoneg = cmd->autoneg;
7863         if (cmd->autoneg == AUTONEG_ENABLE) {
7864                 tp->link_config.advertising = cmd->advertising;
7865                 tp->link_config.speed = SPEED_INVALID;
7866                 tp->link_config.duplex = DUPLEX_INVALID;
7867         } else {
7868                 tp->link_config.advertising = 0;
7869                 tp->link_config.speed = cmd->speed;
7870                 tp->link_config.duplex = cmd->duplex;
7871         }
7872   
7873         if (netif_running(dev))
7874                 tg3_setup_phy(tp, 1);
7875
7876         tg3_full_unlock(tp);
7877   
7878         return 0;
7879 }
7880   
7881 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
7882 {
7883         struct tg3 *tp = netdev_priv(dev);
7884   
7885         strcpy(info->driver, DRV_MODULE_NAME);
7886         strcpy(info->version, DRV_MODULE_VERSION);
7887         strcpy(info->fw_version, tp->fw_ver);
7888         strcpy(info->bus_info, pci_name(tp->pdev));
7889 }
7890   
7891 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7892 {
7893         struct tg3 *tp = netdev_priv(dev);
7894   
7895         wol->supported = WAKE_MAGIC;
7896         wol->wolopts = 0;
7897         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
7898                 wol->wolopts = WAKE_MAGIC;
7899         memset(&wol->sopass, 0, sizeof(wol->sopass));
7900 }
7901   
7902 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7903 {
7904         struct tg3 *tp = netdev_priv(dev);
7905   
7906         if (wol->wolopts & ~WAKE_MAGIC)
7907                 return -EINVAL;
7908         if ((wol->wolopts & WAKE_MAGIC) &&
7909             tp->tg3_flags2 & TG3_FLG2_PHY_SERDES &&
7910             !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
7911                 return -EINVAL;
7912   
7913         spin_lock_bh(&tp->lock);
7914         if (wol->wolopts & WAKE_MAGIC)
7915                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
7916         else
7917                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
7918         spin_unlock_bh(&tp->lock);
7919   
7920         return 0;
7921 }
7922   
7923 static u32 tg3_get_msglevel(struct net_device *dev)
7924 {
7925         struct tg3 *tp = netdev_priv(dev);
7926         return tp->msg_enable;
7927 }
7928   
7929 static void tg3_set_msglevel(struct net_device *dev, u32 value)
7930 {
7931         struct tg3 *tp = netdev_priv(dev);
7932         tp->msg_enable = value;
7933 }
7934   
7935 #if TG3_TSO_SUPPORT != 0
7936 static int tg3_set_tso(struct net_device *dev, u32 value)
7937 {
7938         struct tg3 *tp = netdev_priv(dev);
7939
7940         if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
7941                 if (value)
7942                         return -EINVAL;
7943                 return 0;
7944         }
7945         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) {
7946                 if (value)
7947                         dev->features |= NETIF_F_TSO6;
7948                 else
7949                         dev->features &= ~NETIF_F_TSO6;
7950         }
7951         return ethtool_op_set_tso(dev, value);
7952 }
7953 #endif
7954   
7955 static int tg3_nway_reset(struct net_device *dev)
7956 {
7957         struct tg3 *tp = netdev_priv(dev);
7958         u32 bmcr;
7959         int r;
7960   
7961         if (!netif_running(dev))
7962                 return -EAGAIN;
7963
7964         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7965                 return -EINVAL;
7966
7967         spin_lock_bh(&tp->lock);
7968         r = -EINVAL;
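        /* MII_BMCR is read twice and only the second result is checked;
         * the first read appears to be a deliberate dummy read to flush a
         * stale latched value.
         */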
7969         tg3_readphy(tp, MII_BMCR, &bmcr);
7970         if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
7971             ((bmcr & BMCR_ANENABLE) ||
7972              (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
7973                 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
7974                                            BMCR_ANENABLE);
7975                 r = 0;
7976         }
7977         spin_unlock_bh(&tp->lock);
7978   
7979         return r;
7980 }
7981   
7982 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7983 {
7984         struct tg3 *tp = netdev_priv(dev);
7985   
7986         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
7987         ering->rx_mini_max_pending = 0;
7988         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
7989                 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
7990         else
7991                 ering->rx_jumbo_max_pending = 0;
7992
7993         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
7994
7995         ering->rx_pending = tp->rx_pending;
7996         ering->rx_mini_pending = 0;
7997         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
7998                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
7999         else
8000                 ering->rx_jumbo_pending = 0;
8001
8002         ering->tx_pending = tp->tx_pending;
8003 }
8004   
8005 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
8006 {
8007         struct tg3 *tp = netdev_priv(dev);
8008         int irq_sync = 0, err = 0;
8009   
8010         if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
8011             (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
8012             (ering->tx_pending > TG3_TX_RING_SIZE - 1))
8013                 return -EINVAL;
8014   
8015         if (netif_running(dev)) {
8016                 tg3_netif_stop(tp);
8017                 irq_sync = 1;
8018         }
8019
8020         tg3_full_lock(tp, irq_sync);
8021   
8022         tp->rx_pending = ering->rx_pending;
8023
8024         if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
8025             tp->rx_pending > 63)
8026                 tp->rx_pending = 63;
8027         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
8028         tp->tx_pending = ering->tx_pending;
8029
8030         if (netif_running(dev)) {
8031                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8032                 err = tg3_restart_hw(tp, 1);
8033                 if (!err)
8034                         tg3_netif_start(tp);
8035         }
8036
8037         tg3_full_unlock(tp);
8038   
8039         return err;
8040 }
8041   
8042 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8043 {
8044         struct tg3 *tp = netdev_priv(dev);
8045   
8046         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
8047         epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
8048         epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
8049 }
8050   
8051 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8052 {
8053         struct tg3 *tp = netdev_priv(dev);
8054         int irq_sync = 0, err = 0;
8055   
8056         if (netif_running(dev)) {
8057                 tg3_netif_stop(tp);
8058                 irq_sync = 1;
8059         }
8060
8061         tg3_full_lock(tp, irq_sync);
8062
8063         if (epause->autoneg)
8064                 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
8065         else
8066                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
8067         if (epause->rx_pause)
8068                 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
8069         else
8070                 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
8071         if (epause->tx_pause)
8072                 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
8073         else
8074                 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
8075
8076         if (netif_running(dev)) {
8077                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8078                 err = tg3_restart_hw(tp, 1);
8079                 if (!err)
8080                         tg3_netif_start(tp);
8081         }
8082
8083         tg3_full_unlock(tp);
8084   
8085         return err;
8086 }
8087   
8088 static u32 tg3_get_rx_csum(struct net_device *dev)
8089 {
8090         struct tg3 *tp = netdev_priv(dev);
8091         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
8092 }
8093   
8094 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
8095 {
8096         struct tg3 *tp = netdev_priv(dev);
8097   
8098         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8099                 if (data != 0)
8100                         return -EINVAL;
8101                 return 0;
8102         }
8103   
8104         spin_lock_bh(&tp->lock);
8105         if (data)
8106                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
8107         else
8108                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
8109         spin_unlock_bh(&tp->lock);
8110   
8111         return 0;
8112 }
8113   
8114 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
8115 {
8116         struct tg3 *tp = netdev_priv(dev);
8117   
8118         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8119                 if (data != 0)
8120                         return -EINVAL;
8121                 return 0;
8122         }
8123   
8124         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8125             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8126                 ethtool_op_set_tx_hw_csum(dev, data);
8127         else
8128                 ethtool_op_set_tx_csum(dev, data);
8129
8130         return 0;
8131 }
8132
8133 static int tg3_get_stats_count(struct net_device *dev)
8134 {
8135         return TG3_NUM_STATS;
8136 }
8137
8138 static int tg3_get_test_count(struct net_device *dev)
8139 {
8140         return TG3_NUM_TEST;
8141 }
8142
8143 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
8144 {
8145         switch (stringset) {
8146         case ETH_SS_STATS:
8147                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
8148                 break;
8149         case ETH_SS_TEST:
8150                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
8151                 break;
8152         default:
8153                 WARN_ON(1);     /* we need a WARN() */
8154                 break;
8155         }
8156 }
8157
8158 static int tg3_phys_id(struct net_device *dev, u32 data)
8159 {
8160         struct tg3 *tp = netdev_priv(dev);
8161         int i;
8162
8163         if (!netif_running(tp->dev))
8164                 return -EAGAIN;
8165
8166         if (data == 0)
8167                 data = 2;
8168
8169         for (i = 0; i < (data * 2); i++) {
8170                 if ((i % 2) == 0)
8171                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8172                                            LED_CTRL_1000MBPS_ON |
8173                                            LED_CTRL_100MBPS_ON |
8174                                            LED_CTRL_10MBPS_ON |
8175                                            LED_CTRL_TRAFFIC_OVERRIDE |
8176                                            LED_CTRL_TRAFFIC_BLINK |
8177                                            LED_CTRL_TRAFFIC_LED);
8178         
8179                 else
8180                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8181                                            LED_CTRL_TRAFFIC_OVERRIDE);
8182
8183                 if (msleep_interruptible(500))
8184                         break;
8185         }
8186         tw32(MAC_LED_CTRL, tp->led_ctrl);
8187         return 0;
8188 }
8189
8190 static void tg3_get_ethtool_stats(struct net_device *dev,
8191                                    struct ethtool_stats *estats, u64 *tmp_stats)
8192 {
8193         struct tg3 *tp = netdev_priv(dev);
8194         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
8195 }
8196
8197 #define NVRAM_TEST_SIZE 0x100
8198 #define NVRAM_SELFBOOT_FORMAT1_SIZE 0x14
8199
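/* NVRAM self test: read back the first NVRAM_TEST_SIZE (or selfboot-sized)
 * bytes and verify the checksums - an 8-bit additive checksum for the
 * selfboot format, otherwise CRC32 values stored at offset 0x10 (bootstrap)
 * and 0xfc (manufacturing block).
 */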
8200 static int tg3_test_nvram(struct tg3 *tp)
8201 {
8202         u32 *buf, csum, magic;
8203         int i, j, err = 0, size;
8204
8205         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
8206                 return -EIO;
8207
8208         if (magic == TG3_EEPROM_MAGIC)
8209                 size = NVRAM_TEST_SIZE;
8210         else if ((magic & 0xff000000) == 0xa5000000) {
8211                 if ((magic & 0xe00000) == 0x200000)
8212                         size = NVRAM_SELFBOOT_FORMAT1_SIZE;
8213                 else
8214                         return 0;
8215         } else
8216                 return -EIO;
8217
8218         buf = kmalloc(size, GFP_KERNEL);
8219         if (buf == NULL)
8220                 return -ENOMEM;
8221
8222         err = -EIO;
8223         for (i = 0, j = 0; i < size; i += 4, j++) {
8224                 u32 val;
8225
8226                 if ((err = tg3_nvram_read(tp, i, &val)) != 0)
8227                         break;
8228                 buf[j] = cpu_to_le32(val);
8229         }
8230         if (i < size)
8231                 goto out;
8232
8233         /* Selfboot format */
8234         if (cpu_to_be32(buf[0]) != TG3_EEPROM_MAGIC) {
8235                 u8 *buf8 = (u8 *) buf, csum8 = 0;
8236
8237                 for (i = 0; i < size; i++)
8238                         csum8 += buf8[i];
8239
8240                 if (csum8 == 0) {
8241                         err = 0;
8242                         goto out;
8243                 }
8244
8245                 err = -EIO;
8246                 goto out;
8247         }
8248
8249         /* Bootstrap checksum at offset 0x10 */
8250         csum = calc_crc((unsigned char *) buf, 0x10);
8251         if (csum != cpu_to_le32(buf[0x10/4]))
8252                 goto out;
8253
8254         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
8255         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
8256         if (csum != cpu_to_le32(buf[0xfc/4]))
8257                 goto out;
8258
8259         err = 0;
8260
8261 out:
8262         kfree(buf);
8263         return err;
8264 }
8265
8266 #define TG3_SERDES_TIMEOUT_SEC  2
8267 #define TG3_COPPER_TIMEOUT_SEC  6
8268
8269 static int tg3_test_link(struct tg3 *tp)
8270 {
8271         int i, max;
8272
8273         if (!netif_running(tp->dev))
8274                 return -ENODEV;
8275
8276         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
8277                 max = TG3_SERDES_TIMEOUT_SEC;
8278         else
8279                 max = TG3_COPPER_TIMEOUT_SEC;
8280
8281         for (i = 0; i < max; i++) {
8282                 if (netif_carrier_ok(tp->dev))
8283                         return 0;
8284
8285                 if (msleep_interruptible(1000))
8286                         break;
8287         }
8288
8289         return -EIO;
8290 }
8291
8292 /* Only test the commonly used registers */
8293 static int tg3_test_registers(struct tg3 *tp)
8294 {
8295         int i, is_5705;
8296         u32 offset, read_mask, write_mask, val, save_val, read_val;
8297         static struct {
8298                 u16 offset;
8299                 u16 flags;
8300 #define TG3_FL_5705     0x1
8301 #define TG3_FL_NOT_5705 0x2
8302 #define TG3_FL_NOT_5788 0x4
8303                 u32 read_mask;
8304                 u32 write_mask;
8305         } reg_tbl[] = {
8306                 /* MAC Control Registers */
8307                 { MAC_MODE, TG3_FL_NOT_5705,
8308                         0x00000000, 0x00ef6f8c },
8309                 { MAC_MODE, TG3_FL_5705,
8310                         0x00000000, 0x01ef6b8c },
8311                 { MAC_STATUS, TG3_FL_NOT_5705,
8312                         0x03800107, 0x00000000 },
8313                 { MAC_STATUS, TG3_FL_5705,
8314                         0x03800100, 0x00000000 },
8315                 { MAC_ADDR_0_HIGH, 0x0000,
8316                         0x00000000, 0x0000ffff },
8317                 { MAC_ADDR_0_LOW, 0x0000,
8318                         0x00000000, 0xffffffff },
8319                 { MAC_RX_MTU_SIZE, 0x0000,
8320                         0x00000000, 0x0000ffff },
8321                 { MAC_TX_MODE, 0x0000,
8322                         0x00000000, 0x00000070 },
8323                 { MAC_TX_LENGTHS, 0x0000,
8324                         0x00000000, 0x00003fff },
8325                 { MAC_RX_MODE, TG3_FL_NOT_5705,
8326                         0x00000000, 0x000007fc },
8327                 { MAC_RX_MODE, TG3_FL_5705,
8328                         0x00000000, 0x000007dc },
8329                 { MAC_HASH_REG_0, 0x0000,
8330                         0x00000000, 0xffffffff },
8331                 { MAC_HASH_REG_1, 0x0000,
8332                         0x00000000, 0xffffffff },
8333                 { MAC_HASH_REG_2, 0x0000,
8334                         0x00000000, 0xffffffff },
8335                 { MAC_HASH_REG_3, 0x0000,
8336                         0x00000000, 0xffffffff },
8337
8338                 /* Receive Data and Receive BD Initiator Control Registers. */
8339                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
8340                         0x00000000, 0xffffffff },
8341                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
8342                         0x00000000, 0xffffffff },
8343                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
8344                         0x00000000, 0x00000003 },
8345                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
8346                         0x00000000, 0xffffffff },
8347                 { RCVDBDI_STD_BD+0, 0x0000,
8348                         0x00000000, 0xffffffff },
8349                 { RCVDBDI_STD_BD+4, 0x0000,
8350                         0x00000000, 0xffffffff },
8351                 { RCVDBDI_STD_BD+8, 0x0000,
8352                         0x00000000, 0xffff0002 },
8353                 { RCVDBDI_STD_BD+0xc, 0x0000,
8354                         0x00000000, 0xffffffff },
8355         
8356                 /* Receive BD Initiator Control Registers. */
8357                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
8358                         0x00000000, 0xffffffff },
8359                 { RCVBDI_STD_THRESH, TG3_FL_5705,
8360                         0x00000000, 0x000003ff },
8361                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
8362                         0x00000000, 0xffffffff },
8363         
8364                 /* Host Coalescing Control Registers. */
8365                 { HOSTCC_MODE, TG3_FL_NOT_5705,
8366                         0x00000000, 0x00000004 },
8367                 { HOSTCC_MODE, TG3_FL_5705,
8368                         0x00000000, 0x000000f6 },
8369                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
8370                         0x00000000, 0xffffffff },
8371                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
8372                         0x00000000, 0x000003ff },
8373                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
8374                         0x00000000, 0xffffffff },
8375                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
8376                         0x00000000, 0x000003ff },
8377                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
8378                         0x00000000, 0xffffffff },
8379                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
8380                         0x00000000, 0x000000ff },
8381                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
8382                         0x00000000, 0xffffffff },
8383                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
8384                         0x00000000, 0x000000ff },
8385                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
8386                         0x00000000, 0xffffffff },
8387                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
8388                         0x00000000, 0xffffffff },
8389                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
8390                         0x00000000, 0xffffffff },
8391                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
8392                         0x00000000, 0x000000ff },
8393                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
8394                         0x00000000, 0xffffffff },
8395                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
8396                         0x00000000, 0x000000ff },
8397                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
8398                         0x00000000, 0xffffffff },
8399                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
8400                         0x00000000, 0xffffffff },
8401                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
8402                         0x00000000, 0xffffffff },
8403                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
8404                         0x00000000, 0xffffffff },
8405                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
8406                         0x00000000, 0xffffffff },
8407                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
8408                         0xffffffff, 0x00000000 },
8409                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
8410                         0xffffffff, 0x00000000 },
8411
8412                 /* Buffer Manager Control Registers. */
8413                 { BUFMGR_MB_POOL_ADDR, 0x0000,
8414                         0x00000000, 0x007fff80 },
8415                 { BUFMGR_MB_POOL_SIZE, 0x0000,
8416                         0x00000000, 0x007fffff },
8417                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
8418                         0x00000000, 0x0000003f },
8419                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
8420                         0x00000000, 0x000001ff },
8421                 { BUFMGR_MB_HIGH_WATER, 0x0000,
8422                         0x00000000, 0x000001ff },
8423                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
8424                         0xffffffff, 0x00000000 },
8425                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
8426                         0xffffffff, 0x00000000 },
8427         
8428                 /* Mailbox Registers */
8429                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
8430                         0x00000000, 0x000001ff },
8431                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
8432                         0x00000000, 0x000001ff },
8433                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
8434                         0x00000000, 0x000007ff },
8435                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
8436                         0x00000000, 0x000001ff },
8437
8438                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
8439         };
8440
8441         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
8442                 is_5705 = 1;
8443         else
8444                 is_5705 = 0;
8445
8446         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
8447                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
8448                         continue;
8449
8450                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
8451                         continue;
8452
8453                 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
8454                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
8455                         continue;
8456
8457                 offset = (u32) reg_tbl[i].offset;
8458                 read_mask = reg_tbl[i].read_mask;
8459                 write_mask = reg_tbl[i].write_mask;
8460
8461                 /* Save the original register content */
8462                 save_val = tr32(offset);
8463
8464                 /* Determine the read-only value. */
8465                 read_val = save_val & read_mask;
8466
8467                 /* Write zero to the register, then make sure the read-only bits
8468                  * are not changed and the read/write bits are all zeros.
8469                  */
8470                 tw32(offset, 0);
8471
8472                 val = tr32(offset);
8473
8474                 /* Test the read-only and read/write bits. */
8475                 if (((val & read_mask) != read_val) || (val & write_mask))
8476                         goto out;
8477
8478                 /* Write ones to all the bits defined by RdMask and WrMask, then
8479                  * make sure the read-only bits are not changed and the
8480                  * read/write bits are all ones.
8481                  */
8482                 tw32(offset, read_mask | write_mask);
8483
8484                 val = tr32(offset);
8485
8486                 /* Test the read-only bits. */
8487                 if ((val & read_mask) != read_val)
8488                         goto out;
8489
8490                 /* Test the read/write bits. */
8491                 if ((val & write_mask) != write_mask)
8492                         goto out;
8493
8494                 tw32(offset, save_val);
8495         }
8496
8497         return 0;
8498
8499 out:
8500         printk(KERN_ERR PFX "Register test failed at offset %x\n", offset);
8501         tw32(offset, save_val);
8502         return -EIO;
8503 }
8504
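/* Write each test pattern over every word of the given on-chip memory range
 * via tg3_write_mem()/tg3_read_mem() and verify the value read back.
 */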
8505 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
8506 {
8507         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
8508         int i;
8509         u32 j;
8510
8511         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
8512                 for (j = 0; j < len; j += 4) {
8513                         u32 val;
8514
8515                         tg3_write_mem(tp, offset + j, test_pattern[i]);
8516                         tg3_read_mem(tp, offset + j, &val);
8517                         if (val != test_pattern[i])
8518                                 return -EIO;
8519                 }
8520         }
8521         return 0;
8522 }
8523
8524 static int tg3_test_memory(struct tg3 *tp)
8525 {
8526         static struct mem_entry {
8527                 u32 offset;
8528                 u32 len;
8529         } mem_tbl_570x[] = {
8530                 { 0x00000000, 0x00b50},
8531                 { 0x00002000, 0x1c000},
8532                 { 0xffffffff, 0x00000}
8533         }, mem_tbl_5705[] = {
8534                 { 0x00000100, 0x0000c},
8535                 { 0x00000200, 0x00008},
8536                 { 0x00004000, 0x00800},
8537                 { 0x00006000, 0x01000},
8538                 { 0x00008000, 0x02000},
8539                 { 0x00010000, 0x0e000},
8540                 { 0xffffffff, 0x00000}
8541         }, mem_tbl_5755[] = {
8542                 { 0x00000200, 0x00008},
8543                 { 0x00004000, 0x00800},
8544                 { 0x00006000, 0x00800},
8545                 { 0x00008000, 0x02000},
8546                 { 0x00010000, 0x0c000},
8547                 { 0xffffffff, 0x00000}
8548         };
8549         struct mem_entry *mem_tbl;
8550         int err = 0;
8551         int i;
8552
8553         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
8554                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8555                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8556                         mem_tbl = mem_tbl_5755;
8557                 else
8558                         mem_tbl = mem_tbl_5705;
8559         } else
8560                 mem_tbl = mem_tbl_570x;
8561
8562         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
8563                 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
8564                     mem_tbl[i].len)) != 0)
8565                         break;
8566         }
8567         
8568         return err;
8569 }
8570
8571 #define TG3_MAC_LOOPBACK        0
8572 #define TG3_PHY_LOOPBACK        1
8573
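/* Loopback test: enable MAC-internal or PHY loopback, transmit a single
 * 1514-byte frame addressed to ourselves, kick the send mailbox, then poll
 * the status block until the TX consumer and RX producer indices advance,
 * and finally compare the received payload byte for byte.
 */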
8574 static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
8575 {
8576         u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
8577         u32 desc_idx;
8578         struct sk_buff *skb, *rx_skb;
8579         u8 *tx_data;
8580         dma_addr_t map;
8581         int num_pkts, tx_len, rx_len, i, err;
8582         struct tg3_rx_buffer_desc *desc;
8583
8584         if (loopback_mode == TG3_MAC_LOOPBACK) {
8585                 /* HW errata - mac loopback fails in some cases on 5780.
8586                  * Normal traffic and PHY loopback are not affected by
8587                  * errata.
8588                  */
8589                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
8590                         return 0;
8591
8592                 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
8593                            MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY |
8594                            MAC_MODE_PORT_MODE_GMII;
8595                 tw32(MAC_MODE, mac_mode);
8596         } else if (loopback_mode == TG3_PHY_LOOPBACK) {
8597                 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
8598                                            BMCR_SPEED1000);
8599                 udelay(40);
8600                 /* reset to prevent losing 1st rx packet intermittently */
8601                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
8602                         tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8603                         udelay(10);
8604                         tw32_f(MAC_RX_MODE, tp->rx_mode);
8605                 }
8606                 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
8607                            MAC_MODE_LINK_POLARITY | MAC_MODE_PORT_MODE_GMII;
8608                 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
8609                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
8610                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
8611                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8612                 }
8613                 tw32(MAC_MODE, mac_mode);
8614         }
8615         else
8616                 return -EINVAL;
8617
8618         err = -EIO;
8619
8620         tx_len = 1514;
8621         skb = netdev_alloc_skb(tp->dev, tx_len);
8622         if (!skb)
8623                 return -ENOMEM;
8624
8625         tx_data = skb_put(skb, tx_len);
8626         memcpy(tx_data, tp->dev->dev_addr, 6);
8627         memset(tx_data + 6, 0x0, 8);
8628
8629         tw32(MAC_RX_MTU_SIZE, tx_len + 4);
8630
8631         for (i = 14; i < tx_len; i++)
8632                 tx_data[i] = (u8) (i & 0xff);
8633
8634         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
8635
8636         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8637              HOSTCC_MODE_NOW);
8638
8639         udelay(10);
8640
8641         rx_start_idx = tp->hw_status->idx[0].rx_producer;
8642
8643         num_pkts = 0;
8644
8645         tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
8646
8647         tp->tx_prod++;
8648         num_pkts++;
8649
8650         tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
8651                      tp->tx_prod);
8652         tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
8653
8654         udelay(10);
8655
8656         for (i = 0; i < 10; i++) {
8657                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8658                        HOSTCC_MODE_NOW);
8659
8660                 udelay(10);
8661
8662                 tx_idx = tp->hw_status->idx[0].tx_consumer;
8663                 rx_idx = tp->hw_status->idx[0].rx_producer;
8664                 if ((tx_idx == tp->tx_prod) &&
8665                     (rx_idx == (rx_start_idx + num_pkts)))
8666                         break;
8667         }
8668
8669         pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
8670         dev_kfree_skb(skb);
8671
8672         if (tx_idx != tp->tx_prod)
8673                 goto out;
8674
8675         if (rx_idx != rx_start_idx + num_pkts)
8676                 goto out;
8677
8678         desc = &tp->rx_rcb[rx_start_idx];
8679         desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
8680         opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
8681         if (opaque_key != RXD_OPAQUE_RING_STD)
8682                 goto out;
8683
8684         if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
8685             (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
8686                 goto out;
8687
8688         rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
8689         if (rx_len != tx_len)
8690                 goto out;
8691
8692         rx_skb = tp->rx_std_buffers[desc_idx].skb;
8693
8694         map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
8695         pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
8696
8697         for (i = 14; i < tx_len; i++) {
8698                 if (*(rx_skb->data + i) != (u8) (i & 0xff))
8699                         goto out;
8700         }
8701         err = 0;
8702         
8703         /* tg3_free_rings will unmap and free the rx_skb */
8704 out:
8705         return err;
8706 }
8707
8708 #define TG3_MAC_LOOPBACK_FAILED         1
8709 #define TG3_PHY_LOOPBACK_FAILED         2
8710 #define TG3_LOOPBACK_FAILED             (TG3_MAC_LOOPBACK_FAILED |      \
8711                                          TG3_PHY_LOOPBACK_FAILED)
8712
8713 static int tg3_test_loopback(struct tg3 *tp)
8714 {
8715         int err = 0;
8716
8717         if (!netif_running(tp->dev))
8718                 return TG3_LOOPBACK_FAILED;
8719
8720         err = tg3_reset_hw(tp, 1);
8721         if (err)
8722                 return TG3_LOOPBACK_FAILED;
8723
8724         if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
8725                 err |= TG3_MAC_LOOPBACK_FAILED;
8726         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
8727                 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
8728                         err |= TG3_PHY_LOOPBACK_FAILED;
8729         }
8730
8731         return err;
8732 }
8733
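/* ethtool self-test entry point.  The NVRAM and link checks always run; the
 * offline tests additionally halt the chip (and its RX/TX CPUs), run the
 * register, memory, loopback and interrupt tests, and then restart the
 * hardware.
 */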
8734 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
8735                           u64 *data)
8736 {
8737         struct tg3 *tp = netdev_priv(dev);
8738
8739         if (tp->link_config.phy_is_low_power)
8740                 tg3_set_power_state(tp, PCI_D0);
8741
8742         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
8743
8744         if (tg3_test_nvram(tp) != 0) {
8745                 etest->flags |= ETH_TEST_FL_FAILED;
8746                 data[0] = 1;
8747         }
8748         if (tg3_test_link(tp) != 0) {
8749                 etest->flags |= ETH_TEST_FL_FAILED;
8750                 data[1] = 1;
8751         }
8752         if (etest->flags & ETH_TEST_FL_OFFLINE) {
8753                 int err, irq_sync = 0;
8754
8755                 if (netif_running(dev)) {
8756                         tg3_netif_stop(tp);
8757                         irq_sync = 1;
8758                 }
8759
8760                 tg3_full_lock(tp, irq_sync);
8761
8762                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
8763                 err = tg3_nvram_lock(tp);
8764                 tg3_halt_cpu(tp, RX_CPU_BASE);
8765                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
8766                         tg3_halt_cpu(tp, TX_CPU_BASE);
8767                 if (!err)
8768                         tg3_nvram_unlock(tp);
8769
8770                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
8771                         tg3_phy_reset(tp);
8772
8773                 if (tg3_test_registers(tp) != 0) {
8774                         etest->flags |= ETH_TEST_FL_FAILED;
8775                         data[2] = 1;
8776                 }
8777                 if (tg3_test_memory(tp) != 0) {
8778                         etest->flags |= ETH_TEST_FL_FAILED;
8779                         data[3] = 1;
8780                 }
8781                 if ((data[4] = tg3_test_loopback(tp)) != 0)
8782                         etest->flags |= ETH_TEST_FL_FAILED;
8783
8784                 tg3_full_unlock(tp);
8785
8786                 if (tg3_test_interrupt(tp) != 0) {
8787                         etest->flags |= ETH_TEST_FL_FAILED;
8788                         data[5] = 1;
8789                 }
8790
8791                 tg3_full_lock(tp, 0);
8792
8793                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8794                 if (netif_running(dev)) {
8795                         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
8796                         if (!tg3_restart_hw(tp, 1))
8797                                 tg3_netif_start(tp);
8798                 }
8799
8800                 tg3_full_unlock(tp);
8801         }
8802         if (tp->link_config.phy_is_low_power)
8803                 tg3_set_power_state(tp, PCI_D3hot);
8804
8805 }
8806
8807 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8808 {
8809         struct mii_ioctl_data *data = if_mii(ifr);
8810         struct tg3 *tp = netdev_priv(dev);
8811         int err;
8812
8813         switch (cmd) {
8814         case SIOCGMIIPHY:
8815                 data->phy_id = PHY_ADDR;
8816
8817                 /* fallthru */
8818         case SIOCGMIIREG: {
8819                 u32 mii_regval;
8820
8821                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8822                         break;                  /* We have no PHY */
8823
8824                 if (tp->link_config.phy_is_low_power)
8825                         return -EAGAIN;
8826
8827                 spin_lock_bh(&tp->lock);
8828                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
8829                 spin_unlock_bh(&tp->lock);
8830
8831                 data->val_out = mii_regval;
8832
8833                 return err;
8834         }
8835
8836         case SIOCSMIIREG:
8837                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8838                         break;                  /* We have no PHY */
8839
8840                 if (!capable(CAP_NET_ADMIN))
8841                         return -EPERM;
8842
8843                 if (tp->link_config.phy_is_low_power)
8844                         return -EAGAIN;
8845
8846                 spin_lock_bh(&tp->lock);
8847                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
8848                 spin_unlock_bh(&tp->lock);
8849
8850                 return err;
8851
8852         default:
8853                 /* do nothing */
8854                 break;
8855         }
8856         return -EOPNOTSUPP;
8857 }
8858
8859 #if TG3_VLAN_TAG_USED
8860 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
8861 {
8862         struct tg3 *tp = netdev_priv(dev);
8863
8864         if (netif_running(dev))
8865                 tg3_netif_stop(tp);
8866
8867         tg3_full_lock(tp, 0);
8868
8869         tp->vlgrp = grp;
8870
8871         /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
8872         __tg3_set_rx_mode(dev);
8873
8874         tg3_full_unlock(tp);
8875
8876         if (netif_running(dev))
8877                 tg3_netif_start(tp);
8878 }
8879
8880 static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
8881 {
8882         struct tg3 *tp = netdev_priv(dev);
8883
8884         if (netif_running(dev))
8885                 tg3_netif_stop(tp);
8886
8887         tg3_full_lock(tp, 0);
8888         if (tp->vlgrp)
8889                 tp->vlgrp->vlan_devices[vid] = NULL;
8890         tg3_full_unlock(tp);
8891
8892         if (netif_running(dev))
8893                 tg3_netif_start(tp);
8894 }
8895 #endif
8896
8897 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8898 {
8899         struct tg3 *tp = netdev_priv(dev);
8900
8901         memcpy(ec, &tp->coal, sizeof(*ec));
8902         return 0;
8903 }
8904
8905 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8906 {
8907         struct tg3 *tp = netdev_priv(dev);
8908         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
8909         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
8910
8911         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
8912                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
8913                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
8914                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
8915                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
8916         }
8917
8918         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
8919             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
8920             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
8921             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
8922             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
8923             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
8924             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
8925             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
8926             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
8927             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
8928                 return -EINVAL;
8929
8930         /* No rx interrupts will be generated if both are zero */
8931         if ((ec->rx_coalesce_usecs == 0) &&
8932             (ec->rx_max_coalesced_frames == 0))
8933                 return -EINVAL;
8934
8935         /* No tx interrupts will be generated if both are zero */
8936         if ((ec->tx_coalesce_usecs == 0) &&
8937             (ec->tx_max_coalesced_frames == 0))
8938                 return -EINVAL;
8939
8940         /* Only copy relevant parameters, ignore all others. */
8941         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
8942         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
8943         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
8944         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
8945         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
8946         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
8947         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
8948         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
8949         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
8950
8951         if (netif_running(dev)) {
8952                 tg3_full_lock(tp, 0);
8953                 __tg3_set_coalesce(tp, &tp->coal);
8954                 tg3_full_unlock(tp);
8955         }
8956         return 0;
8957 }
8958
8959 static struct ethtool_ops tg3_ethtool_ops = {
8960         .get_settings           = tg3_get_settings,
8961         .set_settings           = tg3_set_settings,
8962         .get_drvinfo            = tg3_get_drvinfo,
8963         .get_regs_len           = tg3_get_regs_len,
8964         .get_regs               = tg3_get_regs,
8965         .get_wol                = tg3_get_wol,
8966         .set_wol                = tg3_set_wol,
8967         .get_msglevel           = tg3_get_msglevel,
8968         .set_msglevel           = tg3_set_msglevel,
8969         .nway_reset             = tg3_nway_reset,
8970         .get_link               = ethtool_op_get_link,
8971         .get_eeprom_len         = tg3_get_eeprom_len,
8972         .get_eeprom             = tg3_get_eeprom,
8973         .set_eeprom             = tg3_set_eeprom,
8974         .get_ringparam          = tg3_get_ringparam,
8975         .set_ringparam          = tg3_set_ringparam,
8976         .get_pauseparam         = tg3_get_pauseparam,
8977         .set_pauseparam         = tg3_set_pauseparam,
8978         .get_rx_csum            = tg3_get_rx_csum,
8979         .set_rx_csum            = tg3_set_rx_csum,
8980         .get_tx_csum            = ethtool_op_get_tx_csum,
8981         .set_tx_csum            = tg3_set_tx_csum,
8982         .get_sg                 = ethtool_op_get_sg,
8983         .set_sg                 = ethtool_op_set_sg,
8984 #if TG3_TSO_SUPPORT != 0
8985         .get_tso                = ethtool_op_get_tso,
8986         .set_tso                = tg3_set_tso,
8987 #endif
8988         .self_test_count        = tg3_get_test_count,
8989         .self_test              = tg3_self_test,
8990         .get_strings            = tg3_get_strings,
8991         .phys_id                = tg3_phys_id,
8992         .get_stats_count        = tg3_get_stats_count,
8993         .get_ethtool_stats      = tg3_get_ethtool_stats,
8994         .get_coalesce           = tg3_get_coalesce,
8995         .set_coalesce           = tg3_set_coalesce,
8996         .get_perm_addr          = ethtool_op_get_perm_addr,
8997 };
8998
8999 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
9000 {
9001         u32 cursize, val, magic;
9002
9003         tp->nvram_size = EEPROM_CHIP_SIZE;
9004
9005         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
9006                 return;
9007
9008         if ((magic != TG3_EEPROM_MAGIC) && ((magic & 0xff000000) != 0xa5000000))
9009                 return;
9010
9011         /*
9012          * Size the chip by reading offsets at increasing powers of two.
9013          * When we encounter our validation signature, we know the addressing
9014          * has wrapped around, and thus have our chip size.
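         * For example, a 512-byte part wraps at offset 0x200, so the read
         * there returns the signature again and nvram_size becomes 0x200.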
9015          */
9016         cursize = 0x10;
9017
9018         while (cursize < tp->nvram_size) {
9019                 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
9020                         return;
9021
9022                 if (val == magic)
9023                         break;
9024
9025                 cursize <<= 1;
9026         }
9027
9028         tp->nvram_size = cursize;
9029 }
9030                 
9031 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
9032 {
9033         u32 val;
9034
9035         if (tg3_nvram_read_swab(tp, 0, &val) != 0)
9036                 return;
9037
9038         /* Selfboot format */
9039         if (val != TG3_EEPROM_MAGIC) {
9040                 tg3_get_eeprom_size(tp);
9041                 return;
9042         }
9043
9044         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
9045                 if (val != 0) {
9046                         tp->nvram_size = (val >> 16) * 1024;
9047                         return;
9048                 }
9049         }
9050         tp->nvram_size = 0x20000;
9051 }
9052
9053 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
9054 {
9055         u32 nvcfg1;
9056
9057         nvcfg1 = tr32(NVRAM_CFG1);
9058         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
9059                 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9060         }
9061         else {
9062                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9063                 tw32(NVRAM_CFG1, nvcfg1);
9064         }
9065
9066         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
9067             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
9068                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
9069                         case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
9070                                 tp->nvram_jedecnum = JEDEC_ATMEL;
9071                                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
9072                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9073                                 break;
9074                         case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
9075                                 tp->nvram_jedecnum = JEDEC_ATMEL;
9076                                 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
9077                                 break;
9078                         case FLASH_VENDOR_ATMEL_EEPROM:
9079                                 tp->nvram_jedecnum = JEDEC_ATMEL;
9080                                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9081                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9082                                 break;
9083                         case FLASH_VENDOR_ST:
9084                                 tp->nvram_jedecnum = JEDEC_ST;
9085                                 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
9086                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9087                                 break;
9088                         case FLASH_VENDOR_SAIFUN:
9089                                 tp->nvram_jedecnum = JEDEC_SAIFUN;
9090                                 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
9091                                 break;
9092                         case FLASH_VENDOR_SST_SMALL:
9093                         case FLASH_VENDOR_SST_LARGE:
9094                                 tp->nvram_jedecnum = JEDEC_SST;
9095                                 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
9096                                 break;
9097                 }
9098         }
9099         else {
9100                 tp->nvram_jedecnum = JEDEC_ATMEL;
9101                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
9102                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9103         }
9104 }
9105
9106 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
9107 {
9108         u32 nvcfg1;
9109
9110         nvcfg1 = tr32(NVRAM_CFG1);
9111
9112         /* NVRAM protection for TPM */
9113         if (nvcfg1 & (1 << 27))
9114                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
9115
9116         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
9117                 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
9118                 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
9119                         tp->nvram_jedecnum = JEDEC_ATMEL;
9120                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9121                         break;
9122                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
9123                         tp->nvram_jedecnum = JEDEC_ATMEL;
9124                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9125                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9126                         break;
9127                 case FLASH_5752VENDOR_ST_M45PE10:
9128                 case FLASH_5752VENDOR_ST_M45PE20:
9129                 case FLASH_5752VENDOR_ST_M45PE40:
9130                         tp->nvram_jedecnum = JEDEC_ST;
9131                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9132                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9133                         break;
9134         }
9135
9136         if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
9137                 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
9138                         case FLASH_5752PAGE_SIZE_256:
9139                                 tp->nvram_pagesize = 256;
9140                                 break;
9141                         case FLASH_5752PAGE_SIZE_512:
9142                                 tp->nvram_pagesize = 512;
9143                                 break;
9144                         case FLASH_5752PAGE_SIZE_1K:
9145                                 tp->nvram_pagesize = 1024;
9146                                 break;
9147                         case FLASH_5752PAGE_SIZE_2K:
9148                                 tp->nvram_pagesize = 2048;
9149                                 break;
9150                         case FLASH_5752PAGE_SIZE_4K:
9151                                 tp->nvram_pagesize = 4096;
9152                                 break;
9153                         case FLASH_5752PAGE_SIZE_264:
9154                                 tp->nvram_pagesize = 264;
9155                                 break;
9156                 }
9157         }
9158         else {
9159                 /* For eeprom, set pagesize to maximum eeprom size */
9160                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9161
9162                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9163                 tw32(NVRAM_CFG1, nvcfg1);
9164         }
9165 }
9166
9167 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
9168 {
9169         u32 nvcfg1;
9170
9171         nvcfg1 = tr32(NVRAM_CFG1);
9172
9173         /* NVRAM protection for TPM */
9174         if (nvcfg1 & (1 << 27))
9175                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
9176
9177         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
9178                 case FLASH_5755VENDOR_ATMEL_EEPROM_64KHZ:
9179                 case FLASH_5755VENDOR_ATMEL_EEPROM_376KHZ:
9180                         tp->nvram_jedecnum = JEDEC_ATMEL;
9181                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9182                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9183
9184                         nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9185                         tw32(NVRAM_CFG1, nvcfg1);
9186                         break;
9187                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
9188                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
9189                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
9190                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
9191                 case FLASH_5755VENDOR_ATMEL_FLASH_4:
9192                         tp->nvram_jedecnum = JEDEC_ATMEL;
9193                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9194                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9195                         tp->nvram_pagesize = 264;
9196                         break;
9197                 case FLASH_5752VENDOR_ST_M45PE10:
9198                 case FLASH_5752VENDOR_ST_M45PE20:
9199                 case FLASH_5752VENDOR_ST_M45PE40:
9200                         tp->nvram_jedecnum = JEDEC_ST;
9201                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9202                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9203                         tp->nvram_pagesize = 256;
9204                         break;
9205         }
9206 }
9207
9208 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
9209 {
9210         u32 nvcfg1;
9211
9212         nvcfg1 = tr32(NVRAM_CFG1);
9213
9214         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
9215                 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
9216                 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
9217                 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
9218                 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
9219                         tp->nvram_jedecnum = JEDEC_ATMEL;
9220                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9221                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9222
9223                         nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9224                         tw32(NVRAM_CFG1, nvcfg1);
9225                         break;
9226                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
9227                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
9228                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
9229                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
9230                         tp->nvram_jedecnum = JEDEC_ATMEL;
9231                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9232                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9233                         tp->nvram_pagesize = 264;
9234                         break;
9235                 case FLASH_5752VENDOR_ST_M45PE10:
9236                 case FLASH_5752VENDOR_ST_M45PE20:
9237                 case FLASH_5752VENDOR_ST_M45PE40:
9238                         tp->nvram_jedecnum = JEDEC_ST;
9239                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9240                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9241                         tp->nvram_pagesize = 256;
9242                         break;
9243         }
9244 }
9245
9246 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
9247 static void __devinit tg3_nvram_init(struct tg3 *tp)
9248 {
9249         int j;
9250
9251         tw32_f(GRC_EEPROM_ADDR,
9252              (EEPROM_ADDR_FSM_RESET |
9253               (EEPROM_DEFAULT_CLOCK_PERIOD <<
9254                EEPROM_ADDR_CLKPERD_SHIFT)));
9255
9256         /* XXX schedule_timeout() ... */
9257         for (j = 0; j < 100; j++)
9258                 udelay(10);
9259
9260         /* Enable seeprom accesses. */
9261         tw32_f(GRC_LOCAL_CTRL,
9262              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
9263         udelay(100);
9264
9265         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
9266             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
9267                 tp->tg3_flags |= TG3_FLAG_NVRAM;
9268
9269                 if (tg3_nvram_lock(tp)) {
9270                         printk(KERN_WARNING PFX "%s: Cannot get nvram lock, "
9271                                "tg3_nvram_init failed.\n", tp->dev->name);
9272                         return;
9273                 }
9274                 tg3_enable_nvram_access(tp);
9275
9276                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9277                         tg3_get_5752_nvram_info(tp);
9278                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9279                         tg3_get_5755_nvram_info(tp);
9280                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
9281                         tg3_get_5787_nvram_info(tp);
9282                 else
9283                         tg3_get_nvram_info(tp);
9284
9285                 tg3_get_nvram_size(tp);
9286
9287                 tg3_disable_nvram_access(tp);
9288                 tg3_nvram_unlock(tp);
9289
9290         } else {
9291                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
9292
9293                 tg3_get_eeprom_size(tp);
9294         }
9295 }
9296
9297 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
9298                                         u32 offset, u32 *val)
9299 {
9300         u32 tmp;
9301         int i;
9302
9303         if (offset > EEPROM_ADDR_ADDR_MASK ||
9304             (offset % 4) != 0)
9305                 return -EINVAL;
9306
9307         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
9308                                         EEPROM_ADDR_DEVID_MASK |
9309                                         EEPROM_ADDR_READ);
9310         tw32(GRC_EEPROM_ADDR,
9311              tmp |
9312              (0 << EEPROM_ADDR_DEVID_SHIFT) |
9313              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
9314               EEPROM_ADDR_ADDR_MASK) |
9315              EEPROM_ADDR_READ | EEPROM_ADDR_START);
9316
9317         for (i = 0; i < 10000; i++) {
9318                 tmp = tr32(GRC_EEPROM_ADDR);
9319
9320                 if (tmp & EEPROM_ADDR_COMPLETE)
9321                         break;
9322                 udelay(100);
9323         }
9324         if (!(tmp & EEPROM_ADDR_COMPLETE))
9325                 return -EBUSY;
9326
9327         *val = tr32(GRC_EEPROM_DATA);
9328         return 0;
9329 }
9330
9331 #define NVRAM_CMD_TIMEOUT 10000
9332
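/* Kick off an NVRAM command and poll for NVRAM_CMD_DONE.  With
 * NVRAM_CMD_TIMEOUT == 10000 and udelay(10) per iteration, the
 * worst-case busy-wait below is roughly 10000 * 10us = 100 msec.
 */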
9333 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
9334 {
9335         int i;
9336
9337         tw32(NVRAM_CMD, nvram_cmd);
9338         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
9339                 udelay(10);
9340                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
9341                         udelay(10);
9342                         break;
9343                 }
9344         }
9345         if (i == NVRAM_CMD_TIMEOUT) {
9346                 return -EBUSY;
9347         }
9348         return 0;
9349 }
9350
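/* Buffered Atmel DataFlash parts (AT45DB0x1B family) address their
 * 264-byte pages as <page number, byte-in-page> rather than as a flat
 * byte offset; tg3_nvram_phys_addr() converts a linear offset into that
 * form and tg3_nvram_logical_addr() converts it back.
 *
 * Worked example, assuming ATMEL_AT45DB0X1B_PAGE_POS is 9 (enough bits
 * to index a 264-byte page) and nvram_pagesize is 264:
 *
 *   linear 0x214 (= 2 * 264 + 4)  ->  physical (2 << 9) + 4 = 0x404
 *   physical 0x404                ->  linear 2 * 264 + 4 = 0x214
 */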
9351 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
9352 {
9353         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9354             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9355             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9356             (tp->nvram_jedecnum == JEDEC_ATMEL))
9357
9358                 addr = ((addr / tp->nvram_pagesize) <<
9359                         ATMEL_AT45DB0X1B_PAGE_POS) +
9360                        (addr % tp->nvram_pagesize);
9361
9362         return addr;
9363 }
9364
9365 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
9366 {
9367         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9368             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9369             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9370             (tp->nvram_jedecnum == JEDEC_ATMEL))
9371
9372                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
9373                         tp->nvram_pagesize) +
9374                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
9375
9376         return addr;
9377 }
9378
9379 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
9380 {
9381         int ret;
9382
9383         if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
9384                 return tg3_nvram_read_using_eeprom(tp, offset, val);
9385
9386         offset = tg3_nvram_phys_addr(tp, offset);
9387
9388         if (offset > NVRAM_ADDR_MSK)
9389                 return -EINVAL;
9390
9391         ret = tg3_nvram_lock(tp);
9392         if (ret)
9393                 return ret;
9394
9395         tg3_enable_nvram_access(tp);
9396
9397         tw32(NVRAM_ADDR, offset);
9398         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
9399                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
9400
9401         if (ret == 0)
9402                 *val = swab32(tr32(NVRAM_RDDATA));
9403
9404         tg3_disable_nvram_access(tp);
9405
9406         tg3_nvram_unlock(tp);
9407
9408         return ret;
9409 }
9410
9411 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
9412 {
9413         int err;
9414         u32 tmp;
9415
9416         err = tg3_nvram_read(tp, offset, &tmp);
9417         *val = swab32(tmp);
9418         return err;
9419 }
9420
9421 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
9422                                     u32 offset, u32 len, u8 *buf)
9423 {
9424         int i, j, rc = 0;
9425         u32 val;
9426
9427         for (i = 0; i < len; i += 4) {
9428                 u32 addr, data;
9429
9430                 addr = offset + i;
9431
9432                 memcpy(&data, buf + i, 4);
9433
9434                 tw32(GRC_EEPROM_DATA, cpu_to_le32(data));
9435
9436                 val = tr32(GRC_EEPROM_ADDR);
9437                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
9438
9439                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
9440                         EEPROM_ADDR_READ);
9441                 tw32(GRC_EEPROM_ADDR, val |
9442                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
9443                         (addr & EEPROM_ADDR_ADDR_MASK) |
9444                         EEPROM_ADDR_START |
9445                         EEPROM_ADDR_WRITE);
9446                 
9447                 for (j = 0; j < 10000; j++) {
9448                         val = tr32(GRC_EEPROM_ADDR);
9449
9450                         if (val & EEPROM_ADDR_COMPLETE)
9451                                 break;
9452                         udelay(100);
9453                 }
9454                 if (!(val & EEPROM_ADDR_COMPLETE)) {
9455                         rc = -EBUSY;
9456                         break;
9457                 }
9458         }
9459
9460         return rc;
9461 }
9462
9463 /* offset and length are dword aligned */
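/* Unbuffered flash parts can only be reprogrammed a full page at a time,
 * so each affected page is handled read/modify/write style: read the
 * page into a bounce buffer, merge in the caller's data, issue a write
 * enable, erase the page, issue another write enable, then program the
 * page back one 32-bit word per NVRAM_CMD_WR, flagging the first and
 * last words with NVRAM_CMD_FIRST/NVRAM_CMD_LAST.
 */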
9464 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
9465                 u8 *buf)
9466 {
9467         int ret = 0;
9468         u32 pagesize = tp->nvram_pagesize;
9469         u32 pagemask = pagesize - 1;
9470         u32 nvram_cmd;
9471         u8 *tmp;
9472
9473         tmp = kmalloc(pagesize, GFP_KERNEL);
9474         if (tmp == NULL)
9475                 return -ENOMEM;
9476
9477         while (len) {
9478                 int j;
9479                 u32 phy_addr, page_off, size;
9480
9481                 phy_addr = offset & ~pagemask;
9482         
9483                 for (j = 0; j < pagesize; j += 4) {
9484                         if ((ret = tg3_nvram_read(tp, phy_addr + j,
9485                                                 (u32 *) (tmp + j))))
9486                                 break;
9487                 }
9488                 if (ret)
9489                         break;
9490
9491                 page_off = offset & pagemask;
9492                 size = pagesize;
9493                 if (len < size)
9494                         size = len;
9495
9496                 len -= size;
9497
9498                 memcpy(tmp + page_off, buf, size);
9499
9500                 offset = offset + (pagesize - page_off);
9501
9502                 tg3_enable_nvram_access(tp);
9503
9504                 /*
9505                  * Before we can erase the flash page, we need
9506                  * to issue a special "write enable" command.
9507                  */
9508                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9509
9510                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9511                         break;
9512
9513                 /* Erase the target page */
9514                 tw32(NVRAM_ADDR, phy_addr);
9515
9516                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
9517                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
9518
9519                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9520                         break;
9521
9522                 /* Issue another write enable to start the write. */
9523                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9524
9525                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9526                         break;
9527
9528                 for (j = 0; j < pagesize; j += 4) {
9529                         u32 data;
9530
9531                         data = *((u32 *) (tmp + j));
9532                         tw32(NVRAM_WRDATA, cpu_to_be32(data));
9533
9534                         tw32(NVRAM_ADDR, phy_addr + j);
9535
9536                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
9537                                 NVRAM_CMD_WR;
9538
9539                         if (j == 0)
9540                                 nvram_cmd |= NVRAM_CMD_FIRST;
9541                         else if (j == (pagesize - 4))
9542                                 nvram_cmd |= NVRAM_CMD_LAST;
9543
9544                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
9545                                 break;
9546                 }
9547                 if (ret)
9548                         break;
9549         }
9550
9551         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9552         tg3_nvram_exec_cmd(tp, nvram_cmd);
9553
9554         kfree(tmp);
9555
9556         return ret;
9557 }
9558
9559 /* offset and length are dword aligned */
9560 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
9561                 u8 *buf)
9562 {
9563         int i, ret = 0;
9564
9565         for (i = 0; i < len; i += 4, offset += 4) {
9566                 u32 data, page_off, phy_addr, nvram_cmd;
9567
9568                 memcpy(&data, buf + i, 4);
9569                 tw32(NVRAM_WRDATA, cpu_to_be32(data));
9570
9571                 page_off = offset % tp->nvram_pagesize;
9572
9573                 phy_addr = tg3_nvram_phys_addr(tp, offset);
9574
9575                 tw32(NVRAM_ADDR, phy_addr);
9576
9577                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
9578
9579                 if ((page_off == 0) || (i == 0))
9580                         nvram_cmd |= NVRAM_CMD_FIRST;
9581                 if (page_off == (tp->nvram_pagesize - 4))
9582                         nvram_cmd |= NVRAM_CMD_LAST;
9583
9584                 if (i == (len - 4))
9585                         nvram_cmd |= NVRAM_CMD_LAST;
9586
9587                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
9588                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
9589                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
9590                     (tp->nvram_jedecnum == JEDEC_ST) &&
9591                     (nvram_cmd & NVRAM_CMD_FIRST)) {
9592
9593                         if ((ret = tg3_nvram_exec_cmd(tp,
9594                                 NVRAM_CMD_WREN | NVRAM_CMD_GO |
9595                                 NVRAM_CMD_DONE)))
9596
9597                                 break;
9598                 }
9599                 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
9600                         /* We always do complete word writes to eeprom. */
9601                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
9602                 }
9603
9604                 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
9605                         break;
9606         }
9607         return ret;
9608 }
9609
9610 /* offset and length are dword aligned */
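/* Rough write dispatch:
 *
 *   !TG3_FLAG_NVRAM                              -> tg3_nvram_write_block_using_eeprom()
 *   TG3_FLAG_NVRAM_BUFFERED or !TG3_FLG2_FLASH   -> tg3_nvram_write_block_buffered()
 *   TG3_FLG2_FLASH and !TG3_FLAG_NVRAM_BUFFERED  -> tg3_nvram_write_block_unbuffered()
 *
 * When TG3_FLAG_EEPROM_WRITE_PROT is set, the GPIO1-based write protect
 * is de-asserted for the duration of the operation.
 */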
9611 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
9612 {
9613         int ret;
9614
9615         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
9616                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
9617                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
9618                 udelay(40);
9619         }
9620
9621         if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
9622                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
9623         }
9624         else {
9625                 u32 grc_mode;
9626
9627                 ret = tg3_nvram_lock(tp);
9628                 if (ret)
9629                         return ret;
9630
9631                 tg3_enable_nvram_access(tp);
9632                 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
9633                     !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
9634                         tw32(NVRAM_WRITE1, 0x406);
9635
9636                 grc_mode = tr32(GRC_MODE);
9637                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
9638
9639                 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
9640                         !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
9641
9642                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
9643                                 buf);
9644                 }
9645                 else {
9646                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
9647                                 buf);
9648                 }
9649
9650                 grc_mode = tr32(GRC_MODE);
9651                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
9652
9653                 tg3_disable_nvram_access(tp);
9654                 tg3_nvram_unlock(tp);
9655         }
9656
9657         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
9658                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9659                 udelay(40);
9660         }
9661
9662         return ret;
9663 }
9664
9665 struct subsys_tbl_ent {
9666         u16 subsys_vendor, subsys_devid;
9667         u32 phy_id;
9668 };
9669
9670 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
9671         /* Broadcom boards. */
9672         { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
9673         { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
9674         { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
9675         { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
9676         { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
9677         { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
9678         { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
9679         { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
9680         { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
9681         { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
9682         { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
9683
9684         /* 3com boards. */
9685         { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
9686         { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
9687         { PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
9688         { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
9689         { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
9690
9691         /* DELL boards. */
9692         { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
9693         { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
9694         { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
9695         { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
9696
9697         /* Compaq boards. */
9698         { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
9699         { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
9700         { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
9701         { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
9702         { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
9703
9704         /* IBM boards. */
9705         { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
9706 };
9707
9708 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
9709 {
9710         int i;
9711
9712         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
9713                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
9714                      tp->pdev->subsystem_vendor) &&
9715                     (subsys_id_to_phy_id[i].subsys_devid ==
9716                      tp->pdev->subsystem_device))
9717                         return &subsys_id_to_phy_id[i];
9718         }
9719         return NULL;
9720 }
9721
9722 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
9723 {
9724         u32 val;
9725         u16 pmcsr;
9726
9727         /* On some early chips the SRAM cannot be accessed in D3hot state,
9728          * so we need to make sure we're in D0.
9729          */
9730         pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
9731         pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9732         pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
9733         msleep(1);
9734
9735         /* Make sure register accesses (indirect or otherwise)
9736          * will function correctly.
9737          */
9738         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9739                                tp->misc_host_ctrl);
9740
9741         /* The memory arbiter has to be enabled in order for SRAM accesses
9742          * to succeed.  Normally on powerup the tg3 chip firmware will make
9743          * sure it is enabled, but other entities such as system netboot
9744          * code might disable it.
9745          */
9746         val = tr32(MEMARB_MODE);
9747         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9748
9749         tp->phy_id = PHY_ID_INVALID;
9750         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9751
9752         /* Assume an onboard device by default.  */
9753         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
9754
9755         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9756         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9757                 u32 nic_cfg, led_cfg;
9758                 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
9759                 int eeprom_phy_serdes = 0;
9760
9761                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9762                 tp->nic_sram_data_cfg = nic_cfg;
9763
9764                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
9765                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
9766                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
9767                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
9768                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
9769                     (ver > 0) && (ver < 0x100))
9770                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
9771
9772                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
9773                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
9774                         eeprom_phy_serdes = 1;
9775
9776                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
9777                 if (nic_phy_id != 0) {
9778                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
9779                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
9780
9781                         eeprom_phy_id  = (id1 >> 16) << 10;
9782                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
9783                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
9784                 } else
9785                         eeprom_phy_id = 0;
9786
9787                 tp->phy_id = eeprom_phy_id;
9788                 if (eeprom_phy_serdes) {
9789                         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
9790                                 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
9791                         else
9792                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9793                 }
9794
9795                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9796                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
9797                                     SHASTA_EXT_LED_MODE_MASK);
9798                 else
9799                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
9800
9801                 switch (led_cfg) {
9802                 default:
9803                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
9804                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9805                         break;
9806
9807                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
9808                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
9809                         break;
9810
9811                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
9812                         tp->led_ctrl = LED_CTRL_MODE_MAC;
9813
9814                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
9815                          * read on some older 5700/5701 bootcode.
9816                          */
9817                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
9818                             ASIC_REV_5700 ||
9819                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
9820                             ASIC_REV_5701)
9821                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9822
9823                         break;
9824
9825                 case SHASTA_EXT_LED_SHARED:
9826                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
9827                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
9828                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
9829                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
9830                                                  LED_CTRL_MODE_PHY_2);
9831                         break;
9832
9833                 case SHASTA_EXT_LED_MAC:
9834                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
9835                         break;
9836
9837                 case SHASTA_EXT_LED_COMBO:
9838                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
9839                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
9840                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
9841                                                  LED_CTRL_MODE_PHY_2);
9842                         break;
9843
9844                 }
9845
9846                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9847                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
9848                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
9849                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
9850
9851                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP)
9852                         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
9853                 else
9854                         tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
9855
9856                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9857                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
9858                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9859                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
9860                 }
9861                 if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
9862                         tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
9863
9864                 if (cfg2 & (1 << 17))
9865                         tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
9866
9867                 /* serdes signal pre-emphasis in register 0x590 is set
9868                  * by the bootcode if bit 18 is set.  */
9869                 if (cfg2 & (1 << 18))
9870                         tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
9871         }
9872 }
9873
9874 static int __devinit tg3_phy_probe(struct tg3 *tp)
9875 {
9876         u32 hw_phy_id_1, hw_phy_id_2;
9877         u32 hw_phy_id, hw_phy_id_masked;
9878         int err;
9879
9880         /* Reading the PHY ID register can conflict with ASF
9881          * firmware access to the PHY hardware.
9882          */
9883         err = 0;
9884         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
9885                 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
9886         } else {
9887                 /* Now read the physical PHY_ID from the chip and verify
9888                  * that it is sane.  If it doesn't look good, we fall back
9889                  * to the PHY_ID found in the eeprom area or, failing
9890                  * that, to the hard-coded subsystem-ID table.
9891                  */
9892                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
9893                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
9894
9895                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
9896                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
9897                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
9898
9899                 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
9900         }
9901
9902         if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
9903                 tp->phy_id = hw_phy_id;
9904                 if (hw_phy_id_masked == PHY_ID_BCM8002)
9905                         tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9906                 else
9907                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
9908         } else {
9909                 if (tp->phy_id != PHY_ID_INVALID) {
9910                         /* Do nothing, phy ID already set up in
9911                          * tg3_get_eeprom_hw_cfg().
9912                          */
9913                 } else {
9914                         struct subsys_tbl_ent *p;
9915
9916                         /* No eeprom signature?  Try the hardcoded
9917                          * subsys device table.
9918                          */
9919                         p = lookup_by_subsys(tp);
9920                         if (!p)
9921                                 return -ENODEV;
9922
9923                         tp->phy_id = p->phy_id;
9924                         if (!tp->phy_id ||
9925                             tp->phy_id == PHY_ID_BCM8002)
9926                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9927                 }
9928         }
9929
9930         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
9931             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
9932                 u32 bmsr, adv_reg, tg3_ctrl;
9933
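                /* BMSR's link-status bit is latching-low per the MII spec,
                 * so read the register twice; the second read reflects the
                 * current link state.
                 */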
9934                 tg3_readphy(tp, MII_BMSR, &bmsr);
9935                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
9936                     (bmsr & BMSR_LSTATUS))
9937                         goto skip_phy_reset;
9938                     
9939                 err = tg3_phy_reset(tp);
9940                 if (err)
9941                         return err;
9942
9943                 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
9944                            ADVERTISE_100HALF | ADVERTISE_100FULL |
9945                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
9946                 tg3_ctrl = 0;
9947                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
9948                         tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
9949                                     MII_TG3_CTRL_ADV_1000_FULL);
9950                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
9951                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
9952                                 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
9953                                              MII_TG3_CTRL_ENABLE_AS_MASTER);
9954                 }
9955
9956                 if (!tg3_copper_is_advertising_all(tp)) {
9957                         tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9958
9959                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9960                                 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9961
9962                         tg3_writephy(tp, MII_BMCR,
9963                                      BMCR_ANENABLE | BMCR_ANRESTART);
9964                 }
9965                 tg3_phy_set_wirespeed(tp);
9966
9967                 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9968                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9969                         tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9970         }
9971
9972 skip_phy_reset:
9973         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
9974                 err = tg3_init_5401phy_dsp(tp);
9975                 if (err)
9976                         return err;
9977         }
9978
9979         if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
9980                 err = tg3_init_5401phy_dsp(tp);
9981         }
9982
9983         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
9984                 tp->link_config.advertising =
9985                         (ADVERTISED_1000baseT_Half |
9986                          ADVERTISED_1000baseT_Full |
9987                          ADVERTISED_Autoneg |
9988                          ADVERTISED_FIBRE);
9989         if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
9990                 tp->link_config.advertising &=
9991                         ~(ADVERTISED_1000baseT_Half |
9992                           ADVERTISED_1000baseT_Full);
9993
9994         return err;
9995 }
9996
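/* The board part number lives in the PCI VPD area, read either out of
 * NVRAM at offset 0x100 (when the image carries TG3_EEPROM_MAGIC) or
 * through the PCI VPD capability.  A rough sketch of the layout the
 * parser below expects (lengths and the part number itself are purely
 * illustrative):
 *
 *   0x82 <len_lo> <len_hi> "Broadcom ..."     - Identifier String tag
 *   0x90 <len_lo> <len_hi>                    - VPD-R tag
 *        'P' 'N' <len> "BCM95704A6"           - part number keyword field
 *        ... further keyword fields ...
 *
 * Each VPD-R keyword field is a 2-byte keyword, a 1-byte length, and
 * the data bytes.
 */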
9997 static void __devinit tg3_read_partno(struct tg3 *tp)
9998 {
9999         unsigned char vpd_data[256];
10000         int i;
10001         u32 magic;
10002
10003         if (tg3_nvram_read_swab(tp, 0x0, &magic))
10004                 goto out_not_found;
10005
10006         if (magic == TG3_EEPROM_MAGIC) {
10007                 for (i = 0; i < 256; i += 4) {
10008                         u32 tmp;
10009
10010                         if (tg3_nvram_read(tp, 0x100 + i, &tmp))
10011                                 goto out_not_found;
10012
10013                         vpd_data[i + 0] = ((tmp >>  0) & 0xff);
10014                         vpd_data[i + 1] = ((tmp >>  8) & 0xff);
10015                         vpd_data[i + 2] = ((tmp >> 16) & 0xff);
10016                         vpd_data[i + 3] = ((tmp >> 24) & 0xff);
10017                 }
10018         } else {
10019                 int vpd_cap;
10020
10021                 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
10022                 for (i = 0; i < 256; i += 4) {
10023                         u32 tmp, j = 0;
10024                         u16 tmp16;
10025
10026                         pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
10027                                               i);
10028                         while (j++ < 100) {
10029                                 pci_read_config_word(tp->pdev, vpd_cap +
10030                                                      PCI_VPD_ADDR, &tmp16);
10031                                 if (tmp16 & 0x8000)
10032                                         break;
10033                                 msleep(1);
10034                         }
10035                         if (!(tmp16 & 0x8000))
10036                                 goto out_not_found;
10037
10038                         pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
10039                                               &tmp);
10040                         tmp = cpu_to_le32(tmp);
10041                         memcpy(&vpd_data[i], &tmp, 4);
10042                 }
10043         }
10044
10045         /* Now parse and find the part number. */
10046         for (i = 0; i < 256; ) {
10047                 unsigned char val = vpd_data[i];
10048                 int block_end;
10049
10050                 if (val == 0x82 || val == 0x91) {
10051                         i = (i + 3 +
10052                              (vpd_data[i + 1] +
10053                               (vpd_data[i + 2] << 8)));
10054                         continue;
10055                 }
10056
10057                 if (val != 0x90)
10058                         goto out_not_found;
10059
10060                 block_end = (i + 3 +
10061                              (vpd_data[i + 1] +
10062                               (vpd_data[i + 2] << 8)));
10063                 i += 3;
10064                 while (i < block_end) {
10065                         if (vpd_data[i + 0] == 'P' &&
10066                             vpd_data[i + 1] == 'N') {
10067                                 int partno_len = vpd_data[i + 2];
10068
10069                                 if (partno_len > 24)
10070                                         goto out_not_found;
10071
10072                                 memcpy(tp->board_part_number,
10073                                        &vpd_data[i + 3],
10074                                        partno_len);
10075
10076                                 /* Success. */
10077                                 return;
10078                         }
                        /* Not the "PN" keyword: skip this field (2-byte
                         * keyword, 1-byte length, then the data) so the
                         * loop can advance.
                         */
                        i += 3 + vpd_data[i + 2];
10079                 }
10080
10081                 /* Part number not found. */
10082                 goto out_not_found;
10083         }
10084
10085 out_not_found:
10086         strcpy(tp->board_part_number, "none");
10087 }
10088
10089 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
10090 {
10091         u32 val, offset, start;
10092
10093         if (tg3_nvram_read_swab(tp, 0, &val))
10094                 return;
10095
10096         if (val != TG3_EEPROM_MAGIC)
10097                 return;
10098
10099         if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
10100             tg3_nvram_read_swab(tp, 0x4, &start))
10101                 return;
10102
10103         offset = tg3_nvram_logical_addr(tp, offset);
10104         if (tg3_nvram_read_swab(tp, offset, &val))
10105                 return;
10106
10107         if ((val & 0xfc000000) == 0x0c000000) {
10108                 u32 ver_offset, addr;
10109                 int i;
10110
10111                 if (tg3_nvram_read_swab(tp, offset + 4, &val) ||
10112                     tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
10113                         return;
10114
10115                 if (val != 0)
10116                         return;
10117
10118                 addr = offset + ver_offset - start;
10119                 for (i = 0; i < 16; i += 4) {
10120                         if (tg3_nvram_read(tp, addr + i, &val))
10121                                 return;
10122
10123                         val = cpu_to_le32(val);
10124                         memcpy(tp->fw_ver + i, &val, 4);
10125                 }
10126         }
10127 }
10128
10129 static int __devinit tg3_get_invariants(struct tg3 *tp)
10130 {
10131         static struct pci_device_id write_reorder_chipsets[] = {
10132                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
10133                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
10134                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
10135                              PCI_DEVICE_ID_AMD_8131_BRIDGE) },
10136                 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
10137                              PCI_DEVICE_ID_VIA_8385_0) },
10138                 { },
10139         };
10140         u32 misc_ctrl_reg;
10141         u32 cacheline_sz_reg;
10142         u32 pci_state_reg, grc_misc_cfg;
10143         u32 val;
10144         u16 pci_cmd;
10145         int err;
10146
10147         /* Force memory write invalidate off.  If we leave it on,
10148          * then on 5700_BX chips we have to enable a workaround.
10149          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
10150          * to match the cacheline size.  The Broadcom driver has this
10151          * workaround but turns MWI off all the time, so it never uses
10152          * it.  This seems to suggest that the workaround is insufficient.
10153          */
10154         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10155         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
10156         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10157
10158         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
10159          * has the register indirect write enable bit set before
10160          * we try to access any of the MMIO registers.  It is also
10161          * critical that the PCI-X hw workaround situation is decided
10162          * before that as well.
10163          */
10164         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10165                               &misc_ctrl_reg);
10166
10167         tp->pci_chip_rev_id = (misc_ctrl_reg >>
10168                                MISC_HOST_CTRL_CHIPREV_SHIFT);
10169
10170         /* Wrong chip ID in 5752 A0. This code can be removed later
10171          * as A0 is not in production.
10172          */
10173         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
10174                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
10175
10176         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
10177          * we need to disable memory and use config. cycles
10178          * only to access all registers. The 5702/03 chips
10179          * can mistakenly decode the special cycles from the
10180          * ICH chipsets as memory write cycles, causing corruption
10181          * of register and memory space. Only certain ICH bridges
10182          * will drive special cycles with non-zero data during the
10183          * address phase which can fall within the 5703's address
10184          * range. This is not an ICH bug as the PCI spec allows
10185          * non-zero address during special cycles. However, only
10186          * these ICH bridges are known to drive non-zero addresses
10187          * during special cycles.
10188          *
10189          * Since special cycles do not cross PCI bridges, we only
10190          * enable this workaround if the 5703 is on the secondary
10191          * bus of these ICH bridges.
10192          */
10193         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
10194             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
10195                 static struct tg3_dev_id {
10196                         u32     vendor;
10197                         u32     device;
10198                         u32     rev;
10199                 } ich_chipsets[] = {
10200                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
10201                           PCI_ANY_ID },
10202                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
10203                           PCI_ANY_ID },
10204                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
10205                           0xa },
10206                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
10207                           PCI_ANY_ID },
10208                         { },
10209                 };
10210                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
10211                 struct pci_dev *bridge = NULL;
10212
10213                 while (pci_id->vendor != 0) {
10214                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
10215                                                 bridge);
10216                         if (!bridge) {
10217                                 pci_id++;
10218                                 continue;
10219                         }
10220                         if (pci_id->rev != PCI_ANY_ID) {
10221                                 u8 rev;
10222
10223                                 pci_read_config_byte(bridge, PCI_REVISION_ID,
10224                                                      &rev);
10225                                 if (rev > pci_id->rev)
10226                                         continue;
10227                         }
10228                         if (bridge->subordinate &&
10229                             (bridge->subordinate->number ==
10230                              tp->pdev->bus->number)) {
10231
10232                                 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
10233                                 pci_dev_put(bridge);
10234                                 break;
10235                         }
10236                 }
10237         }
10238
10239         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
10240          * DMA addresses > 40-bit. This bridge may have other additional
10241          * 57xx devices behind it in some 4-port NIC designs for example.
10242          * Any tg3 device found behind the bridge will also need the 40-bit
10243          * DMA workaround.
10244          */
10245         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
10246             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
10247                 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
10248                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10249                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
10250         }
10251         else {
10252                 struct pci_dev *bridge = NULL;
10253
10254                 do {
10255                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
10256                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
10257                                                 bridge);
10258                         if (bridge && bridge->subordinate &&
10259                             (bridge->subordinate->number <=
10260                              tp->pdev->bus->number) &&
10261                             (bridge->subordinate->subordinate >=
10262                              tp->pdev->bus->number)) {
10263                                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10264                                 pci_dev_put(bridge);
10265                                 break;
10266                         }
10267                 } while (bridge);
10268         }
10269
10270         /* Initialize misc host control in PCI block. */
10271         tp->misc_host_ctrl |= (misc_ctrl_reg &
10272                                MISC_HOST_CTRL_CHIPREV);
10273         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10274                                tp->misc_host_ctrl);
10275
10276         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10277                               &cacheline_sz_reg);
10278
10279         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
10280         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
10281         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
10282         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
10283
10284         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
10285             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
10286             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10287             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10288             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
10289                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
10290
10291         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
10292             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
10293                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
10294
10295         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
10296                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10297                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) {
10298                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
10299                         tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
10300                 } else {
10301                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 |
10302                                           TG3_FLG2_HW_TSO_1_BUG;
10303                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
10304                                 ASIC_REV_5750 &&
10305                             tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
10306                                 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_1_BUG;
10307                 }
10308         }
10309
10310         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
10311             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
10312             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
10313             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 &&
10314             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787)
10315                 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
10316
10317         if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
10318                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
10319
10320         /* If we have an AMD 762 or VIA K8T800 chipset, write
10321          * reordering to the mailbox registers done by the host
10322          * controller can cause major troubles.  We read back from
10323          * every mailbox register write to force the writes to be
10324          * posted to the chip in order.
10325          */
10326         if (pci_dev_present(write_reorder_chipsets) &&
10327             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
10328                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
10329
10330         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
10331             tp->pci_lat_timer < 64) {
10332                 tp->pci_lat_timer = 64;
10333
10334                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
10335                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
10336                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
10337                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
10338
10339                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10340                                        cacheline_sz_reg);
10341         }
10342
10343         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10344                               &pci_state_reg);
10345
10346         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
10347                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
10348
10349                 /* If this is a 5700 BX chipset, and we are in PCI-X
10350                  * mode, enable register write workaround.
10351                  *
10352                  * The workaround is to use indirect register accesses
10353                  * for all chip writes not to mailbox registers.
10354                  */
10355                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
10356                         u32 pm_reg;
10357                         u16 pci_cmd;
10358
10359                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10360
10361                         /* The chip can have its power management PCI config
10362                          * space registers clobbered due to this bug.
10363                          * So explicitly force the chip into D0 here.
10364                          */
10365                         pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
10366                                               &pm_reg);
10367                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
10368                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
10369                         pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
10370                                                pm_reg);
10371
10372                         /* Also, force SERR#/PERR# in PCI command. */
10373                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10374                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
10375                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10376                 }
10377         }
10378
10379         /* 5700 BX chips need to have their TX producer index mailboxes
10380          * written twice to work around a bug.
10381          */
10382         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
10383                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
10384
10385         /* Back to back register writes can cause problems on this chip,
10386          * the workaround is to read back all reg writes except those to
10387          * mailbox regs.  See tg3_write_indirect_reg32().
10388          *
10389          * PCI Express 5750_A0 rev chips need this workaround too.
10390          */
10391         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
10392             ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
10393              tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
10394                 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
10395
10396         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
10397                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
10398         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
10399                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
10400
10401         /* Chip-specific fixup from Broadcom driver */
10402         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
10403             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
10404                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
10405                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
10406         }
10407
10408         /* Default fast path register access methods */
10409         tp->read32 = tg3_read32;
10410         tp->write32 = tg3_write32;
10411         tp->read32_mbox = tg3_read32;
10412         tp->write32_mbox = tg3_write32;
10413         tp->write32_tx_mbox = tg3_write32;
10414         tp->write32_rx_mbox = tg3_write32;
10415
10416         /* Various workaround register access methods */
10417         if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
10418                 tp->write32 = tg3_write_indirect_reg32;
10419         else if (tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG)
10420                 tp->write32 = tg3_write_flush_reg32;
10421
10422         if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
10423             (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
10424                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
10425                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
10426                         tp->write32_rx_mbox = tg3_write_flush_reg32;
10427         }
10428
10429         if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
10430                 tp->read32 = tg3_read_indirect_reg32;
10431                 tp->write32 = tg3_write_indirect_reg32;
10432                 tp->read32_mbox = tg3_read_indirect_mbox;
10433                 tp->write32_mbox = tg3_write_indirect_mbox;
10434                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
10435                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
10436
10437                 iounmap(tp->regs);
10438                 tp->regs = NULL;
10439
10440                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10441                 pci_cmd &= ~PCI_COMMAND_MEMORY;
10442                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10443         }
10444
10445         if (tp->write32 == tg3_write_indirect_reg32 ||
10446             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
10447              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10448               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
10449                 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
10450
10451         /* Get eeprom hw config before calling tg3_set_power_state().
10452          * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
10453          * determined before calling tg3_set_power_state() so that
10454          * we know whether or not to switch out of Vaux power.
10455          * When the flag is set, it means that GPIO1 is used for eeprom
10456          * write protect and also implies that it is a LOM where GPIOs
10457          * are not used to switch power.
10458          */
10459         tg3_get_eeprom_hw_cfg(tp);
10460
10461         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
10462          * GPIO1 driven high will bring 5700's external PHY out of reset.
10463          * It is also used as eeprom write protect on LOMs.
10464          */
10465         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
10466         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10467             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
10468                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10469                                        GRC_LCLCTRL_GPIO_OUTPUT1);
10470         /* Unused GPIO3 must be driven as output on 5752 because there
10471          * are no pull-up resistors on unused GPIO pins.
10472          */
10473         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10474                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
10475
10476         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10477                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
10478
10479         /* Force the chip into D0. */
10480         err = tg3_set_power_state(tp, PCI_D0);
10481         if (err) {
10482                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
10483                        pci_name(tp->pdev));
10484                 return err;
10485         }
10486
10487         /* 5700 B0 chips do not support checksumming correctly due
10488          * to hardware bugs.
10489          */
10490         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
10491                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
10492
10493         /* Derive initial jumbo mode from MTU assigned in
10494          * ether_setup() via the alloc_etherdev() call
10495          */
10496         if (tp->dev->mtu > ETH_DATA_LEN &&
10497             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
10498                 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
10499
10500         /* Determine WakeOnLan speed to use. */
10501         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10502             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
10503             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
10504             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
10505                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
10506         } else {
10507                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
10508         }
10509
10510         /* A few boards don't want the Ethernet@WireSpeed PHY feature */
10511         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10512             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
10513              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
10514              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
10515             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
10516                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
10517
10518         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
10519             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
10520                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
10521         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
10522                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
10523
10524         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10525                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10526                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
10527                         tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
10528                 else
10529                         tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
10530         }
10531
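        /* Host coalescing mode: chips other than the 5700 AX/BX steppings
         * additionally support the 32-byte status block option.
         */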
10532         tp->coalesce_mode = 0;
10533         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
10534             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
10535                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
10536
10537         /* Initialize MAC MI mode, polling disabled. */
10538         tw32_f(MAC_MI_MODE, tp->mi_mode);
10539         udelay(80);
10540
10541         /* Initialize data/descriptor byte/word swapping. */
10542         val = tr32(GRC_MODE);
10543         val &= GRC_MODE_HOST_STACKUP;
10544         tw32(GRC_MODE, val | tp->grc_mode);
10545
10546         tg3_switch_clocks(tp);
10547
10548         /* Clear this out for sanity. */
10549         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10550
10551         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10552                               &pci_state_reg);
10553         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
10554             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
10555                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
10556
10557                 if (chiprevid == CHIPREV_ID_5701_A0 ||
10558                     chiprevid == CHIPREV_ID_5701_B0 ||
10559                     chiprevid == CHIPREV_ID_5701_B2 ||
10560                     chiprevid == CHIPREV_ID_5701_B5) {
10561                         void __iomem *sram_base;
10562
10563                         /* Write some dummy words into the SRAM status block
10564                          * area, see if it reads back correctly.  If the return
10565                          * value is bad, force enable the PCIX workaround.
10566                          */
10567                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
10568
10569                         writel(0x00000000, sram_base);
10570                         writel(0x00000000, sram_base + 4);
10571                         writel(0xffffffff, sram_base + 4);
10572                         if (readl(sram_base) != 0x00000000)
10573                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10574                 }
10575         }
10576
10577         udelay(50);
10578         tg3_nvram_init(tp);
10579
10580         grc_misc_cfg = tr32(GRC_MISC_CFG);
10581         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
10582
10583         /* Broadcom's driver says that CIOBE multisplit has a bug */
10584 #if 0
10585         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
10586             grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
10587                 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
10588                 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
10589         }
10590 #endif
10591         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10592             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
10593              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
10594                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
10595
10596         if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
10597             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
10598                 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
10599         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
10600                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
10601                                       HOSTCC_MODE_CLRTICK_TXBD);
10602
10603                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
10604                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10605                                        tp->misc_host_ctrl);
10606         }
10607
10608         /* these are limited to 10/100 only */
10609         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
10610              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
10611             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10612              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
10613              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
10614               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
10615               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
10616             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
10617              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
10618               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)))
10619                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
10620
10621         err = tg3_phy_probe(tp);
10622         if (err) {
10623                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
10624                        pci_name(tp->pdev), err);
10625                 /* ... but do not return immediately ... */
10626         }
10627
10628         tg3_read_partno(tp);
10629         tg3_read_fw_ver(tp);
10630
10631         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
10632                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10633         } else {
10634                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10635                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
10636                 else
10637                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10638         }
10639
10640         /* 5700 {AX,BX} chips have a broken status block link
10641          * change bit implementation, so we must use the
10642          * status register in those cases.
10643          */
10644         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10645                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
10646         else
10647                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
10648
10649         /* The led_ctrl is set during tg3_phy_probe; here we might
10650          * have to force the link status polling mechanism based
10651          * upon subsystem IDs.
10652          */
10653         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10654             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
10655                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
10656                                   TG3_FLAG_USE_LINKCHG_REG);
10657         }
10658
10659         /* For all SERDES we poll the MAC status register. */
10660         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10661                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
10662         else
10663                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
10664
10665         /* All chips before the 5755 and 5787 can get confused if TX
10666          * buffers straddle the 4GB address boundary in some cases.
10667          */
10668         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10669             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
10670                 tp->dev->hard_start_xmit = tg3_start_xmit;
10671         else
10672                 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
10673
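        /* Offset receive buffers by 2 bytes so the IP header ends up
         * 4-byte aligned.  The 5701 in PCI-X mode cannot DMA to such
         * offset buffers, so it gets no alignment padding.
         */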
10674         tp->rx_offset = 2;
10675         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
10676             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
10677                 tp->rx_offset = 0;
10678
10679         tp->rx_std_max_post = TG3_RX_RING_SIZE;
10680
10681         /* Increment the rx prod index on the rx std ring by at most
10682          * 8 for these chips to work around hw errata.
10683          */
10684         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
10685             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
10686             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10687                 tp->rx_std_max_post = 8;
10688
10689         /* By default, disable Wake-on-LAN.  User can change this
10690          * using ETHTOOL_SWOL.
10691          */
10692         tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
10693
10694         return err;
10695 }
10696
10697 #ifdef CONFIG_SPARC64
10698 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
10699 {
10700         struct net_device *dev = tp->dev;
10701         struct pci_dev *pdev = tp->pdev;
10702         struct pcidev_cookie *pcp = pdev->sysdata;
10703
10704         if (pcp != NULL) {
10705                 unsigned char *addr;
10706                 int len;
10707
10708                 addr = of_get_property(pcp->prom_node, "local-mac-address",
10709                                         &len);
10710                 if (addr && len == 6) {
10711                         memcpy(dev->dev_addr, addr, 6);
10712                         memcpy(dev->perm_addr, dev->dev_addr, 6);
10713                         return 0;
10714                 }
10715         }
10716         return -ENODEV;
10717 }
10718
10719 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
10720 {
10721         struct net_device *dev = tp->dev;
10722
10723         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
10724         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
10725         return 0;
10726 }
10727 #endif
10728
10729 static int __devinit tg3_get_device_address(struct tg3 *tp)
10730 {
10731         struct net_device *dev = tp->dev;
10732         u32 hi, lo, mac_offset;
10733         int addr_ok = 0;
10734
10735 #ifdef CONFIG_SPARC64
10736         if (!tg3_get_macaddr_sparc(tp))
10737                 return 0;
10738 #endif
10739
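        /* NVRAM offset of the MAC address; the second port of a dual-MAC
         * device keeps its address at offset 0xcc instead.
         */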
10740         mac_offset = 0x7c;
10741         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
10742             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
10743                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
10744                         mac_offset = 0xcc;
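                /* Make sure NVRAM access is sane: if we cannot get the
                 * arbitration lock, reset the NVRAM command interface;
                 * otherwise just drop the lock again.
                 */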
10745                 if (tg3_nvram_lock(tp))
10746                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
10747                 else
10748                         tg3_nvram_unlock(tp);
10749         }
10750
10751         /* First try to get it from MAC address mailbox. */
10752         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
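        /* The upper 16 bits carry a 0x484b ("HK") signature when the
         * bootcode has stored a valid address here.
         */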
10753         if ((hi >> 16) == 0x484b) {
10754                 dev->dev_addr[0] = (hi >>  8) & 0xff;
10755                 dev->dev_addr[1] = (hi >>  0) & 0xff;
10756
10757                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
10758                 dev->dev_addr[2] = (lo >> 24) & 0xff;
10759                 dev->dev_addr[3] = (lo >> 16) & 0xff;
10760                 dev->dev_addr[4] = (lo >>  8) & 0xff;
10761                 dev->dev_addr[5] = (lo >>  0) & 0xff;
10762
10763                 /* Some old bootcode may report a 0 MAC address in SRAM */
10764                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
10765         }
10766         if (!addr_ok) {
10767                 /* Next, try NVRAM. */
10768                 if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
10769                     !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
10770                         dev->dev_addr[0] = ((hi >> 16) & 0xff);
10771                         dev->dev_addr[1] = ((hi >> 24) & 0xff);
10772                         dev->dev_addr[2] = ((lo >>  0) & 0xff);
10773                         dev->dev_addr[3] = ((lo >>  8) & 0xff);
10774                         dev->dev_addr[4] = ((lo >> 16) & 0xff);
10775                         dev->dev_addr[5] = ((lo >> 24) & 0xff);
10776                 }
10777                 /* Finally just fetch it out of the MAC control regs. */
10778                 else {
10779                         hi = tr32(MAC_ADDR_0_HIGH);
10780                         lo = tr32(MAC_ADDR_0_LOW);
10781
10782                         dev->dev_addr[5] = lo & 0xff;
10783                         dev->dev_addr[4] = (lo >> 8) & 0xff;
10784                         dev->dev_addr[3] = (lo >> 16) & 0xff;
10785                         dev->dev_addr[2] = (lo >> 24) & 0xff;
10786                         dev->dev_addr[1] = hi & 0xff;
10787                         dev->dev_addr[0] = (hi >> 8) & 0xff;
10788                 }
10789         }
10790
10791         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
10792 #ifdef CONFIG_SPARC64
10793                 if (!tg3_get_default_macaddr_sparc(tp))
10794                         return 0;
10795 #endif
10796                 return -EINVAL;
10797         }
10798         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
10799         return 0;
10800 }
10801
10802 #define BOUNDARY_SINGLE_CACHELINE       1
10803 #define BOUNDARY_MULTI_CACHELINE        2
10804
10805 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
10806 {
10807         int cacheline_size;
10808         u8 byte;
10809         int goal;
10810
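        /* PCI_CACHE_LINE_SIZE is in units of 32-bit words; treat an
         * unprogrammed (zero) value as 1024 bytes.
         */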
10811         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
10812         if (byte == 0)
10813                 cacheline_size = 1024;
10814         else
10815                 cacheline_size = (int) byte * 4;
10816
10817         /* On 5703 and later chips, the boundary bits have no
10818          * effect.
10819          */
10820         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10821             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
10822             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
10823                 goto out;
10824
10825 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
10826         goal = BOUNDARY_MULTI_CACHELINE;
10827 #else
10828 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
10829         goal = BOUNDARY_SINGLE_CACHELINE;
10830 #else
10831         goal = 0;
10832 #endif
10833 #endif
10834
10835         if (!goal)
10836                 goto out;
10837
10838         /* PCI controllers on most RISC systems tend to disconnect
10839          * when a device tries to burst across a cache-line boundary.
10840          * Therefore, letting tg3 do so just wastes PCI bandwidth.
10841          *
10842          * Unfortunately, for PCI-E there are only limited
10843          * write-side controls for this, and thus for reads
10844          * we will still get the disconnects.  We'll also waste
10845          * these PCI cycles for both read and write for chips
10846          * other than 5700 and 5701, which do not implement the
10847          * boundary bits.
10848          */
10849         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
10850             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
10851                 switch (cacheline_size) {
10852                 case 16:
10853                 case 32:
10854                 case 64:
10855                 case 128:
10856                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10857                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
10858                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
10859                         } else {
10860                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
10861                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
10862                         }
10863                         break;
10864
10865                 case 256:
10866                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
10867                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
10868                         break;
10869
10870                 default:
10871                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
10872                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
10873                         break;
10874                 }
10875         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10876                 switch (cacheline_size) {
10877                 case 16:
10878                 case 32:
10879                 case 64:
10880                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10881                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
10882                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
10883                                 break;
10884                         }
10885                         /* fallthrough */
10886                 case 128:
10887                 default:
10888                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
10889                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
10890                         break;
10891                 }
10892         } else {
10893                 switch (cacheline_size) {
10894                 case 16:
10895                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10896                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
10897                                         DMA_RWCTRL_WRITE_BNDRY_16);
10898                                 break;
10899                         }
10900                         /* fallthrough */
10901                 case 32:
10902                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10903                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
10904                                         DMA_RWCTRL_WRITE_BNDRY_32);
10905                                 break;
10906                         }
10907                         /* fallthrough */
10908                 case 64:
10909                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10910                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
10911                                         DMA_RWCTRL_WRITE_BNDRY_64);
10912                                 break;
10913                         }
10914                         /* fallthrough */
10915                 case 128:
10916                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10917                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
10918                                         DMA_RWCTRL_WRITE_BNDRY_128);
10919                                 break;
10920                         }
10921                         /* fallthrough */
10922                 case 256:
10923                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
10924                                 DMA_RWCTRL_WRITE_BNDRY_256);
10925                         break;
10926                 case 512:
10927                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
10928                                 DMA_RWCTRL_WRITE_BNDRY_512);
10929                         break;
10930                 case 1024:
10931                 default:
10932                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
10933                                 DMA_RWCTRL_WRITE_BNDRY_1024);
10934                         break;
10935                 }
10936         }
10937
10938 out:
10939         return val;
10940 }
10941
10942 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
10943 {
10944         struct tg3_internal_buffer_desc test_desc;
10945         u32 sram_dma_descs;
10946         int i, ret;
10947
10948         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
10949
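        /* Put the completion FIFOs, DMA status registers, buffer manager
         * and FTQs into a known state before issuing the test descriptor.
         */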
10950         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
10951         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
10952         tw32(RDMAC_STATUS, 0);
10953         tw32(WDMAC_STATUS, 0);
10954
10955         tw32(BUFMGR_MODE, 0);
10956         tw32(FTQ_RESET, 0);
10957
10958         test_desc.addr_hi = ((u64) buf_dma) >> 32;
10959         test_desc.addr_lo = buf_dma & 0xffffffff;
10960         test_desc.nic_mbuf = 0x00002100;
10961         test_desc.len = size;
10962
10963         /*
10964          * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
10965          * the *second* time the tg3 driver was loaded after an
10966          * initial scan.
10967          *
10968          * Broadcom tells me:
10969          *   ...the DMA engine is connected to the GRC block and a DMA
10970          *   reset may affect the GRC block in some unpredictable way...
10971          *   The behavior of resets to individual blocks has not been tested.
10972          *
10973          * Broadcom noted the GRC reset will also reset all sub-components.
10974          */
10975         if (to_device) {
10976                 test_desc.cqid_sqid = (13 << 8) | 2;
10977
10978                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
10979                 udelay(40);
10980         } else {
10981                 test_desc.cqid_sqid = (16 << 8) | 7;
10982
10983                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
10984                 udelay(40);
10985         }
10986         test_desc.flags = 0x00000005;
10987
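        /* Copy the test descriptor into NIC SRAM one 32-bit word at a
         * time through the PCI memory window.
         */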
10988         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
10989                 u32 val;
10990
10991                 val = *(((u32 *)&test_desc) + i);
10992                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
10993                                        sram_dma_descs + (i * sizeof(u32)));
10994                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
10995         }
10996         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
10997
10998         if (to_device) {
10999                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
11000         } else {
11001                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
11002         }
11003
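        /* Poll the matching completion FIFO for up to ~4ms (40 x 100us)
         * for the descriptor's SRAM address to appear.
         */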
11004         ret = -ENODEV;
11005         for (i = 0; i < 40; i++) {
11006                 u32 val;
11007
11008                 if (to_device)
11009                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
11010                 else
11011                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
11012                 if ((val & 0xffff) == sram_dma_descs) {
11013                         ret = 0;
11014                         break;
11015                 }
11016
11017                 udelay(100);
11018         }
11019
11020         return ret;
11021 }
11022
11023 #define TEST_BUFFER_SIZE        0x2000
11024
11025 static int __devinit tg3_test_dma(struct tg3 *tp)
11026 {
11027         dma_addr_t buf_dma;
11028         u32 *buf, saved_dma_rwctrl;
11029         int ret;
11030
11031         buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
11032         if (!buf) {
11033                 ret = -ENOMEM;
11034                 goto out_nofree;
11035         }
11036
11037         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
11038                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
11039
11040         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
11041
11042         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11043                 /* DMA read watermark not used on PCIE */
11044                 tp->dma_rwctrl |= 0x00180000;
11045         } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
11046                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
11047                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
11048                         tp->dma_rwctrl |= 0x003f0000;
11049                 else
11050                         tp->dma_rwctrl |= 0x003f000f;
11051         } else {
11052                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
11053                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
11054                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
11055
11056                         /* If the 5704 is behind the EPB bridge, we can
11057                          * do the less restrictive ONE_DMA workaround for
11058                          * better performance.
11059                          */
11060                         if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
11061                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
11062                                 tp->dma_rwctrl |= 0x8000;
11063                         else if (ccval == 0x6 || ccval == 0x7)
11064                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
11065
11066                         /* Set bit 23 to enable PCIX hw bug fix */
11067                         tp->dma_rwctrl |= 0x009f0000;
11068                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
11069                         /* 5780 always in PCIX mode */
11070                         tp->dma_rwctrl |= 0x00144000;
11071                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
11072                         /* 5714 always in PCIX mode */
11073                         tp->dma_rwctrl |= 0x00148000;
11074                 } else {
11075                         tp->dma_rwctrl |= 0x001b000f;
11076                 }
11077         }
11078
11079         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
11080             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
11081                 tp->dma_rwctrl &= 0xfffffff0;
11082
11083         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11084             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
11085                 /* Remove this if it causes problems for some boards. */
11086                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
11087
11088                 /* On 5700/5701 chips, we need to set this bit.
11089                  * Otherwise the chip will issue cacheline transactions
11090                  * to streamable DMA memory without all of the byte
11091                  * enables turned on.  This is an error on several
11092                  * RISC PCI controllers, in particular sparc64.
11093                  *
11094                  * On 5703/5704 chips, this bit has been reassigned
11095                  * a different meaning.  In particular, it is used
11096                  * on those chips to enable a PCI-X workaround.
11097                  */
11098                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
11099         }
11100
11101         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
11102
11103 #if 0
11104         /* Unneeded, already done by tg3_get_invariants.  */
11105         tg3_switch_clocks(tp);
11106 #endif
11107
11108         ret = 0;
11109         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
11110             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
11111                 goto out;
11112
11113         /* It is best to perform DMA test with maximum write burst size
11114          * to expose the 5700/5701 write DMA bug.
11115          */
11116         saved_dma_rwctrl = tp->dma_rwctrl;
11117         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
11118         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
11119
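        /* Repeatedly DMA a known pattern to the chip and back, tightening
         * the write boundary to 16 bytes and retrying if the read-back
         * data is corrupted.
         */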
11120         while (1) {
11121                 u32 *p = buf, i;
11122
11123                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
11124                         p[i] = i;
11125
11126                 /* Send the buffer to the chip. */
11127                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
11128                 if (ret) {
11129                         printk(KERN_ERR "tg3_test_dma() Write of the buffer failed, err = %d\n", ret);
11130                         break;
11131                 }
11132
11133 #if 0
11134                 /* validate data reached card RAM correctly. */
11135                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
11136                         u32 val;
11137                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
11138                         if (le32_to_cpu(val) != p[i]) {
11139                                 printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", le32_to_cpu(val), i);
11140                                 /* ret = -ENODEV here? */
11141                         }
11142                         p[i] = 0;
11143                 }
11144 #endif
11145                 /* Now read it back. */
11146                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
11147                 if (ret) {
11148                         printk(KERN_ERR "tg3_test_dma() Read of the buffer failed, err = %d\n", ret);
11150                         break;
11151                 }
11152
11153                 /* Verify it. */
11154                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
11155                         if (p[i] == i)
11156                                 continue;
11157
11158                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
11159                             DMA_RWCTRL_WRITE_BNDRY_16) {
11160                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
11161                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
11162                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
11163                                 break;
11164                         } else {
11165                                 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
11166                                 ret = -ENODEV;
11167                                 goto out;
11168                         }
11169                 }
11170
11171                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
11172                         /* Success. */
11173                         ret = 0;
11174                         break;
11175                 }
11176         }
11177         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
11178             DMA_RWCTRL_WRITE_BNDRY_16) {
11179                 static struct pci_device_id dma_wait_state_chipsets[] = {
11180                         { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
11181                                      PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
11182                         { },
11183                 };
11184
11185                 /* DMA test passed without adjusting the DMA boundary;
11186                  * now look for chipsets that are known to expose the
11187                  * DMA bug without failing the test.
11188                  */
11189                 if (pci_dev_present(dma_wait_state_chipsets)) {
11190                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
11191                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
11192                 }
11193                 else
11194                         /* Safe to use the calculated DMA boundary. */
11195                         tp->dma_rwctrl = saved_dma_rwctrl;
11196
11197                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
11198         }
11199
11200 out:
11201         pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
11202 out_nofree:
11203         return ret;
11204 }
11205
11206 static void __devinit tg3_init_link_config(struct tg3 *tp)
11207 {
11208         tp->link_config.advertising =
11209                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
11210                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
11211                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
11212                  ADVERTISED_Autoneg | ADVERTISED_MII);
11213         tp->link_config.speed = SPEED_INVALID;
11214         tp->link_config.duplex = DUPLEX_INVALID;
11215         tp->link_config.autoneg = AUTONEG_ENABLE;
11216         tp->link_config.active_speed = SPEED_INVALID;
11217         tp->link_config.active_duplex = DUPLEX_INVALID;
11218         tp->link_config.phy_is_low_power = 0;
11219         tp->link_config.orig_speed = SPEED_INVALID;
11220         tp->link_config.orig_duplex = DUPLEX_INVALID;
11221         tp->link_config.orig_autoneg = AUTONEG_INVALID;
11222 }
11223
11224 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
11225 {
11226         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11227                 tp->bufmgr_config.mbuf_read_dma_low_water =
11228                         DEFAULT_MB_RDMA_LOW_WATER_5705;
11229                 tp->bufmgr_config.mbuf_mac_rx_low_water =
11230                         DEFAULT_MB_MACRX_LOW_WATER_5705;
11231                 tp->bufmgr_config.mbuf_high_water =
11232                         DEFAULT_MB_HIGH_WATER_5705;
11233
11234                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
11235                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
11236                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
11237                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
11238                 tp->bufmgr_config.mbuf_high_water_jumbo =
11239                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
11240         } else {
11241                 tp->bufmgr_config.mbuf_read_dma_low_water =
11242                         DEFAULT_MB_RDMA_LOW_WATER;
11243                 tp->bufmgr_config.mbuf_mac_rx_low_water =
11244                         DEFAULT_MB_MACRX_LOW_WATER;
11245                 tp->bufmgr_config.mbuf_high_water =
11246                         DEFAULT_MB_HIGH_WATER;
11247
11248                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
11249                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
11250                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
11251                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
11252                 tp->bufmgr_config.mbuf_high_water_jumbo =
11253                         DEFAULT_MB_HIGH_WATER_JUMBO;
11254         }
11255
11256         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
11257         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
11258 }
11259
11260 static char * __devinit tg3_phy_string(struct tg3 *tp)
11261 {
11262         switch (tp->phy_id & PHY_ID_MASK) {
11263         case PHY_ID_BCM5400:    return "5400";
11264         case PHY_ID_BCM5401:    return "5401";
11265         case PHY_ID_BCM5411:    return "5411";
11266         case PHY_ID_BCM5701:    return "5701";
11267         case PHY_ID_BCM5703:    return "5703";
11268         case PHY_ID_BCM5704:    return "5704";
11269         case PHY_ID_BCM5705:    return "5705";
11270         case PHY_ID_BCM5750:    return "5750";
11271         case PHY_ID_BCM5752:    return "5752";
11272         case PHY_ID_BCM5714:    return "5714";
11273         case PHY_ID_BCM5780:    return "5780";
11274         case PHY_ID_BCM5755:    return "5755";
11275         case PHY_ID_BCM5787:    return "5787";
11276         case PHY_ID_BCM8002:    return "8002/serdes";
11277         case 0:                 return "serdes";
11278         default:                return "unknown";
11279         }
11280 }
11281
11282 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
11283 {
11284         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11285                 strcpy(str, "PCI Express");
11286                 return str;
11287         } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
11288                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
11289
11290                 strcpy(str, "PCIX:");
11291
11292                 if ((clock_ctrl == 7) ||
11293                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
11294                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
11295                         strcat(str, "133MHz");
11296                 else if (clock_ctrl == 0)
11297                         strcat(str, "33MHz");
11298                 else if (clock_ctrl == 2)
11299                         strcat(str, "50MHz");
11300                 else if (clock_ctrl == 4)
11301                         strcat(str, "66MHz");
11302                 else if (clock_ctrl == 6)
11303                         strcat(str, "100MHz");
11304         } else {
11305                 strcpy(str, "PCI:");
11306                 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
11307                         strcat(str, "66MHz");
11308                 else
11309                         strcat(str, "33MHz");
11310         }
11311         if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
11312                 strcat(str, ":32-bit");
11313         else
11314                 strcat(str, ":64-bit");
11315         return str;
11316 }
11317
11318 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
11319 {
11320         struct pci_dev *peer;
11321         unsigned int func, devnr = tp->pdev->devfn & ~7;
11322
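        /* Look at the other functions in this slot for the second port
         * of a dual-port device.
         */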
11323         for (func = 0; func < 8; func++) {
11324                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
11325                 if (peer && peer != tp->pdev)
11326                         break;
11327                 pci_dev_put(peer);
11328         }
11329         /* 5704 can be configured in single-port mode, set peer to
11330          * tp->pdev in that case.
11331          */
11332         if (!peer) {
11333                 peer = tp->pdev;
11334                 return peer;
11335         }
11336
11337         /*
11338          * We don't need to keep the refcount elevated; there's no way
11339          * to remove one half of this device without removing the other.
11340          */
11341         pci_dev_put(peer);
11342
11343         return peer;
11344 }
11345
11346 static void __devinit tg3_init_coal(struct tg3 *tp)
11347 {
11348         struct ethtool_coalesce *ec = &tp->coal;
11349
11350         memset(ec, 0, sizeof(*ec));
11351         ec->cmd = ETHTOOL_GCOALESCE;
11352         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
11353         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
11354         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
11355         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
11356         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
11357         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
11358         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
11359         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
11360         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
11361
11362         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
11363                                  HOSTCC_MODE_CLRTICK_TXBD)) {
11364                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
11365                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
11366                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
11367                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
11368         }
11369
11370         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11371                 ec->rx_coalesce_usecs_irq = 0;
11372                 ec->tx_coalesce_usecs_irq = 0;
11373                 ec->stats_block_coalesce_usecs = 0;
11374         }
11375 }
11376
11377 static int __devinit tg3_init_one(struct pci_dev *pdev,
11378                                   const struct pci_device_id *ent)
11379 {
11380         static int tg3_version_printed = 0;
11381         unsigned long tg3reg_base, tg3reg_len;
11382         struct net_device *dev;
11383         struct tg3 *tp;
11384         int i, err, pm_cap;
11385         char str[40];
11386         u64 dma_mask, persist_dma_mask;
11387
11388         if (tg3_version_printed++ == 0)
11389                 printk(KERN_INFO "%s", version);
11390
11391         err = pci_enable_device(pdev);
11392         if (err) {
11393                 printk(KERN_ERR PFX "Cannot enable PCI device, "
11394                        "aborting.\n");
11395                 return err;
11396         }
11397
11398         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
11399                 printk(KERN_ERR PFX "Cannot find proper PCI device "
11400                        "base address, aborting.\n");
11401                 err = -ENODEV;
11402                 goto err_out_disable_pdev;
11403         }
11404
11405         err = pci_request_regions(pdev, DRV_MODULE_NAME);
11406         if (err) {
11407                 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
11408                        "aborting.\n");
11409                 goto err_out_disable_pdev;
11410         }
11411
11412         pci_set_master(pdev);
11413
11414         /* Find power-management capability. */
11415         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
11416         if (pm_cap == 0) {
11417                 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
11418                        "aborting.\n");
11419                 err = -EIO;
11420                 goto err_out_free_res;
11421         }
11422
11423         tg3reg_base = pci_resource_start(pdev, 0);
11424         tg3reg_len = pci_resource_len(pdev, 0);
11425
11426         dev = alloc_etherdev(sizeof(*tp));
11427         if (!dev) {
11428                 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
11429                 err = -ENOMEM;
11430                 goto err_out_free_res;
11431         }
11432
11433         SET_MODULE_OWNER(dev);
11434         SET_NETDEV_DEV(dev, &pdev->dev);
11435
11436 #if TG3_VLAN_TAG_USED
11437         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
11438         dev->vlan_rx_register = tg3_vlan_rx_register;
11439         dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
11440 #endif
11441
11442         tp = netdev_priv(dev);
11443         tp->pdev = pdev;
11444         tp->dev = dev;
11445         tp->pm_cap = pm_cap;
11446         tp->mac_mode = TG3_DEF_MAC_MODE;
11447         tp->rx_mode = TG3_DEF_RX_MODE;
11448         tp->tx_mode = TG3_DEF_TX_MODE;
11449         tp->mi_mode = MAC_MI_MODE_BASE;
11450         if (tg3_debug > 0)
11451                 tp->msg_enable = tg3_debug;
11452         else
11453                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
11454
11455         /* The word/byte swap controls here control register access byte
11456          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
11457          * setting below.
11458          */
11459         tp->misc_host_ctrl =
11460                 MISC_HOST_CTRL_MASK_PCI_INT |
11461                 MISC_HOST_CTRL_WORD_SWAP |
11462                 MISC_HOST_CTRL_INDIR_ACCESS |
11463                 MISC_HOST_CTRL_PCISTATE_RW;
11464
11465         /* The NONFRM (non-frame) byte/word swap controls take effect
11466          * on descriptor entries, anything which isn't packet data.
11467          *
11468          * The StrongARM chips on the board (one for tx, one for rx)
11469          * are running in big-endian mode.
11470          */
11471         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
11472                         GRC_MODE_WSWAP_NONFRM_DATA);
11473 #ifdef __BIG_ENDIAN
11474         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
11475 #endif
11476         spin_lock_init(&tp->lock);
11477         spin_lock_init(&tp->tx_lock);
11478         spin_lock_init(&tp->indirect_lock);
11479         INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
11480
11481         tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
11482         if (!tp->regs) {
11483                 printk(KERN_ERR PFX "Cannot map device registers, "
11484                        "aborting.\n");
11485                 err = -ENOMEM;
11486                 goto err_out_free_dev;
11487         }
11488
11489         tg3_init_link_config(tp);
11490
11491         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
11492         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
11493         tp->tx_pending = TG3_DEF_TX_RING_PENDING;
11494
11495         dev->open = tg3_open;
11496         dev->stop = tg3_close;
11497         dev->get_stats = tg3_get_stats;
11498         dev->set_multicast_list = tg3_set_rx_mode;
11499         dev->set_mac_address = tg3_set_mac_addr;
11500         dev->do_ioctl = tg3_ioctl;
11501         dev->tx_timeout = tg3_tx_timeout;
11502         dev->poll = tg3_poll;
11503         dev->ethtool_ops = &tg3_ethtool_ops;
11504         dev->weight = 64;
11505         dev->watchdog_timeo = TG3_TX_TIMEOUT;
11506         dev->change_mtu = tg3_change_mtu;
11507         dev->irq = pdev->irq;
11508 #ifdef CONFIG_NET_POLL_CONTROLLER
11509         dev->poll_controller = tg3_poll_controller;
11510 #endif
11511
11512         err = tg3_get_invariants(tp);
11513         if (err) {
11514                 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
11515                        "aborting.\n");
11516                 goto err_out_iounmap;
11517         }
11518
11519         /* The EPB bridge inside 5714, 5715, and 5780 and any
11520          * device behind the EPB cannot support DMA addresses > 40-bit.
11521          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
11522          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
11523          * do DMA address check in tg3_start_xmit().
11524          */
11525         if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
11526                 persist_dma_mask = dma_mask = DMA_32BIT_MASK;
11527         else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
11528                 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
11529 #ifdef CONFIG_HIGHMEM
11530                 dma_mask = DMA_64BIT_MASK;
11531 #endif
11532         } else
11533                 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
11534
11535         /* Configure DMA attributes. */
11536         if (dma_mask > DMA_32BIT_MASK) {
11537                 err = pci_set_dma_mask(pdev, dma_mask);
11538                 if (!err) {
11539                         dev->features |= NETIF_F_HIGHDMA;
11540                         err = pci_set_consistent_dma_mask(pdev,
11541                                                           persist_dma_mask);
11542                         if (err < 0) {
11543                                 printk(KERN_ERR PFX "Unable to obtain 64 bit "
11544                                        "DMA for consistent allocations\n");
11545                                 goto err_out_iounmap;
11546                         }
11547                 }
11548         }
11549         if (err || dma_mask == DMA_32BIT_MASK) {
11550                 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
11551                 if (err) {
11552                         printk(KERN_ERR PFX "No usable DMA configuration, "
11553                                "aborting.\n");
11554                         goto err_out_iounmap;
11555                 }
11556         }
11557
11558         tg3_init_bufmgr_config(tp);
11559
11560 #if TG3_TSO_SUPPORT != 0
11561         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
11562                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
11563         }
11564         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11565             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
11566             tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
11567             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
11568                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
11569         } else {
11570                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
11571         }
11572
11573         /* TSO is on by default on chips that support hardware TSO.
11574          * Firmware TSO on older chips gives lower performance, so it
11575          * is off by default, but can be enabled using ethtool.
11576          */
11577         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
11578                 dev->features |= NETIF_F_TSO;
11579                 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)
11580                         dev->features |= NETIF_F_TSO6;
11581         }
11582
11583 #endif
11584
11585         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
11586             !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
11587             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
11588                 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
11589                 tp->rx_pending = 63;
11590         }
11591
11592         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
11593             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
11594                 tp->pdev_peer = tg3_find_peer(tp);
11595
11596         err = tg3_get_device_address(tp);
11597         if (err) {
11598                 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
11599                        "aborting.\n");
11600                 goto err_out_iounmap;
11601         }
11602
11603         /*
11604          * Reset the chip in case the UNDI or EFI driver did not shut it
11605          * down cleanly; otherwise the DMA self test will enable WDMAC and
11606          * we'll see (spurious) pending DMA on the PCI bus at that point.
11607          */
11608         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
11609             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
11610                 pci_save_state(tp->pdev);
11611                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
11612                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11613         }
11614
11615         err = tg3_test_dma(tp);
11616         if (err) {
11617                 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
11618                 goto err_out_iounmap;
11619         }
11620
11621         /* Tigon3 can do IPv4 checksum offload only... and some chips have buggy
11622          * checksumming.
11623          */
11624         if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
11625                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11626                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
11627                         dev->features |= NETIF_F_HW_CSUM;
11628                 else
11629                         dev->features |= NETIF_F_IP_CSUM;
11630                 dev->features |= NETIF_F_SG;
11631                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
11632         } else
11633                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
11634
11635         /* flow control autonegotiation is default behavior */
11636         tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
11637
11638         tg3_init_coal(tp);
11639
11640         /* Now that we have fully setup the chip, save away a snapshot
11641          * of the PCI config space.  We need to restore this after
11642          * GRC_MISC_CFG core clock resets and some resume events.
11643          */
11644         pci_save_state(tp->pdev);
11645
11646         err = register_netdev(dev);
11647         if (err) {
11648                 printk(KERN_ERR PFX "Cannot register net device, "
11649                        "aborting.\n");
11650                 goto err_out_iounmap;
11651         }
11652
11653         pci_set_drvdata(pdev, dev);
11654
11655         printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %sBaseT Ethernet ",
11656                dev->name,
11657                tp->board_part_number,
11658                tp->pci_chip_rev_id,
11659                tg3_phy_string(tp),
11660                tg3_bus_string(tp, str),
11661                (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");
11662
11663         for (i = 0; i < 6; i++)
11664                 printk("%2.2x%c", dev->dev_addr[i],
11665                        i == 5 ? '\n' : ':');
11666
11667         printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
11668                "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
11669                "TSOcap[%d] \n",
11670                dev->name,
11671                (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
11672                (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
11673                (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
11674                (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
11675                (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
11676                (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
11677                (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
11678         printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
11679                dev->name, tp->dma_rwctrl,
11680                (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
11681                 (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));
11682
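        /* Report the link as down until the interface is opened and the
         * actual PHY state is known.
         */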
11683         netif_carrier_off(tp->dev);
11684
11685         return 0;
11686
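        /* Error unwind: undo the probe steps in reverse order. */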
11687 err_out_iounmap:
11688         if (tp->regs) {
11689                 iounmap(tp->regs);
11690                 tp->regs = NULL;
11691         }
11692
11693 err_out_free_dev:
11694         free_netdev(dev);
11695
11696 err_out_free_res:
11697         pci_release_regions(pdev);
11698
11699 err_out_disable_pdev:
11700         pci_disable_device(pdev);
11701         pci_set_drvdata(pdev, NULL);
11702         return err;
11703 }
11704
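/*
 * Undo tg3_init_one(): make sure any deferred work has completed, unregister
 * the net device, then release the MMIO mapping and the PCI resources.
 */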
11705 static void __devexit tg3_remove_one(struct pci_dev *pdev)
11706 {
11707         struct net_device *dev = pci_get_drvdata(pdev);
11708
11709         if (dev) {
11710                 struct tg3 *tp = netdev_priv(dev);
11711
11712                 flush_scheduled_work();
11713                 unregister_netdev(dev);
11714                 if (tp->regs) {
11715                         iounmap(tp->regs);
11716                         tp->regs = NULL;
11717                 }
11718                 free_netdev(dev);
11719                 pci_release_regions(pdev);
11720                 pci_disable_device(pdev);
11721                 pci_set_drvdata(pdev, NULL);
11722         }
11723 }
11724
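/*
 * PM suspend: quiesce the interface (stop the queue and the timer, mask
 * interrupts, halt the chip), then drop to the PCI power state chosen for
 * this system sleep state.  If the power transition fails, the hardware is
 * restarted so the device stays usable.
 */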
11725 static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
11726 {
11727         struct net_device *dev = pci_get_drvdata(pdev);
11728         struct tg3 *tp = netdev_priv(dev);
11729         int err;
11730
11731         if (!netif_running(dev))
11732                 return 0;
11733
11734         flush_scheduled_work();
11735         tg3_netif_stop(tp);
11736
11737         del_timer_sync(&tp->timer);
11738
11739         tg3_full_lock(tp, 1);
11740         tg3_disable_ints(tp);
11741         tg3_full_unlock(tp);
11742
11743         netif_device_detach(dev);
11744
11745         tg3_full_lock(tp, 0);
11746         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11747         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
11748         tg3_full_unlock(tp);
11749
11750         err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
11751         if (err) {
11752                 tg3_full_lock(tp, 0);
11753
11754                 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
11755                 if (tg3_restart_hw(tp, 1))
11756                         goto out;
11757
11758                 tp->timer.expires = jiffies + tp->timer_offset;
11759                 add_timer(&tp->timer);
11760
11761                 netif_device_attach(dev);
11762                 tg3_netif_start(tp);
11763
11764 out:
11765                 tg3_full_unlock(tp);
11766         }
11767
11768         return err;
11769 }
11770
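/*
 * PM resume: restore the saved PCI config space, bring the chip back to D0,
 * then reprogram the hardware and restart the driver timer.
 */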
11771 static int tg3_resume(struct pci_dev *pdev)
11772 {
11773         struct net_device *dev = pci_get_drvdata(pdev);
11774         struct tg3 *tp = netdev_priv(dev);
11775         int err;
11776
11777         if (!netif_running(dev))
11778                 return 0;
11779
11780         pci_restore_state(tp->pdev);
11781
11782         err = tg3_set_power_state(tp, PCI_D0);
11783         if (err)
11784                 return err;
11785
11786         netif_device_attach(dev);
11787
11788         tg3_full_lock(tp, 0);
11789
11790         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
11791         err = tg3_restart_hw(tp, 1);
11792         if (err)
11793                 goto out;
11794
11795         tp->timer.expires = jiffies + tp->timer_offset;
11796         add_timer(&tp->timer);
11797
11798         tg3_netif_start(tp);
11799
11800 out:
11801         tg3_full_unlock(tp);
11802
11803         return err;
11804 }
11805
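/* PCI driver glue: probe/remove and power-management entry points. */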
11806 static struct pci_driver tg3_driver = {
11807         .name           = DRV_MODULE_NAME,
11808         .id_table       = tg3_pci_tbl,
11809         .probe          = tg3_init_one,
11810         .remove         = __devexit_p(tg3_remove_one),
11811         .suspend        = tg3_suspend,
11812         .resume         = tg3_resume
11813 };
11814
11815 static int __init tg3_init(void)
11816 {
11817         return pci_module_init(&tg3_driver);
11818 }
11819
11820 static void __exit tg3_cleanup(void)
11821 {
11822         pci_unregister_driver(&tg3_driver);
11823 }
11824
11825 module_init(tg3_init);
11826 module_exit(tg3_cleanup);